diff --git a/.backportrc.json b/.backportrc.json new file mode 100644 index 0000000000000..1b6d16f031561 --- /dev/null +++ b/.backportrc.json @@ -0,0 +1,16 @@ +{ + "upstream": "elastic/elasticsearch", + "targetBranchChoices": [ + { "name": "master", "checked": true }, + { "name": "7.x", "checked": true }, + "7.14", + "7.13", + "6.8" + ], + "targetPRLabels": ["backport"], + "branchLabelMapping": { + "^v8.0.0$": "master", + "^v7.15.0$": "7.x", + "^v(\\d+).(\\d+).\\d+$": "$1.$2" + } +} diff --git a/.ci/bwcVersions b/.ci/bwcVersions index e420788409fbb..9277aad1c73c9 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -37,5 +37,8 @@ BWC_VERSION: - "7.13.1" - "7.13.2" - "7.13.3" + - "7.13.4" - "7.14.0" + - "7.14.1" + - "7.15.0" - "8.0.0" diff --git a/.ci/dockerOnLinuxExclusions b/.ci/dockerOnLinuxExclusions index d4f937ad03d8b..56cefff7c6ccf 100644 --- a/.ci/dockerOnLinuxExclusions +++ b/.ci/dockerOnLinuxExclusions @@ -11,6 +11,7 @@ sles-12.3 # older version used in Vagrant image sles-12.5 sles-15.1 sles-15.2 +sles-15.3 # These OSes are deprecated and filtered starting with 8.0.0, but need to be excluded # for PR checks diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index 3f0544aa061b9..7edd7fdacd553 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -5,4 +5,4 @@ # are 'java' or 'openjdk' followed by the major release number. ES_BUILD_JAVA=openjdk16 -ES_RUNTIME_JAVA=openjdk11 +ES_RUNTIME_JAVA=java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+folder+pull-request.yml b/.ci/jobs.t/elastic+elasticsearch+folder+pull-request.yml deleted file mode 100644 index cef72323297a0..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+folder+pull-request.yml +++ /dev/null @@ -1,4 +0,0 @@ -- job: - name: elastic+elasticsearch+%BRANCH%+pull-request - display-name: Pull Requests - project-type: folder diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml index 8d8fd7f6c607f..d8f9f7e4a4ffe 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml @@ -17,7 +17,6 @@ - centos-8-packaging - debian-9-packaging - debian-10-packaging - - fedora-32-packaging - opensuse-15-1-packaging - oraclelinux-7-packaging - oraclelinux-8-packaging diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml index 54bd6169945a2..9a120435dcac7 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml @@ -20,6 +20,7 @@ - inject: properties-file: '.ci/java-versions-aarch64.properties' properties-content: | + COMPOSE_HTTP_TIMEOUT=120 JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA JAVA15_HOME=$HOME/.java/jdk15 diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml index ee797e25f3806..7525f0661bbd6 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml @@ -17,7 +17,6 @@ - "centos-8&&immutable" - "debian-9&&immutable" - "debian-10&&immutable" - - "fedora-32&&immutable" - "opensuse-15-1&&immutable" - "oraclelinux-7&&immutable" - "oraclelinux-8&&immutable" diff --git 
a/.ci/jobs.t/elastic+elasticsearch+periodic+java-fips-matrix.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+java-fips-matrix.yml index d4fa229963dcb..4235e99fbfa4b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+java-fips-matrix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+java-fips-matrix.yml @@ -4,6 +4,6 @@ vars: - job-name: elastic+elasticsearch+%BRANCH%+periodic+java-fips-matrix - job-display-name: "elastic / elasticsearch # %BRANCH% - java fips compatibility matrix" - job-description: "Testing of the Elasticsearch %BRANCH% branch java FIPS compatibility matrix.\n" - - matrix-yaml-file: ".ci/matrix-runtime-javas.yml" + - matrix-yaml-file: ".ci/matrix-runtime-javas-fips.yml" - matrix-variable: ES_RUNTIME_JAVA - gradle-args: "-Dbwc.checkout.align=true -Dtests.fips.enabled=true check" diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc.yml index b3c1281e46d93..ec7672ad63825 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc.yml @@ -1,5 +1,37 @@ --- -jjbb-template: pull-request-gradle-unix.yml -vars: - - pr-job: "bwc" - - gradle-args: "-Dignore.tests.seed bwcTestSnapshots" +- job: + name: "elastic+elasticsearch+pull-request+bwc" + display-name: "elastic / elasticsearch - pull request bwc" + description: "Testing of Elasticsearch pull requests - bwc" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+bwc" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/bwc.*' + github-hooks: true + status-context: elasticsearch-ci/bwc + cancel-builds-on-update: true + excluded-regions: + - ^docs/.* + black-list-labels: + - '>test-mute' + - 'test-full-bwc' + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + JAVA15_HOME=$HOME/.java/openjdk15 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed bwcTestSnapshots diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml index e72a0cb584161..ac36d86477445 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml @@ -1,9 +1,9 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+docs-check" - display-name: "elastic / elasticsearch # %BRANCH% - pull request docs-check" + name: "elastic+elasticsearch+pull-request+docs-check" + display-name: "elastic / elasticsearch - pull request docs-check" description: "Testing of Elasticsearch pull requests - docs-check" - workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+pull-request+docs-check" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+docs-check" scm: - git: refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" @@ -18,9 +18,6 @@ github-hooks: true status-context: elasticsearch-ci/docs-check cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* included-regions: - ^docs/.* 
black-list-labels: @@ -30,6 +27,8 @@ properties-file: '.ci/java-versions.properties' properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - shell: | #!/usr/local/bin/runbld --redirect-stderr diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml index 26c71b30e1230..be749c200557b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml @@ -1,9 +1,9 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+eql-correctness" - display-name: "elastic / elasticsearch # %BRANCH% - pull request eql-correctness" + name: "elastic+elasticsearch+pull-request+eql-correctness" + display-name: "elastic / elasticsearch - pull request eql-correctness" description: "Testing of Elasticsearch pull requests - eql-correctness" - workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+pull-request+eql-correctness" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+eql-correctness" scm: - git: refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" @@ -18,9 +18,8 @@ github-hooks: true status-context: elasticsearch-ci/eql-correctness cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml new file mode 100644 index 0000000000000..813dce173898e --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml @@ -0,0 +1,50 @@ +--- +- job: + name: "elastic+elasticsearch+pull-request+full-bwc" + display-name: "elastic / elasticsearch - pull request full-bwc" + description: "Testing of Elasticsearch pull requests - full-bwc" + project-type: matrix + node: master + child-workspace: "/dev/shm/elastic+elasticsearch+pull-request+full-bwc" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/full-bwc.*' + github-hooks: true + status-context: elasticsearch-ci/full-bwc + cancel-builds-on-update: true + excluded-regions: + - ^docs/.* + white-list-labels: + - 'test-full-bwc' + black-list-labels: + - '>test-mute' + axes: + - axis: + type: slave + name: nodes + values: + - "general-purpose" + - axis: + type: yaml + filename: ".ci/bwcVersions" + name: "BWC_VERSION" + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + JAVA15_HOME=$HOME/.java/openjdk15 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed v$BWC_VERSION#bwcTest diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml index 07b629ea9be23..6568fa4ee0444 100644 --- 
a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml @@ -1,7 +1,7 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+packaging-tests-unix-sample" - display-name: "elastic / elasticsearch # %BRANCH% - pull request packaging-tests-unix-sample" + name: "elastic+elasticsearch+pull-request+packaging-tests-unix-sample" + display-name: "elastic / elasticsearch - pull request packaging-tests-unix-sample" description: "Testing of Elasticsearch pull requests - packaging-tests-unix-sample" project-type: matrix node: master @@ -18,10 +18,9 @@ trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-unix-sample - cancel-builds-on-update: - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + cancel-builds-on-update: true + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml index 00289bf7aa1a7..fd979e15413f7 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml @@ -1,7 +1,7 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+packaging-tests-unix" - display-name: "elastic / elasticsearch # %BRANCH% - pull request packaging-tests-unix" + name: "elastic+elasticsearch+pull-request+packaging-tests-unix" + display-name: "elastic / elasticsearch - pull request packaging-tests-unix" description: "Testing of Elasticsearch pull requests - packaging-tests-unix" project-type: matrix node: master @@ -19,9 +19,8 @@ github-hooks: true status-context: elasticsearch-ci/packaging-tests-unix cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* white-list-labels: @@ -37,7 +36,6 @@ - centos-8-packaging - debian-9-packaging - debian-10-packaging - - fedora-32-packaging - opensuse-15-1-packaging - oraclelinux-7-packaging - oraclelinux-8-packaging diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml index 40819599bd05a..84656c31a2fdb 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml @@ -1,7 +1,7 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+packaging-tests-windows-sample" - display-name: "elastic / elasticsearch # %BRANCH% - pull request packaging-tests-windows-sample" + name: "elastic+elasticsearch+pull-request+packaging-tests-windows-sample" + display-name: "elastic / elasticsearch - pull request packaging-tests-windows-sample" description: "Testing of Elasticsearch pull requests - packaging-tests-windows-sample" # We use a hard-coded workspace directory here to avoid hitting windows path length limits child-workspace: "C:\\Users\\jenkins\\workspace\\pr-packaging-windows\\${BUILD_NUMBER}" @@ -21,9 +21,8 @@ github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows-sample 
cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml index 2792e83e962a2..3d580dda36766 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml @@ -1,7 +1,7 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+packaging-tests-windows" - display-name: "elastic / elasticsearch # %BRANCH% - pull request packaging-tests-windows" + name: "elastic+elasticsearch+pull-request+packaging-tests-windows" + display-name: "elastic / elasticsearch - pull request packaging-tests-windows" description: "Testing of Elasticsearch pull requests - packaging-tests-windows" # We use a hard-coded workspace directory here to avoid hitting windows path length limits child-workspace: "C:\\Users\\jenkins\\workspace\\pr-packaging-windows\\${BUILD_NUMBER}" @@ -21,9 +21,8 @@ github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* white-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml new file mode 100644 index 0000000000000..bec53a96e99b6 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml @@ -0,0 +1,53 @@ +--- +- job: + name: "elastic+elasticsearch+pull-request+packaging-upgrade-tests" + display-name: "elastic / elasticsearch - pull request packaging-upgrade-tests" + description: "Testing of Elasticsearch pull requests - packaging-upgrade-tests" + project-type: matrix + node: master + child-workspace: "/dev/shm/elastic+elasticsearch+pull-request+packaging-upgrade-tests" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/packaging-upgrade-tests.*' + github-hooks: true + status-context: elasticsearch-ci/packaging-upgrade-tests + cancel-builds-on-update: true + black-list-target-branches: + - 6.8 + excluded-regions: + - ^docs/.* + white-list-labels: + - ':Delivery/Packaging' + black-list-labels: + - '>test-mute' + axes: + - axis: + type: label-expression + name: os + values: + - centos-8-packaging + - ubuntu-20.04-packaging + - axis: + type: yaml + filename: ".ci/bwcVersions" + name: "BWC_VERSION" + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + JAVA15_HOME=$HOME/.java/openjdk15 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + ./.ci/os.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructiveDistroUpgradeTest.v$BWC_VERSION diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml index 53a34623e3392..766b158b59d7a 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml @@ -1,9 +1,9 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+part-1-fips" - display-name: "elastic / elasticsearch # %BRANCH% - pull request part-1 fips" + name: "elastic+elasticsearch+pull-request+part-1-fips" + display-name: "elastic / elasticsearch - pull request part-1 fips" description: "Testing of Elasticsearch pull requests - part-1 fips" - workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+pull-request+part-1-fips" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+part-1-fips" scm: - git: refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" @@ -18,13 +18,12 @@ github-hooks: true status-context: elasticsearch-ci/part-1-fips cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* white-list-labels: - - ':Security/FIPS' + - 'Team:Security' black-list-labels: - '>test-mute' builders: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml index a10119fa64bac..a557271685d2e 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml @@ -1,7 +1,7 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+part-1-windows" - display-name: "elastic / elasticsearch # %BRANCH% - pull request part-1 windows" + name: "elastic+elasticsearch+pull-request+part-1-windows" + display-name: "elastic / elasticsearch - pull request part-1 windows" description: "Testing of Elasticsearch pull requests - part-1 windows" node: "windows-immutable" workspace: "C:\\Users\\jenkins\\workspace\\pr-part-1\\${BUILD_NUMBER}" @@ -19,9 +19,8 @@ github-hooks: true status-context: elasticsearch-ci/part-1-windows cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* white-list-labels: @@ -34,6 +33,7 @@ properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA + JAVA15_HOME=$USERPROFILE\\.java\\openjdk15 GRADLE_TASK=checkPart1 - batch: | del /f /s /q %USERPROFILE%\.gradle\init.d\*.* diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml index 50e6feb5656ff..f22125ba5f51d 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml @@ -1,9 +1,9 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+part-2-fips" - display-name: "elastic / elasticsearch # %BRANCH% - pull request part-2 fips" + name: "elastic+elasticsearch+pull-request+part-2-fips" + display-name: "elastic / elasticsearch - pull request part-2 fips" description: "Testing of Elasticsearch pull requests - part-2 fips" - workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+pull-request+part-2-fips" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+part-2-fips" scm: - git: refspec: 
"+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" @@ -18,13 +18,12 @@ github-hooks: true status-context: elasticsearch-ci/part-2-fips cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* white-list-labels: - - ':Security/FIPS' + - 'Team:Security' black-list-labels: - '>test-mute' builders: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml index 457fbd4f44bca..ad353d79c0d14 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml @@ -1,7 +1,7 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+part-2-windows" - display-name: "elastic / elasticsearch # %BRANCH% - pull request part-2 windows" + name: "elastic+elasticsearch+pull-request+part-2-windows" + display-name: "elastic / elasticsearch - pull request part-2 windows" description: "Testing of Elasticsearch pull requests - part-2 windows" node: "windows-immutable" workspace: "C:\\Users\\jenkins\\workspace\\pr-part-2\\${BUILD_NUMBER}" @@ -19,9 +19,8 @@ github-hooks: true status-context: elasticsearch-ci/part-2-windows cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* + black-list-target-branches: + - 6.8 excluded-regions: - ^docs/.* white-list-labels: @@ -34,6 +33,7 @@ properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA + JAVA15_HOME=$USERPROFILE\\.java\\openjdk15 GRADLE_TASK=checkPart2 - batch: | del /f /s /q %USERPROFILE%\.gradle\init.d\*.* diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml index db697f4989087..e245df4fd599c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml @@ -1,9 +1,9 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+precommit" - display-name: "elastic / elasticsearch # %BRANCH% - pull request precommit" + name: "elastic+elasticsearch+pull-request+precommit" + display-name: "elastic / elasticsearch - pull request precommit" description: "Testing of Elasticsearch pull requests - precommit" - workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+pull-request+precommit" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+precommit" scm: - git: refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" @@ -18,9 +18,6 @@ github-hooks: true status-context: elasticsearch-ci/precommit cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* included-regions: - ^docs/.* black-list-labels: @@ -31,6 +28,8 @@ properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 - shell: | #!/usr/local/bin/runbld --redirect-stderr $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed precommit diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index 58f2bf302a06f..591730cc00a43 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -1,5 +1,38 @@ --- -jjbb-template: pull-request-gradle-unix.yml -vars: - - pr-job: "rest-compatibility" - - gradle-args: "-Dignore.tests.seed checkRestCompat" +- job: + name: "elastic+elasticsearch+pull-request+rest-compatibility" + display-name: "elastic / elasticsearch - pull request rest-compatibility" + description: "Testing of Elasticsearch pull requests - rest-compatibility" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+rest-compatibility" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/rest-compatibility.*' + github-hooks: true + status-context: elasticsearch-ci/rest-compatibility + cancel-builds-on-update: true + white-list-target-branches: + - master + excluded-regions: + - ^docs/.* + black-list-labels: + - '>test-mute' + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + JAVA15_HOME=$HOME/.java/openjdk15 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed checkRestCompat diff --git a/.ci/matrix-runtime-javas-fips.yml b/.ci/matrix-runtime-javas-fips.yml new file mode 100644 index 0000000000000..a8c3449a25789 --- /dev/null +++ b/.ci/matrix-runtime-javas-fips.yml @@ -0,0 +1,12 @@ +# This file is used as part of a matrix build in Jenkins where the +# values below are included as an axis of the matrix. + +# java11 should always be included as we only support Oracle Java 11 in +# FIPS 140-2 mode. +# We also want to test with the bundled JDK so that we proactively find +# issues that might later be backported to JDK11. 
Current bundled JDK is +# openjdk16 + +ES_RUNTIME_JAVA: + - java11 + - openjdk16 diff --git a/.ci/templates.t/pull-request-gradle-unix.yml b/.ci/templates.t/pull-request-gradle-unix.yml index f6be38532a38a..ec77b5a04fb9b 100644 --- a/.ci/templates.t/pull-request-gradle-unix.yml +++ b/.ci/templates.t/pull-request-gradle-unix.yml @@ -1,9 +1,9 @@ --- - job: - name: "elastic+elasticsearch+%BRANCH%+pull-request/elastic+elasticsearch+%BRANCH%+pull-request+{pr-job}" - display-name: "elastic / elasticsearch # %BRANCH% - pull request {pr-job}" + name: "elastic+elasticsearch+pull-request+{pr-job}" + display-name: "elastic / elasticsearch - pull request {pr-job}" description: "Testing of Elasticsearch pull requests - {pr-job}" - workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+pull-request+{pr-job}" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+{pr-job}" scm: - git: refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" @@ -18,9 +18,6 @@ github-hooks: true status-context: elasticsearch-ci/{pr-job} cancel-builds-on-update: true - white-list-target-branches: - - %BRANCH% - - ^feat(ure)?/.* excluded-regions: - ^docs/.* black-list-labels: @@ -31,6 +28,8 @@ properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 JAVA15_HOME=$HOME/.java/openjdk15 - shell: | #!/usr/local/bin/runbld --redirect-stderr diff --git a/.ci/views.t/views.yml b/.ci/views.t/views.yml index 6d386193467ce..19ef490825a83 100644 --- a/.ci/views.t/views.yml +++ b/.ci/views.t/views.yml @@ -2,3 +2,7 @@ name: "Elasticsearch %BRANCH%" view-type: list regex: '^elastic[-+]elasticsearch\+%BRANCH%\+((?!multijob).)*$' +- view: + name: "Elasticsearch Pull Requests" + view-type: list + regex: '^elastic[-+]elasticsearch\+pull[-+]request\+.*$' diff --git a/.editorconfig b/.editorconfig index 229c8667b1c6e..0c8a9dfd38ba2 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,5 +1,7 @@ # EditorConfig: http://editorconfig.org/ +# Options specific to IntelliJ are prefixed with `ij_` + root = true [*] @@ -8,24 +10,210 @@ trim_trailing_whitespace = true insert_final_newline = true indent_style = space -# IntelliJ-specific options -ij_java_class_count_to_use_import_on_demand = 999 -ij_java_names_count_to_use_import_on_demand = 999 -ij_java_imports_layout = *,|,com.**,|,org.**,|,java.**,javax.**,|,$* +ij_formatter_off_tag = @formatter:off +ij_formatter_on_tag = @formatter:on +ij_formatter_tags_enabled = false [*.gradle] +ij_continuation_indent_size = 2 indent_size = 2 +max_line_length = 150 + +[*.md] +max_line_length = 80 [*.groovy] indent_size = 4 +ij_continuation_indent_size = 4 max_line_length = 140 ij_groovy_class_count_to_use_import_on_demand = 999 ij_groovy_names_count_to_use_import_on_demand = 999 ij_groovy_imports_layout = *,|,com.**,|,org.**,|,java.**,javax.**,|,$* +[{*.gradle,*.groovy}] +ij_groovy_align_group_field_declarations = false +ij_groovy_align_multiline_array_initializer_expression = false +ij_groovy_align_multiline_assignment = false +ij_groovy_align_multiline_binary_operation = false +ij_groovy_align_multiline_chained_methods = false +ij_groovy_align_multiline_extends_list = false +ij_groovy_align_multiline_for = true +ij_groovy_align_multiline_list_or_map = true +ij_groovy_align_multiline_method_parentheses = false +ij_groovy_align_multiline_parameters = true +ij_groovy_align_multiline_parameters_in_calls = false +ij_groovy_align_multiline_resources = true 
+ij_groovy_align_multiline_ternary_operation = false +ij_groovy_align_multiline_throws_list = false +ij_groovy_align_named_args_in_map = true +ij_groovy_align_throws_keyword = false +ij_groovy_array_initializer_new_line_after_left_brace = true +ij_groovy_array_initializer_right_brace_on_new_line = true +ij_groovy_array_initializer_wrap = on_every_item +ij_groovy_assert_statement_wrap = on_every_item +ij_groovy_assignment_wrap = on_every_item +ij_groovy_binary_operation_wrap = normal +ij_groovy_blank_lines_after_class_header = 0 +ij_groovy_blank_lines_after_imports = 1 +ij_groovy_blank_lines_after_package = 1 +ij_groovy_blank_lines_around_class = 1 +ij_groovy_blank_lines_around_field = 0 +ij_groovy_blank_lines_around_field_in_interface = 0 +ij_groovy_blank_lines_around_method = 1 +ij_groovy_blank_lines_around_method_in_interface = 1 +ij_groovy_blank_lines_before_imports = 1 +ij_groovy_blank_lines_before_method_body = 0 +ij_groovy_blank_lines_before_package = 0 +ij_groovy_block_brace_style = end_of_line +ij_groovy_block_comment_at_first_column = true +ij_groovy_call_parameters_new_line_after_left_paren = true +ij_groovy_call_parameters_right_paren_on_new_line = true +ij_groovy_call_parameters_wrap = on_every_item +ij_groovy_catch_on_new_line = false +ij_groovy_class_annotation_wrap = split_into_lines +ij_groovy_class_brace_style = end_of_line +ij_groovy_do_while_brace_force = always +ij_groovy_else_on_new_line = false +ij_groovy_enum_constants_wrap = on_every_item +ij_groovy_extends_keyword_wrap = normal +ij_groovy_extends_list_wrap = on_every_item +ij_groovy_field_annotation_wrap = split_into_lines +ij_groovy_finally_on_new_line = false +ij_groovy_for_brace_force = always +ij_groovy_for_statement_new_line_after_left_paren = false +ij_groovy_for_statement_right_paren_on_new_line = false +ij_groovy_for_statement_wrap = off +ij_groovy_if_brace_force = never +ij_groovy_import_annotation_wrap = 2 +ij_groovy_imports_layout = *,com.**,|,org.**,|,java.**,javax.**,|,$* +ij_groovy_indent_case_from_switch = true +ij_groovy_indent_label_blocks = true +ij_groovy_insert_inner_class_imports = false +ij_groovy_keep_blank_lines_before_right_brace = 2 +ij_groovy_keep_blank_lines_in_code = 2 +ij_groovy_keep_blank_lines_in_declarations = 2 +ij_groovy_keep_control_statement_in_one_line = true +ij_groovy_keep_first_column_comment = true +ij_groovy_keep_indents_on_empty_lines = false +ij_groovy_keep_line_breaks = true +ij_groovy_keep_multiple_expressions_in_one_line = false +ij_groovy_keep_simple_blocks_in_one_line = false +ij_groovy_keep_simple_classes_in_one_line = true +ij_groovy_keep_simple_lambdas_in_one_line = true +ij_groovy_keep_simple_methods_in_one_line = true +ij_groovy_label_indent_absolute = false +ij_groovy_label_indent_size = 0 +ij_groovy_lambda_brace_style = end_of_line +ij_groovy_layout_static_imports_separately = true +ij_groovy_line_comment_add_space = false +ij_groovy_line_comment_at_first_column = true +ij_groovy_method_annotation_wrap = split_into_lines +ij_groovy_method_brace_style = end_of_line +ij_groovy_method_call_chain_wrap = off +ij_groovy_method_parameters_new_line_after_left_paren = false +ij_groovy_method_parameters_right_paren_on_new_line = false +ij_groovy_method_parameters_wrap = on_every_item +ij_groovy_modifier_list_wrap = false +ij_groovy_names_count_to_use_import_on_demand = 3 +ij_groovy_parameter_annotation_wrap = off +ij_groovy_parentheses_expression_new_line_after_left_paren = false +ij_groovy_parentheses_expression_right_paren_on_new_line = false 
+ij_groovy_prefer_parameters_wrap = false +ij_groovy_resource_list_new_line_after_left_paren = false +ij_groovy_resource_list_right_paren_on_new_line = false +ij_groovy_resource_list_wrap = off +ij_groovy_space_after_assert_separator = true +ij_groovy_space_after_colon = true +ij_groovy_space_after_comma = true +ij_groovy_space_after_comma_in_type_arguments = true +ij_groovy_space_after_for_semicolon = true +ij_groovy_space_after_quest = true +ij_groovy_space_after_type_cast = true +ij_groovy_space_before_annotation_parameter_list = false +ij_groovy_space_before_array_initializer_left_brace = true +ij_groovy_space_before_assert_separator = false +ij_groovy_space_before_catch_keyword = true +ij_groovy_space_before_catch_left_brace = true +ij_groovy_space_before_catch_parentheses = true +ij_groovy_space_before_class_left_brace = true +ij_groovy_space_before_closure_left_brace = true +ij_groovy_space_before_colon = true +ij_groovy_space_before_comma = false +ij_groovy_space_before_do_left_brace = true +ij_groovy_space_before_else_keyword = true +ij_groovy_space_before_else_left_brace = true +ij_groovy_space_before_finally_keyword = true +ij_groovy_space_before_finally_left_brace = true +ij_groovy_space_before_for_left_brace = true +ij_groovy_space_before_for_parentheses = true +ij_groovy_space_before_for_semicolon = false +ij_groovy_space_before_if_left_brace = true +ij_groovy_space_before_if_parentheses = true +ij_groovy_space_before_method_call_parentheses = false +ij_groovy_space_before_method_left_brace = true +ij_groovy_space_before_method_parentheses = false +ij_groovy_space_before_quest = true +ij_groovy_space_before_switch_left_brace = true +ij_groovy_space_before_switch_parentheses = true +ij_groovy_space_before_synchronized_left_brace = true +ij_groovy_space_before_synchronized_parentheses = true +ij_groovy_space_before_try_left_brace = true +ij_groovy_space_before_try_parentheses = true +ij_groovy_space_before_while_keyword = true +ij_groovy_space_before_while_left_brace = true +ij_groovy_space_before_while_parentheses = true +ij_groovy_space_in_named_argument = true +ij_groovy_space_in_named_argument_before_colon = false +ij_groovy_space_within_empty_array_initializer_braces = false +ij_groovy_space_within_empty_method_call_parentheses = false +ij_groovy_spaces_around_additive_operators = true +ij_groovy_spaces_around_assignment_operators = true +ij_groovy_spaces_around_bitwise_operators = true +ij_groovy_spaces_around_equality_operators = true +ij_groovy_spaces_around_lambda_arrow = true +ij_groovy_spaces_around_logical_operators = true +ij_groovy_spaces_around_multiplicative_operators = true +ij_groovy_spaces_around_regex_operators = true +ij_groovy_spaces_around_relational_operators = true +ij_groovy_spaces_around_shift_operators = true +ij_groovy_spaces_within_annotation_parentheses = false +ij_groovy_spaces_within_array_initializer_braces = true +ij_groovy_spaces_within_braces = true +ij_groovy_spaces_within_brackets = false +ij_groovy_spaces_within_cast_parentheses = false +ij_groovy_spaces_within_catch_parentheses = false +ij_groovy_spaces_within_for_parentheses = false +ij_groovy_spaces_within_gstring_injection_braces = false +ij_groovy_spaces_within_if_parentheses = false +ij_groovy_spaces_within_list_or_map = false +ij_groovy_spaces_within_method_call_parentheses = false +ij_groovy_spaces_within_method_parentheses = false +ij_groovy_spaces_within_parentheses = false +ij_groovy_spaces_within_switch_parentheses = false +ij_groovy_spaces_within_synchronized_parentheses = 
false +ij_groovy_spaces_within_try_parentheses = false +ij_groovy_spaces_within_tuple_expression = false +ij_groovy_spaces_within_while_parentheses = false +ij_groovy_special_else_if_treatment = true +ij_groovy_ternary_operation_wrap = on_every_item +ij_groovy_throws_keyword_wrap = off +ij_groovy_throws_list_wrap = on_every_item +ij_groovy_use_flying_geese_braces = false +ij_groovy_use_fq_class_names = false +ij_groovy_use_fq_class_names_in_javadoc = true +ij_groovy_use_relative_indents = false +ij_groovy_use_single_class_imports = true +ij_groovy_variable_annotation_wrap = off +ij_groovy_while_brace_force = always +ij_groovy_while_on_new_line = false +ij_groovy_wrap_long_lines = false + [*.java] indent_size = 4 max_line_length = 140 +ij_java_class_count_to_use_import_on_demand = 999 +ij_java_names_count_to_use_import_on_demand = 999 +ij_java_imports_layout = *,|,com.**,|,org.**,|,java.**,javax.**,|,$* [*.json] indent_size = 2 diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml index 5cf789707c58c..3f3eb5218afed 100644 --- a/.idea/inspectionProfiles/Project_Default.xml +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -5,5 +5,6 @@ + \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 46963321a0960..3e08516f86814 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,7 +1,7 @@ Contributing to elasticsearch ============================= -Elasticsearch is a free and open project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself. +Elasticsearch is a free and open project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself. If you want to be rewarded for your contributions, sign up for the [Elastic Contributor Program](https://www.elastic.co/community/contributor). Each time you make a valid contribution, you’ll earn points that increase your chances of winning prizes and being recognized as a top contributor. @@ -38,14 +38,14 @@ Contributing code and documentation changes ------------------------------------------- If you would like to contribute a new feature or a bug fix to Elasticsearch, -please discuss your idea first on the Github issue. If there is no Github issue +please discuss your idea first on the GitHub issue. If there is no GitHub issue for your idea, please open one. It may be that somebody is already working on it, or that there are particular complexities that you should know about before starting the implementation. There are often a number of ways to fix a problem and it is important to find the right approach before spending time on a PR that cannot be merged. -We add the `help wanted` label to existing Github issues for which community +We add the `help wanted` label to existing GitHub issues for which community contributions are particularly welcome, and we use the `good first issue` label to mark issues that we think will be suitable for new contributors. 
@@ -147,7 +147,6 @@ and then run `curl` in another window like this: curl -u elastic:password localhost:9200 - ### Importing the project into IntelliJ IDEA The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1 @@ -427,26 +426,25 @@ We require license headers on all Java files. With the exception of the top-level `x-pack` directory, all contributed code should have the following license header unless instructed otherwise: - /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ + /* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ The top-level `x-pack` directory contains code covered by the [Elastic license](licenses/ELASTIC-LICENSE-2.0.txt). Community contributions to this code are welcome, and should have the following license header unless instructed otherwise: - /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - + /* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ It is important that the only code covered by the Elastic licence is contained within the top-level `x-pack` directory. The build will fail its pre-commit @@ -456,52 +454,63 @@ checks if contributed code does not have the appropriate license headers. > be automatically configured to add the correct license header to new source > files based on the source location. +### Type-checking, generics and casting + +You should try to write code that does not require suppressing any warnings from +the compiler, e.g. suppressing type-checking, raw generics, and so on. However, +this isn't always possible or practical. In such cases, you should use the +`@SuppressWarnings` annotations to silence the compiler warning, trying to keep +the scope of the suppression as small as possible. Where a piece of code +requires a lot of suppressions, it may be better to apply a single suppression +at a higher level e.g. at the method or even class level. Use your judgement. + +There are also cases where the compiler simply refuses to accept an assignment +or cast of any kind, because it lacks the information to know that the types are +OK. In such cases, you can use +the [`Types.forciblyCast`](libs/core/src/main/java/org/elasticsearch/core/Types.java) +utility method. As the name suggests, you can coerce any type to any other type, +so please use it as a last resort. 
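To make the suppression-scope and forced-cast advice above concrete, here is a minimal sketch. It is not taken from the repository: the `forciblyCast` helper below is a local stand-in written for this example to mirror what the CONTRIBUTING text describes about the real `org.elasticsearch.core.Types` utility, and the `fromLegacyApi` raw-result scenario is hypothetical.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class SuppressionScopeExample {

    // Stand-in for the Types.forciblyCast utility described above: the unchecked-cast
    // warning is silenced once, inside this helper, so callers stay warning-free.
    @SuppressWarnings("unchecked")
    static <T> T forciblyCast(Object value) {
        return (T) value;
    }

    // Keep @SuppressWarnings on the smallest element possible (here a single local
    // variable declaration) rather than on the whole method or class.
    static List<String> fromLegacyApi(Object rawResult) {
        @SuppressWarnings("unchecked")
        List<String> names = (List<String>) rawResult;
        return names;
    }

    public static void main(String[] args) {
        Object raw = new ArrayList<>(List.of("a", "b"));

        // Narrow, annotated cast around a single assignment:
        System.out.println(fromLegacyApi(raw));

        // Last-resort coercion when the compiler cannot be convinced the types line up:
        Map<String, Object> source = Map.of("count", 1);
        Object value = source.get("count");
        Integer count = forciblyCast(value);
        System.out.println(count + 1);
    }
}
```

Keeping the coercion inside one annotated helper confines the suppression to a single audited place, which is the trade-off the paragraph above argues for when a blanket `@SuppressWarnings` would otherwise spread across a method or class.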
+ ### Creating A Distribution Run all build commands from within the root directory: -```sh -cd elasticsearch/ -``` + cd elasticsearch/ To build a darwin-tar distribution, run this command: -```sh -./gradlew -p distribution/archives/darwin-tar assemble -``` + ./gradlew -p distribution/archives/darwin-tar assemble You will find the distribution under: -`./distribution/archives/darwin-tar/build/distributions/` + + ./distribution/archives/darwin-tar/build/distributions/ To create all build artifacts (e.g., plugins and Javadocs) as well as distributions in all formats, run this command: -```sh -./gradlew assemble -``` + ./gradlew assemble -> **NOTE:** Running the task above will fail if you don't have a available +> **NOTE:** Running the task above will fail if you don't have an available > Docker installation. The package distributions (Debian and RPM) can be found under: -`./distribution/packages/(deb|rpm|oss-deb|oss-rpm)/build/distributions/` + + ./distribution/packages/(deb|rpm|oss-deb|oss-rpm)/build/distributions/ The archive distributions (tar and zip) can be found under: -`./distribution/archives/(darwin-tar|linux-tar|windows-zip|oss-darwin-tar|oss-linux-tar|oss-windows-zip)/build/distributions/` + + ./distribution/archives/(darwin-tar|linux-tar|windows-zip|oss-darwin-tar|oss-linux-tar|oss-windows-zip)/build/distributions/ ### Running The Full Test Suite Before submitting your changes, run the test suite to make sure that nothing is broken, with: -```sh -./gradlew check -``` + ./gradlew check If your changes affect only the documentation, run: -```sh -./gradlew -p docs check -``` + ./gradlew -p docs check + For more information about testing code examples in the documentation, see https://github.com/elastic/elasticsearch/blob/master/docs/README.asciidoc diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index d0308d2166bfa..19829074d96f1 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -35,6 +34,8 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.DocReader; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.search.lookup.SearchLookup; @@ -79,7 +80,7 @@ public class ScriptScoreBenchmark { private final ScriptModule scriptModule = new ScriptModule(Settings.EMPTY, pluginsService.filterPlugins(ScriptPlugin.class)); private final Map fieldTypes = Map.ofEntries( - Map.entry("n", new NumberFieldType("n", NumberType.LONG, false, false, true, true, null, Map.of(), null)) + Map.entry("n", new NumberFieldType("n", NumberType.LONG, false, false, true, true, null, Map.of(), null, false)) ); private final IndexFieldDataCache fieldDataCache = new IndexFieldDataCache.None(); private final 
CircuitBreakerService breakerService = new NoneCircuitBreakerService(); @@ -154,7 +155,7 @@ public TopDocs benchmark() throws IOException { private Query scriptScoreQuery(ScoreScript.Factory factory) { ScoreScript.LeafFactory leafFactory = factory.newFactory(Map.of(), lookup); - return new ScriptScoreQuery(new MatchAllDocsQuery(), null, leafFactory, null, "test", 0, Version.CURRENT); + return new ScriptScoreQuery(new MatchAllDocsQuery(), null, leafFactory, lookup, null, "test", 0, Version.CURRENT); } private ScoreScript.Factory bareMetalScript() { @@ -163,9 +164,9 @@ private ScoreScript.Factory bareMetalScript() { IndexNumericFieldData ifd = (IndexNumericFieldData) lookup.getForField(type); return new ScoreScript.LeafFactory() { @Override - public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - SortedNumericDocValues values = ifd.load(ctx).getLongValues(); - return new ScoreScript(params, lookup, ctx) { + public ScoreScript newInstance(DocReader docReader) throws IOException { + SortedNumericDocValues values = ifd.load(((DocValuesDocReader) docReader).getLeafReaderContext()).getLongValues(); + return new ScoreScript(params, null, docReader) { private int docId; @Override diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java index d15c298fa689d..aa09ae4503095 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java @@ -154,7 +154,7 @@ private StringTerms newTerms(Random rand, BytesRef[] dict, boolean withNested) { true, 0, buckets, - 0 + null ); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java index 66bb638c69f01..a5786b75e05cf 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java @@ -70,7 +70,7 @@ private StringTerms newTerms(boolean withNested) { false, 100000, resultBuckets, - 0 + null ); } diff --git a/build-conventions/build.gradle b/build-conventions/build.gradle index 2d0989fc77d99..c6bfe397e58cf 100644 --- a/build-conventions/build.gradle +++ b/build-conventions/build.gradle @@ -41,9 +41,9 @@ gradlePlugin { id = 'elasticsearch.licensing' implementationClass = 'org.elasticsearch.gradle.internal.conventions.LicensingPlugin' } - basics { - id = 'elasticsearch.basic-build-tool-conventions' - implementationClass = 'org.elasticsearch.gradle.internal.conventions.BasicBuildToolConventionsPlugin' + buildTools { + id = 'elasticsearch.build-tools' + implementationClass = 'org.elasticsearch.gradle.internal.conventions.BuildToolsConventionsPlugin' } } } @@ -60,7 +60,7 @@ dependencies { compileOnly "com.puppycrawl.tools:checkstyle:8.42" } -project.getPlugins().withType(JavaBasePlugin.class, javaBasePlugin -> { +project.getPlugins().withType(JavaBasePlugin.class) { java.getModularity().getInferModulePath().set(false); eclipse.getClasspath().getFile().whenMerged { classpath -> /* @@ -69,9 +69,8 @@ 
project.getPlugins().withType(JavaBasePlugin.class, javaBasePlugin -> { * in the usual build folder because eclipse becomes *very* sad * if we delete it. Which `gradlew clean` does all the time. */ - int i = 0; - classpath.getEntries().stream().filter(e -> e instanceof SourceFolder).forEachOrdered(s -> - s.setOutput("out/eclipse"+i++) - ) + classpath.getEntries().findAll{ s -> s instanceof SourceFolder }.eachWithIndex { s, i -> + s.setOutput("out/eclipse" + i) + } } -}) +} diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/BasicBuildToolConventionsPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/BuildToolsConventionsPlugin.java similarity index 84% rename from build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/BasicBuildToolConventionsPlugin.java rename to build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/BuildToolsConventionsPlugin.java index 2c0f95e880a46..a204af47b9878 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/BasicBuildToolConventionsPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/BuildToolsConventionsPlugin.java @@ -10,6 +10,7 @@ import org.elasticsearch.gradle.internal.conventions.info.ParallelDetector; import org.elasticsearch.gradle.internal.conventions.util.Util; +import org.elasticsearch.gradle.internal.conventions.precommit.LicenseHeadersPrecommitPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.tasks.bundling.Jar; @@ -17,10 +18,11 @@ import java.io.File; -public class BasicBuildToolConventionsPlugin implements Plugin { +public class BuildToolsConventionsPlugin implements Plugin { @Override public void apply(Project project) { + project.getPlugins().apply(LicenseHeadersPrecommitPlugin.class); int defaultParallel = ParallelDetector.findDefaultParallel(project); project.getTasks().withType(Test.class).configureEach(test -> { test.onlyIf((t) -> Util.getBooleanProperty("tests.fips.enabled", false) == false); diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/EclipseConventionPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/EclipseConventionPlugin.java index f1b81b3915606..22b4516b15829 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/EclipseConventionPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/EclipseConventionPlugin.java @@ -131,8 +131,8 @@ public void execute(Delete delete) { } private File root(Project project) { - return project.getGradle().getParent() == null ? - project.getRootDir() : - root(project.getGradle().getParent().getRootProject()); + return project.getRootProject().getName().equals("elasticsearch") ? 
+ project.getRootProject().getRootDir() : + project.getRootDir().getParentFile(); } } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java index 5b10053e7df73..79b80ea76b874 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java @@ -18,35 +18,47 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.XmlProvider; +import org.gradle.api.file.ProjectLayout; import org.gradle.api.plugins.BasePlugin; -import org.gradle.api.plugins.BasePluginConvention; +import org.gradle.api.plugins.BasePluginExtension; +import org.gradle.api.plugins.ExtensionContainer; import org.gradle.api.plugins.JavaLibraryPlugin; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.provider.MapProperty; -import org.gradle.api.provider.Property; -import org.gradle.api.provider.Provider; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; import org.gradle.api.publish.maven.tasks.GenerateMavenPom; import org.gradle.api.tasks.SourceSet; -import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.bundling.Jar; +import org.gradle.initialization.layout.BuildLayout; import org.gradle.language.base.plugins.LifecycleBasePlugin; +import javax.inject.Inject; import java.io.File; import java.util.Map; import java.util.concurrent.Callable; public class PublishPlugin implements Plugin { + private ProjectLayout projectLayout; + private BuildLayout buildLayout; + private ProviderFactory providerFactory; + + @Inject + public PublishPlugin(ProjectLayout projectLayout, BuildLayout buildLayout, ProviderFactory providerFactory) { + this.projectLayout = projectLayout; + this.buildLayout = buildLayout; + this.providerFactory = providerFactory; + } + @Override public void apply(Project project) { project.getPluginManager().apply(BasePlugin.class); project.getPluginManager().apply(MavenPublishPlugin.class); project.getPluginManager().apply(PomValidationPrecommitPlugin.class); project.getPluginManager().apply(LicensingPlugin.class); - configureJavadocJar(project); configureSourcesJar(project); configurePomGeneration(project); @@ -56,7 +68,6 @@ public void apply(Project project) { private void configurePublications(Project project) { var publishingExtension = project.getExtensions().getByType(PublishingExtension.class); var publication = publishingExtension.getPublications().create("elastic", MavenPublication.class); - project.afterEvaluate(project1 -> { if (project1.getPlugins().hasPlugin(ShadowPlugin.class)) { configureWithShadowPlugin(project1, publication); @@ -64,11 +75,11 @@ private void configurePublications(Project project) { publication.from(project.getComponents().getByName("java")); } }); + var projectLicenses = (MapProperty) project.getExtensions().getExtraProperties().get("projectLicenses"); publication.getPom().withXml(xml -> { var node = xml.asNode(); node.appendNode("inceptionYear", "2009"); var licensesNode = node.appendNode("licenses"); - var projectLicenses = (MapProperty) project.getExtensions().getExtraProperties().get("projectLicenses"); 
projectLicenses.get().entrySet().stream().sorted(Map.Entry.comparingByKey()).forEach(entry -> { Node license = licensesNode.appendNode("license"); license.appendNode("name", entry.getKey()); @@ -81,58 +92,59 @@ private void configurePublications(Project project) { }); publishingExtension.getRepositories().maven(mavenArtifactRepository -> { mavenArtifactRepository.setName("test"); - mavenArtifactRepository.setUrl(new File(project.getRootProject().getBuildDir(), "local-test-repo")); + mavenArtifactRepository.setUrl(new File(buildLayout.getRootDirectory(), "build/local-test-repo")); }); } - private static String getArchivesBaseName(Project project) { - return project.getConvention().getPlugin(BasePluginConvention.class).getArchivesBaseName(); + private static String getArchivesBaseName(ExtensionContainer extensions) { + return extensions.getByType(BasePluginExtension.class).getArchivesName().get(); } /** * Configuration generation of maven poms. */ - private static void configurePomGeneration(Project project) { - Property gitInfo = project.getRootProject().getPlugins().apply(GitInfoPlugin.class).getGitInfo(); - + private void configurePomGeneration(Project project) { + var gitInfo = project.getRootProject().getPlugins().apply(GitInfoPlugin.class).getGitInfo(); var generatePomTask = project.getTasks().register("generatePom"); project.getTasks().named(LifecycleBasePlugin.ASSEMBLE_TASK_NAME).configure(assemble -> assemble.dependsOn(generatePomTask)); - project.getTasks() - .withType(GenerateMavenPom.class) - .configureEach( - pomTask -> pomTask.setDestination( - (Callable) () -> String.format( - "%s/distributions/%s-%s.pom", - project.getBuildDir(), - getArchivesBaseName(project), - project.getVersion() - ) - ) - ); - var publishing = project.getExtensions().getByType(PublishingExtension.class); + var extensions = project.getExtensions(); + var archivesBaseName = providerFactory.provider(() -> getArchivesBaseName(extensions)); + var projectVersion = providerFactory.provider(() -> project.getVersion()); + var generateMavenPoms = project.getTasks().withType(GenerateMavenPom.class); + generateMavenPoms.configureEach( + pomTask -> pomTask.setDestination( + (Callable) () -> String.format( + "%s/distributions/%s-%s.pom", + projectLayout.getBuildDirectory().get().getAsFile().getPath(), + archivesBaseName.get(), + projectVersion.get() + ) + ) + ); + var publishing = extensions.getByType(PublishingExtension.class); final var mavenPublications = publishing.getPublications().withType(MavenPublication.class); addNameAndDescriptiontoPom(project, mavenPublications); - - mavenPublications.all(publication -> { + mavenPublications.configureEach(publication -> { // Add git origin info to generated POM files for internal builds - publication.getPom().withXml((xmlProvider) -> addScmInfo(xmlProvider, gitInfo.get())); + publication.getPom().withXml(xml -> addScmInfo(xml, gitInfo.get())); // have to defer this until archivesBaseName is set - project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); - generatePomTask.configure(t -> t.dependsOn(project.getTasks().withType(GenerateMavenPom.class))); + project.afterEvaluate(p -> publication.setArtifactId(archivesBaseName.get())); + generatePomTask.configure(t -> t.dependsOn(generateMavenPoms)); }); } - private static void addNameAndDescriptiontoPom(Project project, NamedDomainObjectSet mavenPublications) { - mavenPublications.all(p -> p.getPom().withXml(xml -> { + private void addNameAndDescriptiontoPom(Project project, NamedDomainObjectSet 
mavenPublications) { + var name = project.getName(); + var description = providerFactory.provider(() -> project.getDescription() != null ? project.getDescription() : ""); + mavenPublications.configureEach(p -> p.getPom().withXml(xml -> { var root = xml.asNode(); - root.appendNode("name", project.getName()); - var description = project.getDescription() != null ? project.getDescription() : ""; - root.appendNode("description", description); + root.appendNode("name", name); + root.appendNode("description", description.get()); })); } private static void configureWithShadowPlugin(Project project, MavenPublication publication) { - ShadowExtension shadow = project.getExtensions().getByType(ShadowExtension.class); + var shadow = project.getExtensions().getByType(ShadowExtension.class); shadow.component(publication); } @@ -161,7 +173,7 @@ private static void configureJavadocJar(Project project) { static void configureSourcesJar(Project project) { project.getPlugins().withType(JavaLibraryPlugin.class, p -> { - TaskProvider sourcesJarTask = project.getTasks().register("sourcesJar", Jar.class); + var sourcesJarTask = project.getTasks().register("sourcesJar", Jar.class); sourcesJarTask.configure(jar -> { jar.getArchiveClassifier().set("sources"); jar.setGroup("build"); diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/VersionPropertiesLoader.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/VersionPropertiesLoader.java index c7307ce98f65d..0b437abd4cb45 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/VersionPropertiesLoader.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/VersionPropertiesLoader.java @@ -9,6 +9,8 @@ package org.elasticsearch.gradle.internal.conventions; +import org.gradle.api.provider.ProviderFactory; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -17,7 +19,7 @@ // Define this here because we need it early. 
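// System properties here are read through providers.systemProperty(...).forUseAtConfigurationTime()
// rather than System.getProperties(), so Gradle can track them as configuration inputs,
// e.g. providers.systemProperty("build.snapshot").orElse("true").forUseAtConfigurationTime().get();
// this is the pattern configuration-cache support expects for values read at configuration time.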
public class VersionPropertiesLoader { - static Properties loadBuildSrcVersion(File input) throws IOException { + static Properties loadBuildSrcVersion(File input, ProviderFactory providerFactory) throws IOException { Properties props = new Properties(); InputStream is = new FileInputStream(input); try { @@ -25,11 +27,11 @@ static Properties loadBuildSrcVersion(File input) throws IOException { } finally { is.close(); } - loadBuildSrcVersion(props, System.getProperties()); + loadBuildSrcVersion(props, providerFactory); return props; } - protected static void loadBuildSrcVersion(Properties loadedProps, Properties systemProperties) { + protected static void loadBuildSrcVersion(Properties loadedProps, ProviderFactory providers) { String elasticsearch = loadedProps.getProperty("elasticsearch"); if (elasticsearch == null) { throw new IllegalStateException("Elasticsearch version is missing from properties."); @@ -40,14 +42,20 @@ protected static void loadBuildSrcVersion(Properties loadedProps, Properties sys elasticsearch ); } - String qualifier = systemProperties.getProperty("build.version_qualifier", ""); + String qualifier = providers.systemProperty("build.version_qualifier") + .orElse("") + .forUseAtConfigurationTime() + .get(); if (qualifier.isEmpty() == false) { if (qualifier.matches("(alpha|beta|rc)\\d+") == false) { throw new IllegalStateException("Invalid qualifier: " + qualifier); } elasticsearch += "-" + qualifier; } - final String buildSnapshotSystemProperty = systemProperties.getProperty("build.snapshot", "true"); + final String buildSnapshotSystemProperty = providers.systemProperty("build.snapshot") + .orElse("true") + .forUseAtConfigurationTime() + .get(); switch (buildSnapshotSystemProperty) { case "true": elasticsearch += "-SNAPSHOT"; diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/ParallelDetector.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/ParallelDetector.java index c4c0cb8f46d4c..7359d1728b96c 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/ParallelDetector.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/ParallelDetector.java @@ -9,6 +9,7 @@ package org.elasticsearch.gradle.internal.conventions.info; import org.gradle.api.Project; +import org.gradle.api.provider.ProviderFactory; import java.io.BufferedReader; import java.io.ByteArrayOutputStream; @@ -58,7 +59,7 @@ public static int findDefaultParallel(Project project) { throw new UncheckedIOException(e); } _defaultParallel = socketToCore.values().stream().mapToInt(i -> i).sum(); - } else if (isMac()) { + } else if (isMac(project.getProviders())) { // Ask macOS to count physical CPUs for us ByteArrayOutputStream stdout = new ByteArrayOutputStream(); project.exec(spec -> { @@ -76,8 +77,8 @@ public static int findDefaultParallel(Project project) { return _defaultParallel; } - private static boolean isMac() { - return System.getProperty("os.name", "").startsWith("Mac"); + private static boolean isMac(ProviderFactory providers) { + return providers.systemProperty("os.name").forUseAtConfigurationTime().getOrElse("").startsWith("Mac"); } } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersPrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersPrecommitPlugin.java index e93c6bab74f04..11dca2953b245 100644 --- 
a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersPrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersPrecommitPlugin.java @@ -11,7 +11,7 @@ import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.TaskProvider; @@ -29,7 +29,7 @@ public LicenseHeadersPrecommitPlugin(ProviderFactory providerFactory) { public TaskProvider createTask(Project project) { return project.getTasks().register("licenseHeaders", LicenseHeadersTask.class, licenseHeadersTask -> { project.getPlugins().withType(JavaBasePlugin.class, javaBasePlugin -> { - final SourceSetContainer sourceSets = project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); + final SourceSetContainer sourceSets = project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets(); licenseHeadersTask.getSourceFolders() .addAll(providerFactory.provider(() -> sourceSets.stream().map(s -> s.getAllJava()).collect(Collectors.toList()))); }); diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationPrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationPrecommitPlugin.java index 0f7de8573e3cc..cf70cce6e166b 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationPrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationPrecommitPlugin.java @@ -24,7 +24,7 @@ public class PomValidationPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { TaskProvider validatePom = project.getTasks().register("validatePom"); PublishingExtension publishing = project.getExtensions().getByType(PublishingExtension.class); - publishing.getPublications().all(publication -> { + publishing.getPublications().configureEach(publication -> { String publicationName = GUtils.capitalize(publication.getName()); TaskProvider validateTask = project.getTasks() .register("validate" + publicationName + "Pom", PomValidationTask.class); diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitPlugin.java index 6bf3cc5e93b95..e59ef2bd2b931 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitPlugin.java @@ -11,7 +11,7 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.TaskProvider; /** @@ -29,7 +29,7 @@ public final void apply(Project project) { precommit.configure(t -> t.dependsOn(task)); project.getPluginManager().withPlugin("java", p -> { // We want to get any compilation error before running the pre-commit checks. 
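// JavaPluginExtension supersedes the older JavaPluginConvention and exposes the same
// SourceSetContainer, so the lookup below goes through
// project.getExtensions().getByType(JavaPluginExtension.class) instead of the convention object.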
- project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().all(sourceSet -> + project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets().all(sourceSet -> task.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName())) ); }); diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java index 0493b30149bf3..f6a5db279792c 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java @@ -12,7 +12,7 @@ import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.testing.Test; import org.gradle.language.base.plugins.LifecycleBasePlugin; @@ -33,7 +33,7 @@ public void apply(Project project) { ); project.getPluginManager().withPlugin("java", p -> { // run compilation as part of precommit - project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().all(sourceSet -> + project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets().all(sourceSet -> precommit.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName())) ); // make sure tests run after all precommit tasks diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java index 3c90df54c0db5..05e4624d360d1 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java @@ -12,14 +12,12 @@ import org.gradle.api.GradleException; import org.gradle.api.Project; import org.gradle.api.file.FileTree; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.util.PatternFilterable; import javax.annotation.Nullable; -import java.net.URI; -import java.net.URISyntaxException; import java.util.Optional; import java.util.function.Supplier; @@ -85,7 +83,7 @@ public static FileTree getJavaTestAndMainSourceResources(Project project, Action * @return An Optional that contains the Java test SourceSet if it exists. */ public static Optional getJavaTestSourceSet(Project project) { - return project.getConvention().findPlugin(JavaPluginConvention.class) == null + return project.getExtensions().findByName("java") == null ? Optional.empty() : Optional.ofNullable(getJavaSourceSets(project).findByName(SourceSet.TEST_SOURCE_SET_NAME)); } @@ -95,11 +93,15 @@ public static Optional getJavaTestSourceSet(Project project) { * @return An Optional that contains the Java main SourceSet if it exists. */ public static Optional getJavaMainSourceSet(Project project) { - return project.getConvention().findPlugin(JavaPluginConvention.class) == null + return isJavaExtensionAvailable(project) ? 
Optional.empty() : Optional.ofNullable(getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME)); } + private static boolean isJavaExtensionAvailable(Project project) { + return project.getExtensions().getByType(JavaPluginExtension.class) == null; + } + public static Object toStringable(Supplier getter) { return new Object() { @@ -111,7 +113,7 @@ public String toString() { } public static SourceSetContainer getJavaSourceSets(Project project) { - return project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); + return project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets(); } } diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 03c01a347e452..362e5f2d4bc23 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -17,8 +17,7 @@ plugins { id 'java-gradle-plugin' id 'groovy-gradle-plugin' id 'groovy' - id 'elasticsearch.internal-licenseheaders' - id 'elasticsearch.basic-build-tool-conventions' + id 'elasticsearch.build-tools' id 'elasticsearch.eclipse' } @@ -27,7 +26,7 @@ group = 'org.elasticsearch.gradle' // we update the version property to reflect if we are building a snapshot or a release build // we write this back out below to load it in the Build.java which will be shown in rest main action // to indicate this being a snapshot build or a release build. -Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('version.properties')) +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('version.properties'), project.getProviders()) version = props.getProperty("elasticsearch") gradlePlugin { @@ -111,6 +110,10 @@ gradlePlugin { id = 'elasticsearch.jdk-download' implementationClass = 'org.elasticsearch.gradle.internal.JdkDownloadPlugin' } + releaseTools { + id = 'elasticsearch.release-tools' + implementationClass = 'org.elasticsearch.gradle.internal.release.ReleaseToolsPlugin' + } repositories { id = 'elasticsearch.repositories' implementationClass = 'org.elasticsearch.gradle.internal.RepositoriesSetupPlugin' @@ -166,14 +169,14 @@ gradlePlugin { * Java version * *****************************************************************************/ -if (JavaVersion.current() < JavaVersion.VERSION_11) { - throw new GradleException('At least Java 11 is required to build elasticsearch gradle tools') -} - def minCompilerJava = JavaVersion.toVersion(file('src/main/resources/minimumCompilerVersion').text) targetCompatibility = minCompilerJava sourceCompatibility = minCompilerJava +if (JavaVersion.current() < JavaVersion.toVersion(minCompilerJava)) { + throw new GradleException("Java ${minCompilerJava} is required to build Elasticsearch but current Java is version ${JavaVersion.current()}.") +} + sourceSets { integTest { compileClasspath += sourceSets["main"].output + configurations["testRuntimeClasspath"] @@ -206,7 +209,7 @@ dependencies { api gradleApi() api "org.elasticsearch:build-conventions:$version" - api "org.elasticsearch:build-tools:$version" + api "org.elasticsearch.gradle:build-tools:$version" api 'commons-codec:commons-codec:1.12' api 'org.apache.commons:commons-compress:1.19' @@ -229,13 +232,14 @@ dependencies { api "org.apache.httpcomponents:httpclient:${props.getProperty('httpclient')}" api "org.apache.httpcomponents:httpcore:${props.getProperty('httpcore')}" compileOnly "com.puppycrawl.tools:checkstyle:${props.getProperty('checkstyle')}" + runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation 
"com.puppycrawl.tools:checkstyle:${props.getProperty('checkstyle')}" testImplementation "junit:junit:${props.getProperty('junit')}" testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.23.2' testImplementation 'org.mockito:mockito-core:1.9.5' testImplementation "org.hamcrest:hamcrest:${props.getProperty('hamcrest')}" - testImplementation testFixtures("org.elasticsearch:build-tools:$version") + testImplementation testFixtures("org.elasticsearch.gradle:build-tools:$version") integTestImplementation(platform("org.junit:junit-bom:${props.getProperty('junit5')}")) integTestImplementation("org.junit.jupiter:junit-jupiter") { diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000000000..a211486a47110 --- /dev/null +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.1.1-all.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionSha256Sum=9bb8bc05f562f2d42bdf1ba8db62f6b6fa1c3bf6c392228802cc7cb0578fe7e0 diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy index 5372368fbd629..282f6b88ed2b4 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy @@ -11,9 +11,9 @@ package org.elasticsearch.gradle.fixtures; abstract class AbstractRestResourcesFuncTest extends AbstractGradleFuncTest { - void setupRestResources(List apis, List tests = [], List xpackTests = []) { + def setup() { addSubProject(":test:framework") << "apply plugin: 'elasticsearch.java'" - addSubProject(":distribution:archives:integ-test-zip") << "configurations { extracted }" + addSubProject(":rest-api-spec") << """ configurations { restSpecs\nrestTests } artifacts { @@ -21,6 +21,7 @@ abstract class AbstractRestResourcesFuncTest extends AbstractGradleFuncTest { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } """ + addSubProject(":x-pack:plugin") << """ configurations { restXpackSpecs\nrestXpackTests } artifacts { @@ -28,6 +29,11 @@ abstract class AbstractRestResourcesFuncTest extends AbstractGradleFuncTest { } """ + addSubProject(":distribution:archives:integ-test-zip") << "configurations { extracted }" + } + + void setupRestResources(List apis, List tests = [], List xpackTests = []) { + xpackTests.each { test -> file("x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index 9b9a9e9b70642..0b0ecb3d1ad5b 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -17,6 +17,10 @@ import spock.lang.IgnoreRest class PublishPluginFuncTest extends AbstractGradleFuncTest { + def setup() { + // required for 
JarHell to work + addSubProject(":libs:elasticsearch-core") << "apply plugin:'java'" + } def "artifacts and tweaked pom is published"() { given: buildFile << """ diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/RestResourcesPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/RestResourcesPluginFuncTest.groovy index b69cd7312efa7..6db4a437a0296 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/RestResourcesPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/RestResourcesPluginFuncTest.groovy @@ -116,7 +116,7 @@ class RestResourcesPluginFuncTest extends AbstractRestResourcesFuncTest { result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE file("/build/restResources/yamlSpecs/rest-api-spec/api/" + apiFoo).exists() file("/build/restResources/yamlSpecs/rest-api-spec/api/" + apiXpackFoo).exists() - file("/build/restResources/yamlSpecs/rest-api-spec/api/" + apiBar).exists() ==false + file("/build/restResources/yamlSpecs/rest-api-spec/api/" + apiBar).exists() == false file("/build/restResources/yamlSpecs/rest-api-spec/api/" + apiXpackBar).exists() == false } @@ -136,6 +136,10 @@ class RestResourcesPluginFuncTest extends AbstractRestResourcesFuncTest { includeXpack 'bar' } } + + tasks.named("copyYamlTestsTask").configure { + it.substitutions = [ 'replacedValue' : 'replacedWithValue' ] + } """ String apiCore1 = "foo1.json" String apiCore2 = "foo2.json" @@ -143,6 +147,10 @@ class RestResourcesPluginFuncTest extends AbstractRestResourcesFuncTest { String coreTest = "foo/10_basic.yml" String xpackTest = "bar/10_basic.yml" setupRestResources([apiCore1, apiCore2, apiXpack], [coreTest], [xpackTest]) + + // drop a value to replace from expansions above into a test file + file("rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/" + coreTest) << "@replacedValue@" + // intentionally not adding tests to project, they will be copied over via the plugin // this tests that the test copy happens before the api copy since the api copy will only trigger if there are tests in the project @@ -158,6 +166,9 @@ class RestResourcesPluginFuncTest extends AbstractRestResourcesFuncTest { file("/build/restResources/yamlTests/rest-api-spec/test/" + coreTest).exists() file("/build/restResources/yamlTests/rest-api-spec/test/" + xpackTest).exists() + // confirm that replacement happened + file("/build/restResources/yamlTests/rest-api-spec/test/" + coreTest).getText("UTF-8") == "replacedWithValue" + when: result = gradleRunner("copyRestApiSpecsTask").build() diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy index d176f7897a681..9443590a1e432 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy @@ -44,7 +44,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { """ when: - def result = gradleRunner("yamlRestCompatTest").build() + def result = gradleRunner("yamlRestCompatTest", '--stacktrace').build() then: 
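// nothing compatible is checked in for the task to transform, so it finishes as NO_SOURCE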
result.task(':yamlRestCompatTest').outcome == TaskOutcome.NO_SOURCE @@ -209,8 +209,11 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { task.replaceIsTrue("value_to_replace", "replaced_value") task.replaceIsFalse("value_to_replace", "replaced_value") task.replaceKeyInDo("do_.some.key_to_replace", "do_.some.key_that_was_replaced") + task.replaceKeyInDo("do_.some.key_to_replace_in_two", "do_.some.key_that_was_replaced_in_two", "two") task.replaceKeyInMatch("match_.some.key_to_replace", "match_.some.key_that_was_replaced") task.replaceKeyInLength("key.in_length_to_replace", "key.in_length_that_was_replaced") + task.replaceValueTextByKeyValue("keyvalue", "toreplace", "replacedkeyvalue") + task.replaceValueTextByKeyValue("index", "test", "test2", "two") }) // can't actually spin up test cluster from this test tasks.withType(Test).configureEach{ enabled = false } @@ -224,6 +227,9 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { do_.some.key_to_replace: index: test id: 1 + keyvalue : toreplace + do_.some.key_to_replace_in_two: + no_change_here: "because it's not in test 'two'" warnings: - "warning to remove" - match: { _source.values: ["foo"] } @@ -242,6 +248,8 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { get: index: test id: 1 + do_.some.key_to_replace_in_two: + changed_here: "because it is in test 'two'" - match: { _source.values: ["foo"] } - match: { _type: "_foo" } - match: { _source.blah: 1234 } @@ -250,7 +258,12 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { - is_false: "value_to_replace" - is_true: "value_not_to_replace" - is_false: "value_not_to_replace" - + --- + "use cat with no header": + - do: + cat.indices: + {} + - match: {} """.stripIndent() when: def result = gradleRunner("yamlRestCompatTest").build() @@ -279,6 +292,9 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { do_.some.key_that_was_replaced: index: "test" id: 1 + keyvalue : replacedkeyvalue + do_.some.key_to_replace_in_two: + no_change_here: "because it's not in test 'two'" warnings: - "warning1" - "warning2" @@ -315,8 +331,10 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { two: - do: get: - index: "test" + index: "test2" id: 1 + do_.some.key_that_was_replaced_in_two: + changed_here: "because it is in test 'two'" headers: Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" Accept: "application/vnd.elasticsearch+json;compatible-with=7" @@ -337,6 +355,16 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { - is_false: "replaced_value" - is_true: "value_not_to_replace" - is_false: "value_not_to_replace" + --- + "use cat with no header": + - do: + cat.indices: + {} + allowed_warnings: + - "added allowed warning" + allowed_warnings_regex: + - "added allowed warning regex .* [0-9]" + - match: {} """.stripIndent()).readAll() expectedAll.eachWithIndex{ ObjectNode expected, int i -> diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 8db97612c2a35..c819bcc4beff4 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -22,7 +22,7 @@ buildScan { String buildUrl = System.getenv('BUILD_URL') String jobName = System.getenv('JOB_NAME') String nodeName = System.getenv('NODE_NAME') - 
String jobBranch = System.getenv('JOB_BRANCH') + String jobBranch = System.getenv('ghprbTargetBranch') ?: System.getenv('JOB_BRANCH') tag OS.current().name() tag Architecture.current().name() diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index 78d55797e5f63..6ebf9cb0a7250 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -34,5 +34,4 @@ tasks.withType(Test).configureEach { } tasks.matching { it.name.equals("check") }.configureEach {dependsOn(bwcTestSnapshots) } - tasks.matching { it.name.equals("test") }.configureEach {enabled = false} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 8af190367beb9..41288f627dcbc 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -9,6 +9,7 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.TestDistribution +import org.elasticsearch.gradle.testclusters.TestClustersAware // Common config when running with a FIPS-140 runtime JVM if (BuildParams.inFipsJvm) { @@ -51,6 +52,9 @@ if (BuildParams.inFipsJvm) { } } } + tasks.withType(TestClustersAware) { + dependsOn 'fipsResources' + } testClusters.all { setTestDistribution(TestDistribution.DEFAULT) extraConfigFile "fips_java.security", fipsSecurity @@ -72,7 +76,7 @@ if (BuildParams.inFipsJvm) { } } project.tasks.withType(Test).configureEach { Test task -> - task.dependsOn('fipsResources') + dependsOn 'fipsResources' task.systemProperty('javax.net.ssl.trustStorePassword', 'password') task.systemProperty('javax.net.ssl.keyStorePassword', 'password') task.systemProperty('javax.net.ssl.trustStoreType', 'BCFKS') diff --git a/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle b/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle index 5aedded4897a9..709b2033fd7a0 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle @@ -218,7 +218,7 @@ subprojects { target 'src/**/*.java' } - toggleOffOn('formatter:off', 'formatter:on') // use `formatter:off` and `formatter:on` to toggle formatting - ONLY IF STRICTLY NECESSARY + toggleOffOn('@formatter:off', '@formatter:on') // use `@formatter:off` and `@formatter:on` to toggle formatting - ONLY IF STRICTLY NECESSARY removeUnusedImports() importOrderFile rootProject.file('build-tools-internal/elastic.importorder') eclipse().configFile rootProject.file('build-tools-internal/formatterConfig.xml') @@ -235,7 +235,7 @@ subprojects { // The `paddedCell()` option is disabled for normal operation so that any // misbehaviour is detected, and not just suppressed. You can enabled the // option from the command line by running Gradle with `-Dspotless.paddedcell`. 
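// reading the flag via providers.systemProperty('spotless.paddedcell') instead of System.getProperty
// lets Gradle record it as a configuration input, matching the other system-property reads in this change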
- if (System.getProperty('spotless.paddedcell') != null) { + if (providers.systemProperty('spotless.paddedcell').forUseAtConfigurationTime().isPresent()) { paddedCell() } } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index a4f9242747e52..a74347b96d820 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -69,7 +69,7 @@ tasks.register('configureIdeCheckstyle') { } // Applying this stuff, particularly the idea-ext plugin, has a cost so avoid it unless we're running in the IDE -if (System.getProperty('idea.active') == 'true') { +if (providers.systemProperty('idea.active').forUseAtConfigurationTime().getOrNull() == 'true') { project.apply(plugin: org.jetbrains.gradle.ext.IdeaExtPlugin) tasks.register('configureIdeaGradleJvm') { @@ -118,7 +118,19 @@ if (System.getProperty('idea.active') == 'true') { } runConfigurations { defaults(JUnit) { - vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' + vmParameters = [ + '-ea', + '-Djava.locale.providers=SPI,COMPAT', + "--illegal-access=deny", + // TODO: only open these for mockito when it is modularized + '--add-opens=java.base/java.security.cert=ALL-UNNAMED', + '--add-opens=java.base/java.nio.channels=ALL-UNNAMED', + '--add-opens=java.base/java.net=ALL-UNNAMED', + '--add-opens=java.base/javax.net.ssl=ALL-UNNAMED', + '--add-opens=java.base/java.nio.file=ALL-UNNAMED', + '--add-opens=java.base/java.time=ALL-UNNAMED', + '--add-opens=java.base/java.lang=ALL-UNNAMED' + ].join(' ') } } copyright { diff --git a/build-tools-internal/src/main/groovy/elasticsearch.run.gradle b/build-tools-internal/src/main/groovy/elasticsearch.run.gradle index 92953ebbe01c5..ceb01909ebd3a 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.run.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.run.gradle @@ -6,6 +6,7 @@ * Side Public License, v 1. 
*/ +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.testclusters.RunTask // gradle has an open issue of failing applying plugins in @@ -14,9 +15,9 @@ import org.elasticsearch.gradle.testclusters.RunTask testClusters { runTask { - testDistribution = System.getProperty('run.distribution', 'default') - if (System.getProperty('run.distribution', 'default') == 'default') { - String licenseType = System.getProperty("run.license_type", "basic") + testDistribution = providers.systemProperty('run.distribution').orElse('default').forUseAtConfigurationTime().get() + if (providers.systemProperty('run.distribution').orElse('default').forUseAtConfigurationTime().get() == 'default') { + String licenseType = providers.systemProperty("run.license_type").orElse("basic").forUseAtConfigurationTime().get() if (licenseType == 'trial') { setting 'xpack.ml.enabled', 'true' setting 'xpack.graph.enabled', 'true' @@ -28,7 +29,6 @@ testClusters { setting 'xpack.security.enabled', 'true' keystore 'bootstrap.password', 'password' user username: 'elastic-admin', password: 'elastic-password', role: 'superuser' - systemProperty 'es.shutdown_feature_flag_enabled', 'true' } } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java index 776349f4cc1f7..1f29ae13822ba 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java @@ -44,7 +44,6 @@ public void apply(final Project project) { project.getPluginManager().apply(DependenciesGraphPlugin.class); InternalPrecommitTasks.create(project, true); - project.getPluginManager().apply(SplitPackagesAuditPrecommitPlugin.class); } public static void configureLicenseAndNotice(final Project project) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java new file mode 100644 index 0000000000000..0ee68adb699c4 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal; + +import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; +import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Action; +import org.gradle.api.JavaVersion; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.ResolutionStrategy; +import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.plugins.JavaPlugin; +import org.gradle.api.plugins.JavaPluginExtension; +import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.compile.AbstractCompile; +import org.gradle.api.tasks.compile.CompileOptions; +import org.gradle.api.tasks.compile.GroovyCompile; +import org.gradle.api.tasks.compile.JavaCompile; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; + + +/** + * A wrapper around Gradle's Java Base plugin that applies our + * common configuration for production code. + */ +public class ElasticsearchJavaBasePlugin implements Plugin { + @Override + public void apply(Project project) { + // make sure the global build info plugin is applied to the root project + project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); + // common repositories setup + project.getPluginManager().apply(JavaBasePlugin.class); + project.getPluginManager().apply(RepositoriesSetupPlugin.class); + project.getPluginManager().apply(ElasticsearchTestBasePlugin.class); + project.getPluginManager().apply(PrecommitTaskPlugin.class); + + configureCompile(project); + configureInputNormalization(project); + + // convenience access to common versions used in dependencies + project.getExtensions().getExtraProperties().set("versions", VersionProperties.getVersions()); + } + + /** + * Adds compiler settings to the project + */ + public static void configureCompile(Project project) { + project.getExtensions().getExtraProperties().set("compactProfile", "full"); + + JavaPluginExtension java = project.getExtensions().getByType(JavaPluginExtension.class); + java.setSourceCompatibility(BuildParams.getMinimumRuntimeVersion()); + java.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion()); + + project.afterEvaluate(p -> { + project.getTasks().withType(JavaCompile.class).configureEach(compileTask -> { + CompileOptions compileOptions = compileTask.getOptions(); + /* + * -path because gradle will send in paths that don't always exist. + * -missing because we have tons of missing @returns and @param. + * -serial because we don't use java serialization. + */ + // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :) + // fail on all javac warnings. + // TODO Discuss moving compileOptions.getCompilerArgs() to use provider api with Gradle team. 
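// -Werror promotes every javac warning to an error; the -Xlint exclusions after "all" opt the listed
// categories back out, and -Xdoclint:-missing relaxes the doclint check for absent @param/@return tags.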
+ List compilerArgs = compileOptions.getCompilerArgs(); + compilerArgs.add("-Werror"); + compilerArgs.add("-Xlint:all,-path,-serial,-options,-deprecation,-try"); + compilerArgs.add("-Xdoclint:all"); + compilerArgs.add("-Xdoclint:-missing"); + // either disable annotation processor completely (default) or allow to enable them if an annotation processor is explicitly + // defined + if (compilerArgs.contains("-processor") == false) { + compilerArgs.add("-proc:none"); + } + + compileOptions.setEncoding("UTF-8"); + compileOptions.setIncremental(true); + // workaround for https://github.com/gradle/gradle/issues/14141 + compileTask.getConventionMapping().map("sourceCompatibility", () -> java.getSourceCompatibility().toString()); + compileTask.getConventionMapping().map("targetCompatibility", () -> java.getTargetCompatibility().toString()); + compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); + }); + // also apply release flag to groovy, which is used in build-tools + project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> { + // TODO: this probably shouldn't apply to groovy at all? + compileTask.getOptions().getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); + }); + }); + } + + + /** + * Apply runtime classpath input normalization so that changes in JAR manifests don't break build cacheability + */ + public static void configureInputNormalization(Project project) { + project.getNormalization().getRuntimeClasspath().ignore("META-INF/MANIFEST.MF"); + } + + private static Provider releaseVersionProviderFromCompileTask(Project project, AbstractCompile compileTask) { + return project.provider(() -> { + JavaVersion javaVersion = JavaVersion.toVersion(compileTask.getTargetCompatibility()); + return Integer.parseInt(javaVersion.getMajorVersion()); + }); + } + +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java index 88d552d8812c4..fe845e2012c70 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java @@ -12,8 +12,6 @@ import nebula.plugin.info.InfoBrokerPlugin; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.info.BuildParams; -import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; -import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.elasticsearch.gradle.internal.conventions.util.Util; import org.gradle.api.Action; @@ -52,28 +50,20 @@ import static org.elasticsearch.gradle.internal.conventions.util.Util.toStringable; /** - * A wrapper around Gradle's Java plugin that applies our common configuration. + * A wrapper around Gradle's Java plugin that applies our + * common configuration for production code. 
*/ public class ElasticsearchJavaPlugin implements Plugin { @Override public void apply(Project project) { - // make sure the global build info plugin is applied to the root project - project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); - // common repositories setup - project.getPluginManager().apply(RepositoriesSetupPlugin.class); + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(JavaLibraryPlugin.class); - project.getPluginManager().apply(ElasticsearchTestBasePlugin.class); - project.getPluginManager().apply(PrecommitTaskPlugin.class); configureConfigurations(project); - configureCompile(project); - configureInputNormalization(project); configureJars(project); configureJarManifest(project); configureJavadoc(project); - - // convenience access to common versions used in dependencies - project.getExtensions().getExtraProperties().set("versions", VersionProperties.getVersions()); + testCompileOnlyDeps(project); } /** @@ -93,11 +83,6 @@ public void apply(Project project) { * to iterate the transitive dependencies and add excludes. */ public static void configureConfigurations(Project project) { - // we want to test compileOnly deps! - Configuration compileOnlyConfig = project.getConfigurations().getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME); - Configuration testImplementationConfig = project.getConfigurations().getByName(JavaPlugin.TEST_IMPLEMENTATION_CONFIGURATION_NAME); - testImplementationConfig.extendsFrom(compileOnlyConfig); - // we are not shipping these jars, we act like dumb consumers of these things if (project.getPath().startsWith(":test:fixtures") || project.getPath().equals(":build-tools")) { return; @@ -111,119 +96,57 @@ public static void configureConfigurations(Project project) { configuration.resolutionStrategy(ResolutionStrategy::failOnVersionConflict); }); - // force all dependencies added directly to compile/testImplementation to be non-transitive, except for ES itself - Consumer disableTransitiveDeps = configName -> { - Configuration config = project.getConfigurations().getByName(configName); - config.getDependencies().all(dep -> { - if (dep instanceof ModuleDependency - && dep instanceof ProjectDependency == false - && dep.getGroup().startsWith("org.elasticsearch") == false) { - ((ModuleDependency) dep).setTransitive(false); - } - }); - }; - // disable transitive dependency management SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); sourceSets.all(sourceSet -> disableTransitiveDependenciesForSourceSet(project, sourceSet)); } - /** - * Adds compiler settings to the project - */ - public static void configureCompile(Project project) { - project.getExtensions().getExtraProperties().set("compactProfile", "full"); - - JavaPluginExtension java = project.getExtensions().getByType(JavaPluginExtension.class); - java.setSourceCompatibility(BuildParams.getMinimumRuntimeVersion()); - java.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion()); - - project.afterEvaluate(p -> { - project.getTasks().withType(JavaCompile.class).configureEach(compileTask -> { - CompileOptions compileOptions = compileTask.getOptions(); - /* - * -path because gradle will send in paths that don't always exist. - * -missing because we have tons of missing @returns and @param. - * -serial because we don't use java serialization. - */ - // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :) - // fail on all javac warnings. 
- // TODO Discuss moving compileOptions.getCompilerArgs() to use provider api with Gradle team. - List compilerArgs = compileOptions.getCompilerArgs(); - compilerArgs.add("-Werror"); - compilerArgs.add("-Xlint:all,-path,-serial,-options,-deprecation,-try"); - compilerArgs.add("-Xdoclint:all"); - compilerArgs.add("-Xdoclint:-missing"); - // either disable annotation processor completely (default) or allow to enable them if an annotation processor is explicitly - // defined - if (compilerArgs.contains("-processor") == false) { - compilerArgs.add("-proc:none"); - } - - compileOptions.setEncoding("UTF-8"); - compileOptions.setIncremental(true); - // workaround for https://github.com/gradle/gradle/issues/14141 - compileTask.getConventionMapping().map("sourceCompatibility", () -> java.getSourceCompatibility().toString()); - compileTask.getConventionMapping().map("targetCompatibility", () -> java.getTargetCompatibility().toString()); - compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); - }); - // also apply release flag to groovy, which is used in build-tools - project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> { - // TODO: this probably shouldn't apply to groovy at all? - compileTask.getOptions().getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); - }); - }); - } - - private static Provider releaseVersionProviderFromCompileTask(Project project, AbstractCompile compileTask) { - return project.provider(() -> { - JavaVersion javaVersion = JavaVersion.toVersion(compileTask.getTargetCompatibility()); - return Integer.parseInt(javaVersion.getMajorVersion()); - }); - } - - /** - * Apply runtime classpath input normalization so that changes in JAR manifests don't break build cacheability - */ - public static void configureInputNormalization(Project project) { - project.getNormalization().getRuntimeClasspath().ignore("META-INF/MANIFEST.MF"); + private static void testCompileOnlyDeps(Project project) { + // we want to test compileOnly deps! 
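// extending testImplementation from compileOnly puts every compile-only dependency on the test
// compile and runtime classpaths, so unit tests can exercise classes that are otherwise only
// provided at compile time in production code.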
+ Configuration compileOnlyConfig = project.getConfigurations().getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME); + Configuration testImplementationConfig = project.getConfigurations().getByName(JavaPlugin.TEST_IMPLEMENTATION_CONFIGURATION_NAME); + testImplementationConfig.extendsFrom(compileOnlyConfig); } /** * Adds additional manifest info to jars */ static void configureJars(Project project) { - project.getTasks().withType(Jar.class).configureEach(jarTask -> { - // we put all our distributable files under distributions - jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions")); - // fixup the jar manifest - // Explicitly using an Action interface as java lambdas - // are not supported by Gradle up-to-date checks - jarTask.doFirst(new Action() { - @Override - public void execute(Task task) { - // this doFirst is added before the info plugin, therefore it will run - // after the doFirst added by the info plugin, and we can override attributes - jarTask.getManifest() - .attributes( - Map.of("Build-Date", BuildParams.getBuildDate(), "Build-Java-Version", BuildParams.getGradleJavaVersion()) - ); - } - }); - }); + project.getTasks().withType(Jar.class).configureEach( + jarTask -> { + // we put all our distributable files under distributions + jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions")); + // fixup the jar manifest + // Explicitly using an Action interface as java lambdas + // are not supported by Gradle up-to-date checks + jarTask.doFirst(new Action() { + @Override + public void execute(Task task) { + // this doFirst is added before the info plugin, therefore it will run + // after the doFirst added by the info plugin, and we can override attributes + jarTask.getManifest() + .attributes( + Map.of("Build-Date", BuildParams.getBuildDate(), "Build-Java-Version", BuildParams.getGradleJavaVersion() + ) + ); + } + }); + } + ); project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { project.getTasks().withType(ShadowJar.class).configureEach(shadowJar -> { - /* - * Replace the default "-all" classifier with null - * which will leave the classifier off of the file name. - */ - shadowJar.getArchiveClassifier().set((String) null); - /* - * Not all cases need service files merged but it is - * better to be safe - */ - shadowJar.mergeServiceFiles(); - }); + /* + * Replace the default "-all" classifier with null + * which will leave the classifier off of the file name. 
+ */ + shadowJar.getArchiveClassifier().set((String) null); + /* + * Not all cases need service files merged but it is + * better to be safe + */ + shadowJar.mergeServiceFiles(); + } + ); // Add "original" classifier to the non-shadowed JAR to distinguish it from the shadow JAR project.getTasks().named(JavaPlugin.JAR_TASK_NAME, Jar.class).configure(jar -> jar.getArchiveClassifier().set("original")); // Make sure we assemble the shadow jar @@ -273,6 +196,7 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S Stream.of( sourceSet.getApiConfigurationName(), sourceSet.getImplementationConfigurationName(), + sourceSet.getImplementationConfigurationName(), sourceSet.getCompileOnlyConfigurationName(), sourceSet.getRuntimeOnlyConfigurationName() ) @@ -280,4 +204,5 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S .filter(Objects::nonNull) .forEach(GradleUtils::disableTransitiveDependencies); } + } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 419f35fc7a91e..78fe3412c4a6f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -11,7 +11,8 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.internal.test.SimpleCommandLineArgumentProvider; -import org.elasticsearch.gradle.internal.test.SystemPropertyCommandLineArgumentProvider; +import org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener; @@ -22,6 +23,7 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.file.FileCollection; +import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.testing.Test; @@ -39,6 +41,7 @@ public class ElasticsearchTestBasePlugin implements Plugin { @Override public void apply(Project project) { + project.getPluginManager().apply(GradleTestPolicySetupPlugin.class); // for fips mode check project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); // Default test task should run only unit tests @@ -117,12 +120,8 @@ public void execute(Task t) { Map sysprops = Map.of( "java.awt.headless", "true", - "tests.gradle", - "true", "tests.artifact", project.getName(), - "tests.task", - test.getPath(), "tests.security.manager", "true", "jna.nosys", @@ -138,14 +137,8 @@ public void execute(Task t) { } // don't track these as inputs since they contain absolute paths and break cache relocatability - File gradleHome = project.getGradle().getGradleUserHomeDir(); - String gradleVersion = project.getGradle().getGradleVersion(); - nonInputProperties.systemProperty("gradle.dist.lib", new File(project.getGradle().getGradleHomeDir(), "lib")); - nonInputProperties.systemProperty( - "gradle.worker.jar", - gradleHome + "/caches/" + gradleVersion + "/workerMain/gradle-worker.jar" - ); - 
nonInputProperties.systemProperty("gradle.user.home", gradleHome); + File gradleUserHome = project.getGradle().getGradleUserHomeDir(); + nonInputProperties.systemProperty("gradle.user.home", gradleUserHome); // we use 'temp' relative to CWD since this is per JVM and tests are forbidden from writing to CWD nonInputProperties.systemProperty("java.io.tmpdir", test.getWorkingDir().toPath().resolve("temp")); @@ -187,17 +180,17 @@ public void execute(Task t) { * compiled class output and dependency jars. This better emulates the runtime environment of consumers. */ project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { - // Remove output class files and any other dependencies from the test classpath, since the shadow JAR includes these - FileCollection mainRuntime = project.getExtensions() - .getByType(SourceSetContainer.class) - .getByName(SourceSet.MAIN_SOURCE_SET_NAME) - .getRuntimeClasspath(); - // Add any "shadow" dependencies. These are dependencies that are *not* bundled into the shadow JAR - Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.getCONFIGURATION_NAME()); - // Add the shadow JAR artifact itself - FileCollection shadowJar = project.files(project.getTasks().named("shadowJar")); - - test.setClasspath(test.getClasspath().minus(mainRuntime).plus(shadowConfig).plus(shadowJar)); + if (test.getName().equals(JavaPlugin.TEST_TASK_NAME)) { + // Remove output class files and any other dependencies from the test classpath, since the shadow JAR includes these + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + FileCollection mainRuntime = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME).getRuntimeClasspath(); + // Add any "shadow" dependencies. 
These are dependencies that are *not* bundled into the shadow JAR + Configuration shadowConfig = project.getConfigurations().getByName(ShadowBasePlugin.getCONFIGURATION_NAME()); + // Add the shadow JAR artifact itself + FileCollection shadowJar = project.files(project.getTasks().named("shadowJar")); + FileCollection testRuntime = sourceSets.getByName(SourceSet.TEST_SOURCE_SET_NAME).getRuntimeClasspath(); + test.setClasspath(testRuntime.minus(mainRuntime).plus(shadowConfig).plus(shadowJar)); + } }); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index a5c69de5c801c..b5b3891abcb95 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -39,8 +39,6 @@ */ public class InternalDistributionDownloadPlugin implements InternalPlugin { - private BwcVersions bwcVersions = null; - @Override public void apply(Project project) { // this is needed for isInternal @@ -54,7 +52,6 @@ public void apply(Project project) { distributionDownloadPlugin.setDockerAvailability( dockerSupport.map(dockerSupportService -> dockerSupportService.getDockerAvailability().isAvailable) ); - this.bwcVersions = BuildParams.getBwcVersions(); registerInternalDistributionResolutions(DistributionDownloadPlugin.getRegistrationsContainer(project)); } @@ -78,7 +75,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer< })); resolutions.register("bwc", distributionResolution -> distributionResolution.setResolver((project, distribution) -> { - BwcVersions.UnreleasedVersionInfo unreleasedInfo = bwcVersions.unreleasedInfo(Version.fromString(distribution.getVersion())); + BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions().unreleasedInfo(Version.fromString(distribution.getVersion())); if (unreleasedInfo != null) { if (distribution.getBundledJdk() == false) { throw new GradleException( @@ -109,7 +106,6 @@ private static String getProjectConfig(ElasticsearchDistribution distribution, B } else { return distributionProjectName; } - } private static String distributionProjectPath(ElasticsearchDistribution distribution) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MavenFilteringHack.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MavenFilteringHack.java deleted file mode 100644 index c6f5fed322e5b..0000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MavenFilteringHack.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.gradle.internal; - -import java.util.LinkedHashMap; -import java.util.Map; - -import org.apache.tools.ant.filters.ReplaceTokens; -import org.gradle.api.file.CopySpec; - -/** - * Gradle provides "expansion" functionality using groovy's SimpleTemplatingEngine (TODO: check name). 
- * However, it allows substitutions of the form {@code $foo} (no curlies). Rest tests provide - * some substitution from the test runner, which this form is used for. - * - * This class provides a helper to do maven filtering, where only the form {@code $\{foo\}} is supported. - * - * TODO: we should get rid of this hack, and make the rest tests use some other identifier - * for builtin vars - */ -public class MavenFilteringHack { - /** - * Adds a filter to the given copy spec that will substitute maven variables. - * - */ - static void filter(CopySpec copySpec, Map substitutions) { - Map mavenSubstitutions = new LinkedHashMap<>(); - Map argMap = new LinkedHashMap<>(); - - substitutions.forEach((k, v) -> mavenSubstitutions.put("{" + k.toString(), v.toString())); - - argMap.put("tokens", mavenSubstitutions); - argMap.put("beginToken", "$"); - argMap.put("endToken", "}"); - - copySpec.filter(argMap, ReplaceTokens.class); - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java index 7a36bc7e73688..f3951b2b45d05 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java @@ -9,6 +9,7 @@ import org.elasticsearch.gradle.internal.BwcVersions; import org.gradle.api.JavaVersion; +import org.gradle.api.provider.Provider; import java.io.File; import java.lang.reflect.Modifier; @@ -34,10 +35,9 @@ public class BuildParams { private static ZonedDateTime buildDate; private static String testSeed; private static Boolean isCi; - private static Boolean isInternal; private static Integer defaultParallel; private static Boolean isSnapshotBuild; - private static BwcVersions bwcVersions; + private static Provider bwcVersions; /** * Initialize global build parameters. 
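The MavenFilteringHack removed above existed only to make Gradle's copy filtering behave like Maven resource filtering: it expanded `${token}` placeholders while leaving bare `$token` references (which the REST test runner relies on) untouched. A minimal stand-alone sketch of that substitution style, using a plain regex instead of the Ant ReplaceTokens filter the deleted class wired in; the class and token names here are illustrative only:

```java
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MavenStyleFilter {
    private static final Pattern TOKEN = Pattern.compile("\\$\\{(\\w+)\\}");

    /** Replaces ${key} placeholders with values from the map; unknown tokens are left as-is. */
    static String filter(String input, Map<String, String> substitutions) {
        Matcher matcher = TOKEN.matcher(input);
        StringBuilder out = new StringBuilder();
        while (matcher.find()) {
            String replacement = substitutions.getOrDefault(matcher.group(1), matcher.group(0));
            matcher.appendReplacement(out, Matcher.quoteReplacement(replacement));
        }
        matcher.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        // ${version} is expanded, $version (no curlies) is deliberately ignored
        System.out.println(filter("version=${version}, raw=$version", Map.of("version", "7.15.0")));
    }
}
```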
This method accepts and a initialization function which in turn accepts a @@ -100,7 +100,7 @@ public static ZonedDateTime getBuildDate() { } public static BwcVersions getBwcVersions() { - return value(bwcVersions); + return value(bwcVersions).get(); } public static String getTestSeed() { @@ -111,10 +111,6 @@ public static Boolean isCi() { return value(isCi); } - public static Boolean isInternal() { - return value(isInternal); - } - public static Integer getDefaultParallel() { return value(defaultParallel); } @@ -143,36 +139,6 @@ private static String propertyName(String methodName) { return propertyName.substring(0, 1).toLowerCase() + propertyName.substring(1); } - public static InternalMarker withInternalBuild(Runnable configBlock) { - if (isInternal()) { - configBlock.run(); - return InternalMarker.INTERNAL; - } else { - return InternalMarker.EXTERNAL; - } - } - - public enum InternalMarker { - INTERNAL(true), - EXTERNAL(false); - - private final boolean internal; - - InternalMarker(boolean internal) { - this.internal = internal; - } - - public void orElse(Runnable configBlock) { - if (internal == false) { - configBlock.run(); - } - } - - public boolean isInternal() { - return internal; - } - } - public static class MutableBuildParams { private static MutableBuildParams INSTANCE = new MutableBuildParams(); @@ -250,10 +216,6 @@ public void setIsCi(boolean isCi) { BuildParams.isCi = isCi; } - public void setIsInternal(Boolean isInternal) { - BuildParams.isInternal = requireNonNull(isInternal); - } - public void setDefaultParallel(int defaultParallel) { BuildParams.defaultParallel = defaultParallel; } @@ -262,7 +224,7 @@ public void setIsSnapshotBuild(final boolean isSnapshotBuild) { BuildParams.isSnapshotBuild = isSnapshotBuild; } - public void setBwcVersions(BwcVersions bwcVersions) { + public void setBwcVersions(Provider bwcVersions) { BuildParams.bwcVersions = requireNonNull(bwcVersions); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 73de5c0a7b460..24b45872ec414 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -90,9 +90,6 @@ public void apply(Project project) { GitInfo gitInfo = GitInfo.gitInfo(rootDir); BuildParams.init(params -> { - // Initialize global build parameters - boolean isInternal = GlobalBuildInfoPlugin.class.getResource("/buildSrc.marker") != null && explicitDisabledInternal(project) == false; - params.reset(); params.setRuntimeJavaHome(runtimeJavaHome); params.setRuntimeJavaVersion(determineJavaVersion("runtime java.home", runtimeJavaHome, minimumRuntimeVersion)); @@ -108,30 +105,19 @@ public void apply(Project project) { params.setBuildDate(ZonedDateTime.now(ZoneOffset.UTC)); params.setTestSeed(getTestSeed()); params.setIsCi(System.getenv("JENKINS_URL") != null); - params.setIsInternal(isInternal); params.setDefaultParallel(ParallelDetector.findDefaultParallel(project)); params.setInFipsJvm(Util.getBooleanProperty("tests.fips.enabled", false)); params.setIsSnapshotBuild(Util.getBooleanProperty("build.snapshot", true)); - if (isInternal) { - params.setBwcVersions(resolveBwcVersions(rootDir)); - } + params.setBwcVersions(providers.provider(() -> resolveBwcVersions(rootDir))); }); - // When building Elasticsearch, enforce 
the minimum compiler version - BuildParams.withInternalBuild(() -> assertMinimumCompilerVersion(minimumCompilerVersion)); + // Enforce the minimum compiler version + assertMinimumCompilerVersion(minimumCompilerVersion); // Print global build info header just before task execution project.getGradle().getTaskGraph().whenReady(graph -> logGlobalBuildInfo()); } - @NotNull - private Boolean explicitDisabledInternal(Project project) { - return project.getProviders().systemProperty("test.external") - .forUseAtConfigurationTime() - .map(sysProp -> sysProp.equals("true")) - .getOrElse(false); - } - private String formatJavaVendorDetails(JvmInstallationMetadata runtimeJdkMetaData) { JvmVendor vendor = runtimeJdkMetaData.getVendor(); return runtimeJdkMetaData.getVendor().getKnownVendor().name() + "/" + vendor.getRawVendor(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java index 4777b51398b7f..f338f0b55d5e5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java @@ -34,6 +34,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -114,6 +115,11 @@ public class DependencyLicensesTask extends DefaultTask { */ private Set ignoreShas = new HashSet<>(); + /** + * Names of files that should be ignored by the check + */ + private LinkedHashSet ignoreFiles = new LinkedHashSet<>(); + /** * Add a mapping from a regex pattern for the jar name, to a prefix to find * the LICENSE and NOTICE file for that jar. @@ -164,6 +170,13 @@ public void ignoreSha(String dep) { ignoreShas.add(dep); } + /** + * Add a file that should be ignored by the check. 
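The BuildParams and GlobalBuildInfoPlugin changes above stop resolving BWC versions eagerly: the value is now held as a Gradle Provider and only materialised when getBwcVersions() is actually called. A rough stand-in for that deferral using a plain Supplier (Gradle's Provider offers the same lazy contract; the types and names below are simplified for illustration):

```java
import java.util.List;
import java.util.function.Supplier;

public class LazyBwcVersions {
    /** Simplified stand-in for the real BwcVersions, which the build parses out of Version.java. */
    record BwcVersions(List<String> versions) {}

    private static Supplier<BwcVersions> bwcVersions;

    static void setBwcVersions(Supplier<BwcVersions> provider) {
        bwcVersions = provider;
    }

    /** Resolution is deferred to the first caller instead of happening at configuration time. */
    static BwcVersions getBwcVersions() {
        return bwcVersions.get();
    }

    public static void main(String[] args) {
        setBwcVersions(() -> {
            System.out.println("resolving BWC versions...");
            return new BwcVersions(List.of("7.13.4", "7.14.1", "7.15.0", "8.0.0"));
        });
        // The supplier has not run yet; it only runs when the value is requested.
        System.out.println(getBwcVersions().versions());
    }
}
```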
This should be used for additional license files not tied to jar dependency + */ + public void ignoreFile(String file) { + ignoreFiles.add(file); + } + @TaskAction public void checkDependencies() throws IOException, NoSuchAlgorithmException { if (dependencies == null) { @@ -202,6 +215,10 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException { } } + licenses.keySet().removeAll(ignoreFiles); + notices.keySet().removeAll(ignoreFiles); + sources.keySet().removeAll(ignoreFiles); + checkDependencies(licenses, notices, sources, shaFiles); licenses.forEach((item, exists) -> failIfAnyMissing(item, exists, "license")); @@ -338,6 +355,12 @@ private String getFileName(String name, Map counters, String type) { return fileName; } + @Input + @Optional + public LinkedHashSet getIgnoreFiles() { + return new LinkedHashSet<>(ignoreFiles); + } + @Input public LinkedHashMap getMappings() { return new LinkedHashMap<>(mappings); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/InternalPrecommitTasks.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/InternalPrecommitTasks.java index 5eaf30ef55dc8..dbf1a5cbccf29 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/InternalPrecommitTasks.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/InternalPrecommitTasks.java @@ -19,34 +19,22 @@ public class InternalPrecommitTasks { /** * Adds a precommit task, which depends on non-test verification tasks. */ - public static void create(Project project, boolean includeDependencyLicenses) { - project.getPluginManager().apply(JarHellPrecommitPlugin.class); - project.getPluginManager().apply(ThirdPartyAuditPrecommitPlugin.class); + public static void create(Project project, boolean withProductiveCode) { project.getPluginManager().apply(CheckstylePrecommitPlugin.class); project.getPluginManager().apply(ForbiddenApisPrecommitPlugin.class); project.getPluginManager().apply(ForbiddenPatternsPrecommitPlugin.class); project.getPluginManager().apply(LicenseHeadersPrecommitPlugin.class); project.getPluginManager().apply(FilePermissionsPrecommitPlugin.class); project.getPluginManager().apply(TestingConventionsPrecommitPlugin.class); + project.getPluginManager().apply(LoggerUsagePrecommitPlugin.class); + project.getPluginManager().apply(JarHellPrecommitPlugin.class); - // tasks with just tests don't need dependency licenses, so this flag makes adding + // tasks with just tests don't need certain tasks to run, so this flag makes adding // the task optional - if (includeDependencyLicenses) { + if (withProductiveCode) { + project.getPluginManager().apply(ThirdPartyAuditPrecommitPlugin.class); project.getPluginManager().apply(DependencyLicensesPrecommitPlugin.class); - } - - if (project.getPath().equals(":build-tools") == false) { - /* - * Sadly, build-tools can't have logger-usage-check because that - * would create a circular project dependency between build-tools - * (which provides NamingConventionsCheck) and :test:logger-usage - * which provides the logger usage check. Since the build tools - * don't use the logger usage check because they don't have any - * of Elaticsearch's loggers and :test:logger-usage actually does - * use the NamingConventionsCheck we break the circular dependency - * here. 
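The new ignoreFile() hook above lets a project exclude license or notice files that are not tied to any jar dependency (for example the GeoIP-related notices matched by the new LicenseAnalyzer entries) from the dependency-licenses check. A sketch of how a plugin or build script might use it; the plugin class and file names are hypothetical:

```java
import org.elasticsearch.gradle.internal.precommit.DependencyLicensesTask;
import org.gradle.api.Plugin;
import org.gradle.api.Project;

/** Illustrative only: configures every DependencyLicensesTask to skip two extra notice files. */
public class IgnoreGeoIpLicensesPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        project.getTasks().withType(DependencyLicensesTask.class).configureEach(task -> {
            // extra license/notice files in the licenses directory that are not tied to a jar dependency
            task.ignoreFile("GeoLite-LICENSE.txt");
            task.ignoreFile("GeoLite-NOTICE.txt");
        });
    }
}
```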
- */ - project.getPluginManager().apply(LoggerUsagePrecommitPlugin.class); + project.getPluginManager().apply(SplitPackagesAuditPrecommitPlugin.class); } } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java index 3efd37b85bb02..fb33bc0d3c473 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/JarHellPrecommitPlugin.java @@ -9,7 +9,6 @@ package org.elasticsearch.gradle.internal.precommit; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.jarhell.JarHellPlugin; import org.elasticsearch.gradle.jarhell.JarHellTask; import org.gradle.api.Project; @@ -21,7 +20,7 @@ public class JarHellPrecommitPlugin extends PrecommitPlugin { public TaskProvider createTask(Project project) { project.getPluginManager().apply(JarHellPlugin.class); - if (BuildParams.isInternal() && project.getPath().equals(":libs:elasticsearch-core") == false) { + if (project.getPath().equals(":libs:elasticsearch-core") == false) { // ideally we would configure this as a default dependency. But Default dependencies do not work correctly // with gradle project dependencies as they're resolved to late in the build and don't setup according task // dependencies properly diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java index 725fe0608337e..7fe579026349b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java @@ -151,7 +151,15 @@ public class LicenseAnalyzer { new LicenseMatcher("EPL-2.0", true, false, Pattern.compile("Eclipse Public License - v 2.0", Pattern.DOTALL)), new LicenseMatcher("EDL-1.0", true, false, Pattern.compile("Eclipse Distribution License - v 1.0", Pattern.DOTALL)), new LicenseMatcher("LGPL-2.1", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 2.1", Pattern.DOTALL)), - new LicenseMatcher("LGPL-3.0", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 3", Pattern.DOTALL)) }; + new LicenseMatcher("LGPL-3.0", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 3", Pattern.DOTALL)), + new LicenseMatcher("GeoLite", false, false, + Pattern.compile(("The Elastic GeoIP Database Service uses the GeoLite2 Data created " + + "and licensed by MaxMind,\nwhich is governed by MaxMind’s GeoLite2 End User License Agreement, " + + "available at https://www.maxmind.com/en/geolite2/eula.\n").replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)), + new LicenseMatcher("GeoIp-Database-Service", false, false, + Pattern.compile(("By using the GeoIP Database Service, you agree to the Elastic GeoIP Database Service Agreement,\n" + + "available at www.elastic.co/elastic-geoip-database-service-terms.").replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL))}; + public static LicenseInfo licenseType(File licenseFile) { for (LicenseMatcher matcher : matchers) { diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index 1dbba30238f8e..e8ce2ab6e45b0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -32,8 +32,7 @@ public TaskProvider createTask(Project project) { project.getConfigurations().create("forbiddenApisCliJar"); project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.1"); Configuration jdkJarHellConfig = project.getConfigurations().create(JDK_JAR_HELL_CONFIG_NAME); - if (BuildParams.isInternal() && project.getPath().equals(LIBS_ELASTICSEARCH_CORE_PROJECT_PATH) == false) { - // External plugins will depend on this already via transitive dependencies. + if (project.getPath().equals(LIBS_ELASTICSEARCH_CORE_PROJECT_PATH) == false) { // Internal projects are not all plugins, so make sure the check is available // we are not doing this for this project itself to avoid jar hell with itself project.getDependencies().add(JDK_JAR_HELL_CONFIG_NAME, project.project(LIBS_ELASTICSEARCH_CORE_PROJECT_PATH)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonAgainstSchemaTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonAgainstSchemaTask.java index 60b609b3145fa..db6d2c1135b90 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonAgainstSchemaTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateJsonAgainstSchemaTask.java @@ -20,9 +20,11 @@ import org.gradle.api.file.FileCollection; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.TaskAction; import org.gradle.work.ChangeType; +import org.gradle.work.FileChange; import org.gradle.work.Incremental; import org.gradle.work.InputChanges; @@ -42,8 +44,6 @@ * Incremental task to validate a set of JSON files against against a schema. 
*/ public class ValidateJsonAgainstSchemaTask extends DefaultTask { - - private final ObjectMapper mapper = new ObjectMapper(); private File jsonSchema; private File report; private FileCollection inputFiles; @@ -76,28 +76,36 @@ public File getReport() { return this.report; } + @Internal + protected ObjectMapper getMapper() { + return new ObjectMapper(); + } + + @Internal + protected String getFileType() { + return "JSON"; + } + @TaskAction public void validate(InputChanges inputChanges) throws IOException { - File jsonSchemaOnDisk = getJsonSchema(); - getLogger().debug("JSON schema : [{}]", jsonSchemaOnDisk.getAbsolutePath()); - SchemaValidatorsConfig config = new SchemaValidatorsConfig(); - JsonSchemaFactory factory = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7); - JsonSchema jsonSchema = factory.getSchema(mapper.readTree(jsonSchemaOnDisk), config); - Map> errors = new LinkedHashMap<>(); + final File jsonSchemaOnDisk = getJsonSchema(); + final JsonSchema jsonSchema = buildSchemaObject(jsonSchemaOnDisk); + + final Map> errors = new LinkedHashMap<>(); + final ObjectMapper mapper = this.getMapper(); + // incrementally evaluate input files + // validate all files and hold on to errors for a complete report if there are failures StreamSupport.stream(inputChanges.getFileChanges(getInputFiles()).spliterator(), false) .filter(f -> f.getChangeType() != ChangeType.REMOVED) - .forEach(fileChange -> { - File file = fileChange.getFile(); - if (file.isDirectory() == false) { - // validate all files and hold on to errors for a complete report if there are failures - getLogger().debug("Validating JSON [{}]", file.getName()); - try { - Set validationMessages = jsonSchema.validate(mapper.readTree(file)); - maybeLogAndCollectError(validationMessages, errors, file); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + .map(FileChange::getFile) + .filter(file -> file.isDirectory() == false) + .forEach(file -> { + try { + Set validationMessages = jsonSchema.validate(mapper.readTree(file)); + maybeLogAndCollectError(validationMessages, errors, file); + } catch (IOException e) { + throw new UncheckedIOException(e); } }); if (errors.isEmpty()) { @@ -119,9 +127,17 @@ public void validate(InputChanges inputChanges) throws IOException { } } + private JsonSchema buildSchemaObject(File jsonSchemaOnDisk) throws IOException { + final ObjectMapper jsonMapper = new ObjectMapper(); + final SchemaValidatorsConfig config = new SchemaValidatorsConfig(); + final JsonSchemaFactory factory = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7); + return factory.getSchema(jsonMapper.readTree(jsonSchemaOnDisk), config); + } + private void maybeLogAndCollectError(Set messages, Map> errors, File file) { + final String fileType = getFileType(); for (ValidationMessage message : messages) { - getLogger().error("[validate JSON][ERROR][{}][{}]", file.getName(), message.toString()); + getLogger().error("[validate {}][ERROR][{}][{}]", fileType, file.getName(), message.toString()); errors.computeIfAbsent(file, k -> new LinkedHashSet<>()) .add(String.format("%s: %s", file.getAbsolutePath(), message.toString())); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateYamlAgainstSchemaTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateYamlAgainstSchemaTask.java new file mode 100644 index 0000000000000..c4233fc26166c --- /dev/null +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ValidateYamlAgainstSchemaTask.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.precommit; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; + +/** + * Incremental task to validate a set of YAML files against against a schema. + */ +public class ValidateYamlAgainstSchemaTask extends ValidateJsonAgainstSchemaTask { + @Override + protected String getFileType() { + return "YAML"; + } + + protected ObjectMapper getMapper() { + return new ObjectMapper(new YAMLFactory()); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java new file mode 100644 index 0000000000000..691aa47d9ebbc --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import groovy.text.SimpleTemplateEngine; + +import com.google.common.annotations.VisibleForTesting; + +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * Generates the page that lists the breaking changes and deprecations for a minor version release. 
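ValidateYamlAgainstSchemaTask only swaps in a YAML-aware ObjectMapper; schema loading, validation and reporting are inherited from ValidateJsonAgainstSchemaTask. The same flow, stripped out of Gradle, using the Jackson YAML dataformat and the networknt schema validator the tasks rely on (the file paths are placeholders):

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.networknt.schema.JsonSchema;
import com.networknt.schema.JsonSchemaFactory;
import com.networknt.schema.SchemaValidatorsConfig;
import com.networknt.schema.SpecVersion;
import com.networknt.schema.ValidationMessage;

import java.io.File;
import java.io.IOException;
import java.util.Set;

public class ValidateYamlExample {
    public static void main(String[] args) throws IOException {
        // The schema itself is JSON, so it is read with a plain ObjectMapper
        ObjectMapper jsonMapper = new ObjectMapper();
        JsonSchemaFactory factory = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7);
        JsonSchema schema = factory.getSchema(
            jsonMapper.readTree(new File("changelog-schema.json")),          // placeholder path
            new SchemaValidatorsConfig()
        );

        // The document under test is YAML, so it is read with a YAML-backed ObjectMapper
        ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());
        JsonNode changelog = yamlMapper.readTree(new File("docs/changelog/12345.yaml")); // placeholder path

        Set<ValidationMessage> messages = schema.validate(changelog);
        messages.forEach(m -> System.err.println("schema violation: " + m));
    }
}
```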
+ */ +public class BreakingChangesGenerator { + + static void update(File templateFile, File outputFile, List entries) throws IOException { + try (FileWriter output = new FileWriter(outputFile)) { + generateFile(Files.readString(templateFile.toPath()), output, entries); + } + } + + @VisibleForTesting + private static void generateFile(String template, FileWriter outputWriter, List entries) throws IOException { + final Version version = VersionProperties.getElasticsearchVersion(); + + final Map>> breakingChangesByNotabilityByArea = entries.stream() + .map(ChangelogEntry::getBreaking) + .filter(Objects::nonNull) + .collect( + Collectors.groupingBy( + ChangelogEntry.Breaking::isNotable, + Collectors.groupingBy(ChangelogEntry.Breaking::getArea, TreeMap::new, Collectors.toList()) + ) + ); + + final Map> deprecationsByArea = entries.stream() + .map(ChangelogEntry::getDeprecation) + .filter(Objects::nonNull) + .collect(Collectors.groupingBy(ChangelogEntry.Deprecation::getArea, TreeMap::new, Collectors.toList())); + + final Map bindings = new HashMap<>(); + bindings.put("breakingChangesByNotabilityByArea", breakingChangesByNotabilityByArea); + bindings.put("deprecationsByArea", deprecationsByArea); + bindings.put("isElasticsearchSnapshot", VersionProperties.isElasticsearchSnapshot()); + bindings.put("majorDotMinor", version.getMajor() + "." + version.getMinor()); + bindings.put("majorMinor", String.valueOf(version.getMajor()) + version.getMinor()); + bindings.put("nextMajor", (version.getMajor() + 1) + ".0"); + bindings.put("version", version); + + try { + final SimpleTemplateEngine engine = new SimpleTemplateEngine(); + engine.createTemplate(template).make(bindings).writeTo(outputWriter); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java new file mode 100644 index 0000000000000..08b03b35ccd63 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java @@ -0,0 +1,374 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; + +import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * This class models the contents of a changelog YAML file. We validate it using a + * JSON Schema, as well as some programmatic checks in {@link ValidateChangelogEntryTask}. 
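BreakingChangesGenerator, like the other generators in this package, renders its output by binding a Map into a Groovy SimpleTemplateEngine template. A minimal example of that mechanism with an inline template; the real templates live under the plugin's resources directory, and this snippet assumes Groovy is on the classpath:

```java
import groovy.text.SimpleTemplateEngine;

import java.io.StringWriter;
import java.util.List;
import java.util.Map;

public class TemplateExample {
    public static void main(String[] args) throws Exception {
        String template = "Release highlights for ${version}:\n"
            + "<% for (h in highlights) { %>* ${h}\n<% } %>";

        Map<String, Object> bindings = Map.of(
            "version", "8.0.0",
            "highlights", List.of("Faster indexing", "New aggregations")
        );

        StringWriter out = new StringWriter();
        new SimpleTemplateEngine().createTemplate(template).make(bindings).writeTo(out);
        System.out.print(out);
    }
}
```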
+ * + */ +public class ChangelogEntry { + private Integer pr; + private List issues; + private String area; + private String type; + private String summary; + private Highlight highlight; + private Breaking breaking; + private Deprecation deprecation; + private List versions; + + private static final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory()); + + public static ChangelogEntry parse(File file) { + try { + return yamlMapper.readValue(file, ChangelogEntry.class); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public Integer getPr() { + return pr; + } + + public void setPr(Integer pr) { + this.pr = pr; + } + + public List getIssues() { + return issues; + } + + public void setIssues(List issues) { + this.issues = issues; + } + + public String getArea() { + return area; + } + + public void setArea(String area) { + this.area = area; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getSummary() { + return summary; + } + + public void setSummary(String summary) { + this.summary = summary; + } + + public Highlight getHighlight() { + return highlight; + } + + public void setHighlight(Highlight highlight) { + this.highlight = highlight; + } + + public Breaking getBreaking() { + return breaking; + } + + public void setBreaking(Breaking breaking) { + this.breaking = breaking; + } + + public Deprecation getDeprecation() { + return deprecation; + } + + public void setDeprecation(Deprecation deprecation) { + this.deprecation = deprecation; + } + + public List getVersions() { + return versions; + } + + public void setVersions(List versions) { + this.versions = versions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ChangelogEntry that = (ChangelogEntry) o; + return Objects.equals(pr, that.pr) + && Objects.equals(issues, that.issues) + && Objects.equals(area, that.area) + && Objects.equals(type, that.type) + && Objects.equals(summary, that.summary) + && Objects.equals(highlight, that.highlight) + && Objects.equals(breaking, that.breaking) + && Objects.equals(versions, that.versions); + } + + @Override + public int hashCode() { + return Objects.hash(pr, issues, area, type, summary, highlight, breaking, versions); + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "ChangelogEntry{pr=%d, issues=%s, area='%s', type='%s', summary='%s', highlight=%s, breaking=%s, deprecation=%s versions=%s}", + pr, + issues, + area, + type, + summary, + highlight, + breaking, + deprecation, + versions + ); + } + + public static class Highlight { + private boolean notable; + private String title; + private String body; + + public boolean isNotable() { + return notable; + } + + public void setNotable(boolean notable) { + this.notable = notable; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public String getBody() { + return body; + } + + public void setBody(String body) { + this.body = body; + } + + public String getAnchor() { + return generatedAnchor(this.title); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Highlight highlight = (Highlight) o; + return Objects.equals(notable, highlight.notable) + && Objects.equals(title, highlight.title) + && 
Objects.equals(body, highlight.body); + } + + @Override + public int hashCode() { + return Objects.hash(notable, title, body); + } + + @Override + public String toString() { + return String.format(Locale.ROOT, "Highlight{notable=%s, title='%s', body='%s'}", notable, title, body); + } + } + + public static class Breaking { + private String area; + private String title; + private String details; + private String impact; + private boolean notable; + + public String getArea() { + return area; + } + + public void setArea(String area) { + this.area = area; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public String getDetails() { + return details; + } + + public void setDetails(String details) { + this.details = details; + } + + public String getImpact() { + return impact; + } + + public void setImpact(String impact) { + this.impact = impact; + } + + public boolean isNotable() { + return notable; + } + + public void setNotable(boolean notable) { + this.notable = notable; + } + + public String getAnchor() { + return generatedAnchor(this.title); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Breaking breaking = (Breaking) o; + return notable == breaking.notable + && Objects.equals(area, breaking.area) + && Objects.equals(title, breaking.title) + && Objects.equals(details, breaking.details) + && Objects.equals(impact, breaking.impact); + } + + @Override + public int hashCode() { + return Objects.hash(area, title, details, impact, notable); + } + + @Override + public String toString() { + return String.format( + "Breaking{area='%s', title='%s', details='%s', impact='%s', isNotable=%s}", + area, + title, + details, + impact, + notable + ); + } + } + + public static class Deprecation { + private String area; + private String title; + private String body; + + public String getArea() { + return area; + } + + public void setArea(String area) { + this.area = area; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public String getBody() { + return body; + } + + public void setBody(String body) { + this.body = body; + } + + public String getAnchor() { + return generatedAnchor(this.title); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Deprecation that = (Deprecation) o; + return Objects.equals(area, that.area) && Objects.equals(title, that.title) && Objects.equals(body, that.body); + } + + @Override + public int hashCode() { + return Objects.hash(area, title, body); + } + + @Override + public String toString() { + return String.format("Deprecation{area='%s', title='%s', body='%s'}", area, title, body); + } + } + + private static String generatedAnchor(String input) { + final List excludes = List.of("the", "is", "a"); + + final String[] words = input.toLowerCase(Locale.ROOT) + .replaceAll("[^\\w]+", "_") + .replaceFirst("^_+", "") + .replaceFirst("_+$", "") + .split("_+"); + return Arrays.stream(words).filter(word -> excludes.contains(word) == false).collect(Collectors.joining("_")); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java new file 
mode 100644 index 0000000000000..5d5e1edf9b99e --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; +import org.gradle.api.DefaultTask; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; +import org.gradle.api.file.RegularFile; +import org.gradle.api.file.RegularFileProperty; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.TaskAction; + +import javax.inject.Inject; +import java.io.IOException; +import java.util.List; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Orchestrates the steps required to generate or update various release notes files. + */ +public class GenerateReleaseNotesTask extends DefaultTask { + private static final Logger LOGGER = Logging.getLogger(GenerateReleaseNotesTask.class); + + private final ConfigurableFileCollection changelogs; + + private final RegularFileProperty releaseNotesIndexTemplate; + private final RegularFileProperty releaseNotesTemplate; + private final RegularFileProperty releaseHighlightsTemplate; + private final RegularFileProperty breakingChangesTemplate; + + private final RegularFileProperty releaseNotesIndexFile; + private final RegularFileProperty releaseNotesFile; + private final RegularFileProperty releaseHighlightsFile; + private final RegularFileProperty breakingChangesFile; + + @Inject + public GenerateReleaseNotesTask(ObjectFactory objectFactory) { + changelogs = objectFactory.fileCollection(); + + releaseNotesIndexTemplate = objectFactory.fileProperty(); + releaseNotesTemplate = objectFactory.fileProperty(); + releaseHighlightsTemplate = objectFactory.fileProperty(); + breakingChangesTemplate = objectFactory.fileProperty(); + + releaseNotesIndexFile = objectFactory.fileProperty(); + releaseNotesFile = objectFactory.fileProperty(); + releaseHighlightsFile = objectFactory.fileProperty(); + breakingChangesFile = objectFactory.fileProperty(); + } + + @TaskAction + public void executeTask() throws IOException { + LOGGER.info("Finding changelog files..."); + + final Version checkoutVersion = VersionProperties.getElasticsearchVersion(); + + final List entries = this.changelogs.getFiles() + .stream() + .map(ChangelogEntry::parse) + .filter( + // Only process changelogs that are included in this minor version series of ES. + // If this change was released in an earlier major or minor version of Elasticsearch, do not + // include it in the notes. An earlier patch version is OK, the release notes include changes + // for every patch release in a minor series. 
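The lambda that follows this comment keeps a changelog only when at least one of its versions falls in the checked-out minor series and none fall in an earlier major or minor. The same decision, sketched against a simplified version type (the real code parses the build's Version class in RELAXED mode):

```java
import java.util.List;

public class ChangelogFilterExample {
    /** Simplified stand-in for the build's Version type. */
    record Version(int major, int minor) {}

    /** True if the changelog belongs in the release notes for checkout's minor series. */
    static boolean includedInThisMinorSeries(List<Version> changelogVersions, Version checkout) {
        boolean inThisMinor = changelogVersions.stream()
            .anyMatch(v -> v.major() == checkout.major() && v.minor() == checkout.minor());
        boolean inEarlierRelease = changelogVersions.stream()
            .anyMatch(v -> v.major() < checkout.major()
                || (v.major() == checkout.major() && v.minor() < checkout.minor()));
        return inThisMinor && inEarlierRelease == false;
    }

    public static void main(String[] args) {
        Version checkout = new Version(7, 15);
        // First released in 7.15 -> included
        System.out.println(includedInThisMinorSeries(List.of(new Version(7, 15), new Version(8, 0)), checkout));
        // Already released in 7.14 -> excluded, even though 7.15 also lists it
        System.out.println(includedInThisMinorSeries(List.of(new Version(7, 14), new Version(7, 15)), checkout));
    }
}
```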
+ log -> { + final List versionsForChangelogFile = log.getVersions() + .stream() + .map(v -> Version.fromString(v, Version.Mode.RELAXED)) + .collect(Collectors.toList()); + + final Predicate includedInSameMinor = v -> v.getMajor() == checkoutVersion.getMajor() + && v.getMinor() == checkoutVersion.getMinor(); + + final Predicate includedInEarlierMajorOrMinor = v -> v.getMajor() < checkoutVersion.getMajor() + || (v.getMajor() == checkoutVersion.getMajor() && v.getMinor() < checkoutVersion.getMinor()); + + boolean includedInThisMinor = versionsForChangelogFile.stream().anyMatch(includedInSameMinor); + + if (includedInThisMinor) { + return versionsForChangelogFile.stream().noneMatch(includedInEarlierMajorOrMinor); + } else { + return false; + } + } + ) + .collect(Collectors.toList()); + + LOGGER.info("Updating release notes index..."); + ReleaseNotesIndexUpdater.update(this.releaseNotesIndexTemplate.get().getAsFile(), this.releaseNotesIndexFile.get().getAsFile()); + + LOGGER.info("Generating release notes..."); + ReleaseNotesGenerator.update(this.releaseNotesTemplate.get().getAsFile(), this.releaseNotesFile.get().getAsFile(), entries); + + LOGGER.info("Generating release highlights..."); + ReleaseHighlightsGenerator.update(this.releaseHighlightsTemplate.get().getAsFile(), this.releaseHighlightsFile.get().getAsFile(), entries); + + LOGGER.info("Generating breaking changes / deprecations notes..."); + BreakingChangesGenerator.update(this.breakingChangesTemplate.get().getAsFile(), this.breakingChangesFile.get().getAsFile(), entries); + } + + @InputFiles + public FileCollection getChangelogs() { + return changelogs; + } + + public void setChangelogs(FileCollection files) { + this.changelogs.setFrom(files); + } + + @InputFile + public RegularFileProperty getReleaseNotesIndexTemplate() { + return releaseNotesIndexTemplate; + } + + public void setReleaseNotesIndexTemplate(RegularFile file) { + this.releaseNotesIndexTemplate.set(file); + } + + @InputFile + public RegularFileProperty getReleaseNotesTemplate() { + return releaseNotesTemplate; + } + + public void setReleaseNotesTemplate(RegularFile file) { + this.releaseNotesTemplate.set(file); + } + + @InputFile + public RegularFileProperty getReleaseHighlightsTemplate() { + return releaseHighlightsTemplate; + } + + public void setReleaseHighlightsTemplate(RegularFile file) { + this.releaseHighlightsTemplate.set(file); + } + + @InputFile + public RegularFileProperty getBreakingChangesTemplate() { + return breakingChangesTemplate; + } + + public void setBreakingChangesTemplate(RegularFile file) { + this.breakingChangesTemplate.set(file); + } + + @OutputFile + public RegularFileProperty getReleaseNotesIndexFile() { + return releaseNotesIndexFile; + } + + public void setReleaseNotesIndexFile(RegularFile file) { + this.releaseNotesIndexFile.set(file); + } + + @OutputFile + public RegularFileProperty getReleaseNotesFile() { + return releaseNotesFile; + } + + public void setReleaseNotesFile(RegularFile file) { + this.releaseNotesFile.set(file); + } + + @OutputFile + public RegularFileProperty getReleaseHighlightsFile() { + return releaseHighlightsFile; + } + + public void setReleaseHighlightsFile(RegularFile file) { + this.releaseHighlightsFile.set(file); + } + + @OutputFile + public RegularFileProperty getBreakingChangesFile() { + return breakingChangesFile; + } + + public void setBreakingChangesFile(RegularFile file) { + this.breakingChangesFile.set(file); + } +} diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java new file mode 100644 index 0000000000000..02b450aa22eea --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import groovy.text.SimpleTemplateEngine; + +import com.google.common.annotations.VisibleForTesting; + +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Generates the release highlights notes, for changelog files that contain the highlight field. + */ +public class ReleaseHighlightsGenerator { + static void update(File templateFile, File outputFile, List entries) throws IOException { + try (FileWriter output = new FileWriter(outputFile)) { + generateFile(VersionProperties.getElasticsearchVersion(), Files.readString(templateFile.toPath()), entries, output); + } + } + + @VisibleForTesting + static void generateFile(Version version, String templateFile, List entries, FileWriter outputWriter) + throws IOException { + final List priorVersions = new ArrayList<>(); + + if (version.getMinor() > 0) { + final int major = version.getMajor(); + for (int minor = version.getMinor(); minor >= 0; minor--) { + String majorMinor = major + "." 
+ minor; + String fileSuffix = ""; + if (major == 7 && minor < 7) { + fileSuffix = "-" + majorMinor + ".0"; + } + priorVersions.add("{ref-bare}/" + majorMinor + "/release-highlights" + fileSuffix + ".html[" + majorMinor + "]"); + } + } + + final Map> groupedHighlights = entries.stream() + .map(ChangelogEntry::getHighlight) + .filter(Objects::nonNull) + .collect(Collectors.groupingBy(ChangelogEntry.Highlight::isNotable, Collectors.toList())); + + final List notableHighlights = groupedHighlights.getOrDefault(true, List.of()); + final List nonNotableHighlights = groupedHighlights.getOrDefault(false, List.of()); + + final Map bindings = new HashMap<>(); + bindings.put("priorVersions", priorVersions); + bindings.put("notableHighlights", notableHighlights); + bindings.put("nonNotableHighlights", nonNotableHighlights); + + try { + final SimpleTemplateEngine engine = new SimpleTemplateEngine(); + engine.createTemplate(templateFile).make(bindings).writeTo(outputWriter); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGenerator.java new file mode 100644 index 0000000000000..52995717a435a --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGenerator.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.google.common.annotations.VisibleForTesting; +import groovy.text.SimpleTemplateEngine; +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; +import org.gradle.api.GradleException; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.Writer; +import java.nio.file.Files; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Generates the release notes i.e. list of changes that have gone into this release. They are grouped by the + * type of change, then by team area. + */ +public class ReleaseNotesGenerator { + /** + * These mappings translate change types into the headings as they should appears in the release notes. 
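The priorVersions loop above assembles the links to earlier release-highlights pages, adding a ".0" file suffix for 7.x releases before 7.7 where the page name included the full version. Extracted as a small standalone method for illustration (the {ref-bare} attribute is resolved later by the docs build):

```java
import java.util.ArrayList;
import java.util.List;

public class PriorHighlightLinks {
    static List<String> priorVersionLinks(int major, int minor) {
        List<String> priorVersions = new ArrayList<>();
        for (int m = minor; m >= 0; m--) {
            String majorMinor = major + "." + m;
            String fileSuffix = "";
            if (major == 7 && m < 7) {
                fileSuffix = "-" + majorMinor + ".0";
            }
            priorVersions.add("{ref-bare}/" + majorMinor + "/release-highlights" + fileSuffix + ".html[" + majorMinor + "]");
        }
        return priorVersions;
    }

    public static void main(String[] args) {
        // For a 7.15 checkout this yields links for 7.15 back down to 7.0
        priorVersionLinks(7, 15).forEach(System.out::println);
    }
}
```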
+ */ + private static final Map TYPE_LABELS = new HashMap<>(); + + static { + TYPE_LABELS.put("breaking", "Breaking changes"); + TYPE_LABELS.put("breaking-java", "Breaking Java changes"); + TYPE_LABELS.put("bug", "Bug fixes"); + TYPE_LABELS.put("deprecation", "Deprecations"); + TYPE_LABELS.put("enhancement", "Enhancements"); + TYPE_LABELS.put("feature", "New features"); + TYPE_LABELS.put("regression", "Regressions"); + TYPE_LABELS.put("upgrade", "Upgrades"); + } + + static void update(File templateFile, File outputFile, List changelogs) throws IOException { + final String templateString = Files.readString(templateFile.toPath()); + + try (FileWriter output = new FileWriter(outputFile)) { + generateFile(VersionProperties.getElasticsearchVersion(), templateString, changelogs, output); + } + } + + @VisibleForTesting + static void generateFile(Version version, String template, List changelogs, Writer outputWriter) throws IOException { + final var changelogsByVersionByTypeByArea = buildChangelogBreakdown(version, changelogs); + + final Map bindings = new HashMap<>(); + bindings.put("changelogsByVersionByTypeByArea", changelogsByVersionByTypeByArea); + bindings.put("TYPE_LABELS", TYPE_LABELS); + + try { + final SimpleTemplateEngine engine = new SimpleTemplateEngine(); + engine.createTemplate(template).make(bindings).writeTo(outputWriter); + } catch (ClassNotFoundException e) { + throw new GradleException("Failed to generate file from template", e); + } + } + + private static Map>>> buildChangelogBreakdown( + Version elasticsearchVersion, + List changelogs + ) { + final Predicate includedInSameMinor = v -> v.getMajor() == elasticsearchVersion.getMajor() + && v.getMinor() == elasticsearchVersion.getMinor(); + + final Map>>> changelogsByVersionByTypeByArea = changelogs.stream() + .collect( + Collectors.groupingBy( + // Key changelog entries by the earlier version in which they were released + entry -> entry.getVersions() + .stream() + .map(v -> Version.fromString(v.replaceFirst("^v", ""))) + .filter(includedInSameMinor) + .sorted() + .findFirst() + .get(), + + // Generate a reverse-ordered map. Despite the IDE saying the type can be inferred, removing it + // causes the compiler to complain. + () -> new TreeMap>>>(Comparator.reverseOrder()), + + // Group changelogs entries by their change type + Collectors.groupingBy( + // Entries with breaking info are always put in the breaking section + entry -> entry.getBreaking() == null ? entry.getType() : "breaking", + TreeMap::new, + // Group changelogs for each type by their team area + Collectors.groupingBy( + // `security` and `known-issue` areas don't need to supply an area + entry -> entry.getType().equals("known-issue") || entry.getType().equals("security") + ? "_all_" + : entry.getArea(), + TreeMap::new, + Collectors.toList() + ) + ) + ) + ); + + // Sort per-area changelogs by their summary text. 
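buildChangelogBreakdown nests three groupingBy collectors: by the version in which an entry first shipped (newest first, via a TreeMap built with a reversed comparator), then by change type, then by area. A cut-down illustration of the outer, reverse-ordered grouping; the Entry record stands in for ChangelogEntry:

```java
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class ReverseGroupingExample {
    record Entry(String version, String summary) {}

    public static void main(String[] args) {
        List<Entry> entries = List.of(
            new Entry("7.14.0", "Old fix"),
            new Entry("7.15.0", "New feature"),
            new Entry("7.15.0", "Another enhancement")
        );

        // The TreeMap with a reversed comparator keys the output newest-version-first
        Map<String, List<Entry>> byVersion = entries.stream()
            .collect(Collectors.groupingBy(
                Entry::version,
                () -> new TreeMap<String, List<Entry>>(Comparator.reverseOrder()),
                Collectors.toList()
            ));

        byVersion.forEach((version, list) -> System.out.println(version + " -> " + list.size() + " entries"));
    }
}
```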
Assumes that the underlying list is sortable + changelogsByVersionByTypeByArea.forEach( + (_version, byVersion) -> byVersion.forEach( + (_type, byTeam) -> byTeam.forEach( + (_team, changelogsForTeam) -> changelogsForTeam.sort(Comparator.comparing(ChangelogEntry::getSummary)) + ) + ) + ); + + return changelogsByVersionByTypeByArea; + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexUpdater.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexUpdater.java new file mode 100644 index 0000000000000..5403d1e03f303 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexUpdater.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import groovy.text.SimpleTemplateEngine; + +import com.google.common.annotations.VisibleForTesting; + +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.Writer; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * This class ensures that the release notes index page has the appropriate anchors and include directives + * for the current repository version. It achieves this by parsing out the existing entries and writing + * out the file again. + */ +public class ReleaseNotesIndexUpdater { + + static void update(File indexTemplate, File indexFile) throws IOException { + final List existingIndexLines = Files.readAllLines(indexFile.toPath()); + try (FileWriter indexFileWriter = new FileWriter(indexFile)) { + generateFile( + VersionProperties.getElasticsearchVersion(), + existingIndexLines, + Files.readString(indexTemplate.toPath()), + indexFileWriter + ); + } + } + + @VisibleForTesting + static void generateFile(Version version, List existingIndexLines, String indexTemplate, Writer outputWriter) + throws IOException { + final List existingVersions = existingIndexLines.stream() + .filter(line -> line.startsWith("* < line.replace("* <>", "")) + .distinct() + .collect(Collectors.toList()); + + final List existingIncludes = existingIndexLines.stream() + .filter(line -> line.startsWith("include::")) + .map(line -> line.replace("include::release-notes/", "").replace(".asciidoc[]", "")) + .distinct() + .collect(Collectors.toList()); + + final String versionString = version.toString(); + + if (existingVersions.contains(versionString) == false) { + int insertionIndex = existingVersions.size() - 1; + while (insertionIndex > 0 && Version.fromString(existingVersions.get(insertionIndex)).before(version)) { + insertionIndex -= 1; + } + existingVersions.add(insertionIndex, versionString); + } + + final String includeString = version.getMajor() + "." 
+ version.getMinor(); + + if (existingIncludes.contains(includeString) == false) { + int insertionIndex = existingIncludes.size() - 1; + while (insertionIndex > 0 && Version.fromString(ensurePatchVersion(existingIncludes.get(insertionIndex))).before(version)) { + insertionIndex -= 1; + } + existingIncludes.add(insertionIndex, includeString); + } + + final Map bindings = new HashMap<>(); + bindings.put("existingVersions", existingVersions); + bindings.put("existingIncludes", existingIncludes); + + try { + final SimpleTemplateEngine engine = new SimpleTemplateEngine(); + engine.createTemplate(indexTemplate).make(bindings).writeTo(outputWriter); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + + private static String ensurePatchVersion(String version) { + return version.matches("^\\d+\\.\\d+\\.\\d+.*$") ? version : version + ".0"; + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java new file mode 100644 index 0000000000000..d7d85504a0178 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; +import org.elasticsearch.gradle.internal.precommit.ValidateYamlAgainstSchemaTask; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.file.Directory; +import org.gradle.api.file.FileTree; +import org.gradle.api.file.ProjectLayout; +import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.util.PatternSet; + +import java.io.File; +import javax.inject.Inject; + +/** + * This plugin defines tasks related to releasing Elasticsearch. 
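ReleaseNotesIndexUpdater keeps the version anchors and include directives in descending order, and ensurePatchVersion pads a bare "major.minor" include name to "major.minor.0" so it can be compared as a full version. A simplified sketch of both ideas; the comparison here is a naive numeric one rather than the build's Version class:

```java
import java.util.ArrayList;
import java.util.List;

public class ReleaseNotesIndexExample {
    /** Include names use "major.minor"; pad to "major.minor.0" so they parse as full versions. */
    static String ensurePatchVersion(String version) {
        return version.matches("^\\d+\\.\\d+\\.\\d+.*$") ? version : version + ".0";
    }

    /** Inserts newVersion into a newest-first list, keeping descending order and skipping duplicates. */
    static void insertDescending(List<String> versions, String newVersion) {
        if (versions.contains(newVersion)) {
            return;
        }
        int insertionIndex = 0;
        while (insertionIndex < versions.size() && compare(versions.get(insertionIndex), newVersion) > 0) {
            insertionIndex++;
        }
        versions.add(insertionIndex, newVersion);
    }

    /** Naive dotted-version comparison, sufficient for plain x.y.z strings. */
    static int compare(String a, String b) {
        String[] as = ensurePatchVersion(a).split("\\.");
        String[] bs = ensurePatchVersion(b).split("\\.");
        for (int i = 0; i < 3; i++) {
            int cmp = Integer.compare(Integer.parseInt(as[i]), Integer.parseInt(bs[i]));
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        List<String> versions = new ArrayList<>(List.of("8.0.0", "7.14.1", "7.14.0"));
        insertDescending(versions, "7.15.0");
        System.out.println(versions); // [8.0.0, 7.15.0, 7.14.1, 7.14.0]
    }
}
```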
+ */ +public class ReleaseToolsPlugin implements Plugin { + + private static final String RESOURCES = "build-tools-internal/src/main/resources/"; + + private final ProjectLayout projectLayout; + + @Inject + public ReleaseToolsPlugin(ProjectLayout projectLayout) { + this.projectLayout = projectLayout; + } + + @Override + public void apply(Project project) { + project.getPluginManager().apply(PrecommitTaskPlugin.class); + final Directory projectDirectory = projectLayout.getProjectDirectory(); + + final FileTree yamlFiles = projectDirectory.dir("docs/changelog") + .getAsFileTree() + .matching(new PatternSet().include("**/*.yml", "**/*.yaml")); + + final Provider validateChangelogsAgainstYamlTask = project.getTasks() + .register("validateChangelogsAgainstSchema", ValidateYamlAgainstSchemaTask.class, task -> { + task.setGroup("Documentation"); + task.setDescription("Validate that the changelog YAML files comply with the changelog schema"); + task.setInputFiles(yamlFiles); + task.setJsonSchema(new File(project.getRootDir(), RESOURCES + "changelog-schema.json")); + task.setReport(new File(project.getBuildDir(), "reports/validateYaml.txt")); + }); + + final TaskProvider validateChangelogsTask = project.getTasks() + .register("validateChangelogs", ValidateChangelogEntryTask.class, task -> { + task.setGroup("Documentation"); + task.setDescription("Validate that all changelog YAML files are well-formed"); + task.setChangelogs(yamlFiles); + task.dependsOn(validateChangelogsAgainstYamlTask); + }); + + project.getTasks().register("generateReleaseNotes", GenerateReleaseNotesTask.class).configure(task -> { + final Version version = VersionProperties.getElasticsearchVersion(); + + task.setGroup("Documentation"); + task.setDescription("Generates release notes from changelog files held in this checkout"); + task.setChangelogs(yamlFiles); + + task.setReleaseNotesIndexTemplate(projectDirectory.file(RESOURCES + "templates/release-notes-index.asciidoc")); + task.setReleaseNotesIndexFile(projectDirectory.file("docs/reference/release-notes.asciidoc")); + + task.setReleaseNotesTemplate(projectDirectory.file(RESOURCES + "templates/release-notes.asciidoc")); + task.setReleaseNotesFile( + projectDirectory.file(String.format("docs/reference/release-notes/%d.%d.asciidoc", version.getMajor(), version.getMinor())) + ); + + task.setReleaseHighlightsTemplate(projectDirectory.file(RESOURCES + "templates/release-highlights.asciidoc")); + task.setReleaseHighlightsFile(projectDirectory.file("docs/reference/release-notes/highlights.asciidoc")); + + task.setBreakingChangesTemplate(projectDirectory.file(RESOURCES + "templates/breaking-changes.asciidoc")); + task.setBreakingChangesFile( + projectDirectory.file( + String.format("docs/reference/migration/migrate_%d_%d.asciidoc", version.getMajor(), version.getMinor()) + ) + ); + + task.dependsOn(validateChangelogsTask); + }); + + project.getTasks().named("precommit").configure(task -> task.dependsOn(validateChangelogsTask)); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java new file mode 100644 index 0000000000000..5f030eb074653 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; +import org.gradle.api.file.ProjectLayout; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.TaskAction; + +import javax.inject.Inject; +import java.net.URI; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Performs additional checks on changelog files, beyond whether they conform to the schema. + */ +public class ValidateChangelogEntryTask extends DefaultTask { + private final ConfigurableFileCollection changelogs; + private final ProjectLayout projectLayout; + + @Inject + public ValidateChangelogEntryTask(ObjectFactory objectFactory, ProjectLayout projectLayout) { + this.changelogs = objectFactory.fileCollection(); + this.projectLayout = projectLayout; + } + + @TaskAction + public void executeTask() { + final URI rootDir = projectLayout.getProjectDirectory().getAsFile().toURI(); + final Map<String, ChangelogEntry> changelogs = this.changelogs.getFiles() + .stream() + .collect(Collectors.toMap(file -> rootDir.relativize(file.toURI()).toString(), ChangelogEntry::parse)); + + // We don't try to find all such errors, because we expect them to be rare e.g. only + // when a new file is added. + changelogs.forEach((path, entry) -> { + final String type = entry.getType(); + + if (type.equals("known-issue") == false && type.equals("security") == false) { + if (entry.getPr() == null) { + throw new GradleException("[" + path + "] must provide a [pr] number (only 'known-issue' and " + + "'security' entries can omit this)"); + } + + if (entry.getArea() == null) { + throw new GradleException("[" + path + "] must provide an [area] (only 'known-issue' and " + + "'security' entries can omit this)"); + } + } + + if ((type.equals("breaking") || type.equals("breaking-java")) && entry.getBreaking() == null) { + throw new GradleException( + "[" + path + "] has type [" + type + "] and must supply a [breaking] section with further information" + ); + } + + if (type.equals("deprecation") && entry.getDeprecation() == null) { + throw new GradleException( + "[" + path + "] has type [deprecation] and must supply a [deprecation] section with further information" + ); + } + }); + } + + @InputFiles + public FileCollection getChangelogs() { + return changelogs; + } + + public void setChangelogs(FileCollection files) { + this.changelogs.setFrom(files); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java index 74a870cbdd1fa..be964536f65bd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java @@ -30,6 +30,7 @@ import org.elasticsearch.gradle.internal.test.rest.transform.match.ReplaceValueInMatch; import
org.elasticsearch.gradle.internal.test.rest.transform.text.ReplaceIsFalse; import org.elasticsearch.gradle.internal.test.rest.transform.text.ReplaceIsTrue; +import org.elasticsearch.gradle.internal.test.rest.transform.text.ReplaceTextual; import org.elasticsearch.gradle.internal.test.rest.transform.warnings.InjectAllowedWarnings; import org.elasticsearch.gradle.internal.test.rest.transform.warnings.InjectWarnings; import org.elasticsearch.gradle.internal.test.rest.transform.warnings.RemoveWarnings; @@ -53,6 +54,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; @@ -94,7 +96,18 @@ public RestCompatTestTransformTask( // always inject compat headers headers.put("Content-Type", "application/vnd.elasticsearch+json;compatible-with=" + compatibleVersion); headers.put("Accept", "application/vnd.elasticsearch+json;compatible-with=" + compatibleVersion); - transformations.add(new InjectHeaders(headers)); + transformations.add(new InjectHeaders(headers, Set.of(RestCompatTestTransformTask::doesNotHaveCatOperation))); + } + + private static boolean doesNotHaveCatOperation(ObjectNode doNodeValue) { + final Iterator<String> fieldNamesIterator = doNodeValue.fieldNames(); + while (fieldNamesIterator.hasNext()) { + final String fieldName = fieldNamesIterator.next(); + if (fieldName.startsWith("cat.")) { + return false; + } + } + return true; } /** @@ -124,6 +137,17 @@ public void replaceValueInMatch(String subKey, Object value, String testName) { * @see ReplaceKeyInDo * @param oldKeyName the key name directly under do to replace. * @param newKeyName the new key name directly under do. + * @param testName the testName to apply replacement + */ + public void replaceKeyInDo(String oldKeyName, String newKeyName, String testName) { + transformations.add(new ReplaceKeyInDo(oldKeyName, newKeyName, testName)); + } + + /** + * A transformation to replace the key in a do section for a given REST test. + * @see ReplaceKeyInDo + * @param oldKeyName the key name directly under do to replace. + * @param newKeyName the new key name directly under do. */ public void replaceKeyInDo(String oldKeyName, String newKeyName) { transformations.add(new ReplaceKeyInDo(oldKeyName, newKeyName, null)); @@ -183,6 +207,32 @@ public void replaceIsFalse(String oldValue, Object newValue, String testName) { transformations.add(new ReplaceIsFalse(oldValue, MAPPER.convertValue(newValue, TextNode.class), testName)); } + /** + * Replaces all the values of a given key/value pair for all project REST tests. + * For example "foo": "bar" can be replaced with "foo": "baz" + * + * @param key the key to find + * @param oldValue the value of that key to find + * @param newValue the value used in the replacement + */ + public void replaceValueTextByKeyValue(String key, String oldValue, Object newValue) { + transformations.add(new ReplaceTextual(key, oldValue, MAPPER.convertValue(newValue, TextNode.class))); + } + + /** + * Replaces all the values of a given key/value pair for a given REST test.
+ * For example "foo": "bar" can be replaced with "foo": "baz" + * + * @param key the key to find + * @param oldValue the value of that key to find + * @param newValue the value used in the replacement + * @param testName the testName to apply replacement + */ + public void replaceValueTextByKeyValue(String key, String oldValue, Object newValue, String testName) { + transformations.add(new ReplaceTextual(key, oldValue, MAPPER.convertValue(newValue, TextNode.class), testName)); + } + + + /** * Removes the key/value of a match assertion all project REST tests for the matching subkey. * For example "match":{"_type": "foo"} to "match":{} @@ -243,6 +293,15 @@ public void removeWarning(String... warnings) { transformations.add(new RemoveWarnings(Set.copyOf(Arrays.asList(warnings)))); } + /** + * Removes one or more warnings + * @param warnings the warning(s) to remove + * @param testName the test name to remove the warning + */ + public void removeWarningForTest(String warnings, String testName) { + transformations.add(new RemoveWarnings(Set.copyOf(Arrays.asList(warnings)), testName)); + } + /** * Adds one or more allowed warnings * @param allowedWarnings the warning(s) to add @@ -259,6 +318,15 @@ public void addAllowedWarningRegex(String... allowedWarningsRegex) { transformations.add(new InjectAllowedWarnings(true, Arrays.asList(allowedWarningsRegex))); } + /** + * Adds one or more allowed regular expression warnings + * @param allowedWarningsRegex the regex warning(s) to add + * @param testName the test name to add an allowedWarningRegex + */ + public void addAllowedWarningRegexForTest(String allowedWarningsRegex, String testName) { + transformations.add(new InjectAllowedWarnings(true, Arrays.asList(allowedWarningsRegex), testName)); + } + @OutputDirectory public DirectoryProperty getOutputDirectory() { return outputDirectory; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java index 90e7f461117db..b104d1aa3df77 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal.rest.compat; -import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; +import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.test.RestIntegTestTask; @@ -37,7 +37,7 @@ import java.nio.file.Path; import java.util.Map; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupDependencies; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupTestDependenciesDefaults; /** * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster.
@@ -59,7 +59,7 @@ public void apply(Project project) { final Path compatSpecsDir = compatRestResourcesDir.resolve("yamlSpecs"); final Path compatTestsDir = compatRestResourcesDir.resolve("yamlTests"); - project.getPluginManager().apply(ElasticsearchJavaPlugin.class); + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(TestClustersPlugin.class); project.getPluginManager().apply(RestTestBasePlugin.class); project.getPluginManager().apply(RestResourcesPlugin.class); @@ -162,7 +162,7 @@ public void apply(Project project) { .flatMap(CopyRestTestsTask::getOutputResourceDir); // setup the yamlRestTest task - Provider yamlRestCompatTestTask = RestTestUtil.registerTask(project, yamlCompatTestSourceSet); + Provider yamlRestCompatTestTask = RestTestUtil.registerTestTask(project, yamlCompatTestSourceSet); project.getTasks().withType(RestIntegTestTask.class).named(SOURCE_SET_NAME).configure(testTask -> { // Use test runner and classpath from "normal" yaml source set testTask.setTestClassesDirs( @@ -180,8 +180,7 @@ public void apply(Project project) { testTask.onlyIf(t -> isEnabled(project)); }); - // setup the dependencies - setupDependencies(project, yamlCompatTestSourceSet); + setupTestDependenciesDefaults(project, yamlCompatTestSourceSet); // setup IDE GradleUtils.setupIdeForTestSourceSet(project, yamlCompatTestSourceSet); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index ff6722895e4ed..76a55004766e2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.internal.docker.DockerSupportService; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.util.GradleUtils; import org.elasticsearch.gradle.internal.conventions.util.Util; import org.elasticsearch.gradle.internal.vagrant.VagrantBasePlugin; @@ -66,7 +67,7 @@ public class DistroTestPlugin implements Plugin { private static final String SYSTEM_JDK_VERSION = "11.0.2+9"; private static final String SYSTEM_JDK_VENDOR = "openjdk"; - private static final String GRADLE_JDK_VERSION = "15.0.2+7"; + private static final String GRADLE_JDK_VERSION = "16.0.1+9"; private static final String GRADLE_JDK_VENDOR = "adoptopenjdk"; // all distributions used by distro tests. 
this is temporary until tests are per distribution @@ -76,9 +77,6 @@ public class DistroTestPlugin implements Plugin { private static final String BWC_DISTRIBUTION_SYSPROP = "tests.bwc-distribution"; private static final String EXAMPLE_PLUGIN_SYSPROP = "tests.example-plugin"; - private static final String QUOTA_AWARE_FS_PLUGIN_CONFIGURATION = "quotaAwareFsPlugin"; - private static final String QUOTA_AWARE_FS_PLUGIN_SYSPROP = "tests.quota-aware-fs-plugin"; - @Override public void apply(Project project) { project.getRootProject().getPluginManager().apply(DockerSupportPlugin.class); @@ -101,7 +99,6 @@ public void apply(Project project) { TaskProvider destructiveDistroTest = project.getTasks().register("destructiveDistroTest"); Configuration examplePlugin = configureExamplePlugin(project); - Configuration quotaAwareFsPlugin = configureQuotaAwareFsPlugin(project); List> windowsTestTasks = new ArrayList<>(); Map>> linuxTestTasks = new HashMap<>(); @@ -112,13 +109,12 @@ public void apply(Project project) { String taskname = destructiveDistroTestTaskName(distribution); TaskProvider depsTask = project.getTasks().register(taskname + "#deps"); // explicitly depend on the archive not on the implicit extracted distribution - depsTask.configure(t -> t.dependsOn(distribution.getArchiveDependencies(), examplePlugin, quotaAwareFsPlugin)); + depsTask.configure(t -> t.dependsOn(distribution.getArchiveDependencies(), examplePlugin)); depsTasks.put(taskname, depsTask); TaskProvider destructiveTask = configureTestTask(project, taskname, distribution, t -> { t.onlyIf(t2 -> distribution.isDocker() == false || dockerSupport.get().getDockerAvailability().isAvailable); addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); - addDistributionSysprop(t, QUOTA_AWARE_FS_PLUGIN_SYSPROP, () -> quotaAwareFsPlugin.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); }, depsTask); @@ -316,14 +312,6 @@ private static Configuration configureExamplePlugin(Project project) { return examplePlugin; } - private static Configuration configureQuotaAwareFsPlugin(Project project) { - Configuration examplePlugin = project.getConfigurations().create(QUOTA_AWARE_FS_PLUGIN_CONFIGURATION); - DependencyHandler deps = project.getDependencies(); - Map quotaAwareFsPluginProject = Map.of("path", ":x-pack:quota-aware-fs", "configuration", "zip"); - deps.add(QUOTA_AWARE_FS_PLUGIN_CONFIGURATION, deps.project(quotaAwareFsPluginProject)); - return examplePlugin; - } - private static void configureVMWrapperTasks( Project project, List> destructiveTasks, diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java index ae6b7e70343a0..f14cc4fd658e0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java @@ -8,24 +8,39 @@ package org.elasticsearch.gradle.internal.test; +import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin; import org.elasticsearch.gradle.internal.FixtureStop; +import org.elasticsearch.gradle.internal.InternalTestClustersPlugin; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import 
org.elasticsearch.gradle.testclusters.ElasticsearchCluster; import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.provider.ProviderFactory; +import org.jetbrains.annotations.Nullable; + +import javax.inject.Inject; public class RestTestBasePlugin implements Plugin { private static final String TESTS_REST_CLUSTER = "tests.rest.cluster"; private static final String TESTS_CLUSTER = "tests.cluster"; private static final String TESTS_CLUSTER_NAME = "tests.clustername"; + private ProviderFactory providerFactory; + + @Inject + public RestTestBasePlugin(ProviderFactory providerFactory) { + this.providerFactory = providerFactory; + } @Override public void apply(Project project) { - project.getPluginManager().apply(TestClustersPlugin.class); + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(ElasticsearchTestBasePlugin.class); + project.getPluginManager().apply(InternalTestClustersPlugin.class); project.getTasks().withType(RestIntegTestTask.class).configureEach(restIntegTestTask -> { @SuppressWarnings("unchecked") NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project @@ -35,8 +50,8 @@ public void apply(Project project) { restIntegTestTask.useCluster(cluster); restIntegTestTask.include("**/*IT.class"); restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString()); - if (System.getProperty(TESTS_REST_CLUSTER) == null) { - if (System.getProperty(TESTS_CLUSTER) != null || System.getProperty(TESTS_CLUSTER_NAME) != null) { + if (systemProperty(TESTS_REST_CLUSTER) == null) { + if (systemProperty(TESTS_CLUSTER) != null || systemProperty(TESTS_CLUSTER_NAME) != null) { throw new IllegalArgumentException( String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME) ); @@ -47,15 +62,22 @@ public void apply(Project project) { runnerNonInputProperties.systemProperty(TESTS_CLUSTER, () -> String.join(",", cluster.getAllTransportPortURI())); runnerNonInputProperties.systemProperty(TESTS_CLUSTER_NAME, cluster::getName); } else { - if (System.getProperty(TESTS_CLUSTER) == null || System.getProperty(TESTS_CLUSTER_NAME) == null) { + if (systemProperty(TESTS_CLUSTER) == null || systemProperty(TESTS_CLUSTER_NAME) == null) { throw new IllegalArgumentException( String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME) ); } } }); + + project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(project.getTasks().withType(RestIntegTestTask.class))); project.getTasks() .withType(StandaloneRestIntegTestTask.class) .configureEach(t -> t.finalizedBy(project.getTasks().withType(FixtureStop.class))); } + + @Nullable + private String systemProperty(String propName) { + return providerFactory.systemProperty(propName).forUseAtConfigurationTime().getOrNull(); + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java index 86e0aa19b168c..5b508588e018c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java 
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java @@ -8,19 +8,14 @@ package org.elasticsearch.gradle.internal.test; -import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; -import org.elasticsearch.gradle.internal.RepositoriesSetupPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; -import org.elasticsearch.gradle.testclusters.TestClustersPlugin; +import org.elasticsearch.gradle.internal.test.rest.RestTestUtil; import org.gradle.api.InvalidUserDataException; import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.plugins.JavaBasePlugin; import org.gradle.api.plugins.JavaPlugin; -import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.testing.Test; @@ -46,17 +41,9 @@ public void apply(final Project project) { } project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); - project.getPluginManager().apply(JavaBasePlugin.class); - project.getPluginManager().apply(TestClustersPlugin.class); - project.getPluginManager().apply(RepositoriesSetupPlugin.class); project.getPluginManager().apply(RestTestBasePlugin.class); project.getTasks().register("buildResources", ExportElasticsearchBuildResourcesTask.class); - ElasticsearchJavaPlugin.configureInputNormalization(project); - ElasticsearchJavaPlugin.configureCompile(project); - - project.getExtensions().getByType(JavaPluginExtension.class).setSourceCompatibility(BuildParams.getMinimumRuntimeVersion()); - project.getExtensions().getByType(JavaPluginExtension.class).setTargetCompatibility(BuildParams.getMinimumRuntimeVersion()); // only setup tests to build SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); @@ -69,7 +56,7 @@ public void apply(final Project project) { // create a compileOnly configuration as others might expect it project.getConfigurations().create("compileOnly"); - project.getDependencies().add("testImplementation", project.project(":test:framework")); + RestTestUtil.setupTestDependenciesDefaults(project, testSourceSet); EclipseModel eclipse = project.getExtensions().getByType(EclipseModel.class); eclipse.getClasspath().setSourceSets(Arrays.asList(testSourceSet)); @@ -86,6 +73,6 @@ public void apply(final Project project) { "TEST", Map.of("plus", Arrays.asList(project.getConfigurations().getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME))) ); - BuildParams.withInternalBuild(() -> InternalPrecommitTasks.create(project, false)); + InternalPrecommitTasks.create(project, false); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneTestPlugin.java index 5614b7d2f215c..a7bfd81695d14 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneTestPlugin.java @@ -30,7 +30,6 @@ public void apply(final Project project) { test.mustRunAfter(project.getTasks().getByName("precommit")); }); - ElasticsearchJavaPlugin.configureCompile(project); 
project.getTasks().named("check").configure(task -> task.dependsOn(project.getTasks().named("test"))); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestApiTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestApiTask.java index 80096039258d1..dad4aeea9cf8c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestApiTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestApiTask.java @@ -61,14 +61,12 @@ public class CopyRestApiTask extends DefaultTask { private final PatternFilterable patternSet; private final ProjectLayout projectLayout; private final FileSystemOperations fileSystemOperations; - private final ArchiveOperations archiveOperations; @Inject public CopyRestApiTask( ProjectLayout projectLayout, Factory patternSetFactory, FileSystemOperations fileSystemOperations, - ArchiveOperations archiveOperations, ObjectFactory objectFactory ) { this.include = objectFactory.listProperty(String.class); @@ -77,7 +75,6 @@ public CopyRestApiTask( this.patternSet = patternSetFactory.create(); this.projectLayout = projectLayout; this.fileSystemOperations = fileSystemOperations; - this.archiveOperations = archiveOperations; } @Input @@ -96,12 +93,8 @@ public FileTree getInputDir() { FileTree coreFileTree = null; boolean projectHasYamlRestTests = skipHasRestTestCheck || projectHasYamlRestTests(); if (include.get().isEmpty() == false || projectHasYamlRestTests) { - if (BuildParams.isInternal()) { - patternSet.setIncludes(include.get().stream().map(prefix -> prefix + "*/**").collect(Collectors.toList())); - coreFileTree = configToFileTree.apply(config).matching(patternSet); // directory on disk - } else { - coreFileTree = config.getAsFileTree(); // jar file - } + patternSet.setIncludes(include.get().stream().map(prefix -> prefix + "*/**").collect(Collectors.toList())); + coreFileTree = configToFileTree.apply(config).matching(patternSet); // directory on disk } FileCollection fileCollection = additionalConfig == null @@ -131,30 +124,12 @@ void copy() { String projectPath = getProjectPathFromTask(getPath()); File restSpecOutputDir = new File(outputResourceDir.get().getAsFile(), REST_API_PREFIX); - if (BuildParams.isInternal()) { - getLogger().debug("Rest specs for project [{}] will be copied to the test resources.", projectPath); - fileSystemOperations.copy(c -> { - c.from(configToFileTree.apply(config)); - c.into(restSpecOutputDir); - c.include(patternSet.getIncludes()); - }); - } else { - getLogger().debug( - "Rest specs for project [{}] will be copied to the test resources from the published jar (version: [{}]).", - projectPath, - VersionProperties.getElasticsearch() - ); - fileSystemOperations.copy(c -> { - c.from(archiveOperations.zipTree(config.getSingleFile())); // jar file - c.into(outputResourceDir); - if (include.get().isEmpty()) { - c.include(REST_API_PREFIX + "/**"); - } else { - c.include(include.get().stream().map(prefix -> REST_API_PREFIX + "/" + prefix + "*/**").collect(Collectors.toList())); - } - }); - } - + getLogger().debug("Rest specs for project [{}] will be copied to the test resources.", projectPath); + fileSystemOperations.copy(c -> { + c.from(configToFileTree.apply(config)); + c.into(restSpecOutputDir); + c.include(patternSet.getIncludes()); + }); // copy any additional config if (additionalConfig != null) { fileSystemOperations.copy(c -> { @@ -209,8 +184,4 @@ public void 
setAdditionalConfigToFileTree(Function add this.additionalConfigToFileTree = additionalConfigToFileTree; } - @Internal - public FileCollection getConfig() { - return config; - } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java index a45ef6e0168a9..17a494bb336d9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java @@ -7,10 +7,8 @@ */ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.apache.tools.ant.filters.ReplaceTokens; import org.gradle.api.DefaultTask; -import org.gradle.api.file.ArchiveOperations; import org.gradle.api.file.DirectoryProperty; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; @@ -20,7 +18,7 @@ import org.gradle.api.provider.ListProperty; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.Internal; +import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.TaskAction; @@ -28,10 +26,11 @@ import org.gradle.api.tasks.util.PatternSet; import org.gradle.internal.Factory; -import javax.inject.Inject; import java.io.File; +import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; +import javax.inject.Inject; import static org.elasticsearch.gradle.util.GradleUtils.getProjectPathFromTask; @@ -46,6 +45,7 @@ public class CopyRestTestsTask extends DefaultTask { private static final String REST_TEST_PREFIX = "rest-api-spec/test"; private final ListProperty includeCore; private final ListProperty includeXpack; + private Map substitutions; private final DirectoryProperty outputResourceDir; private FileCollection coreConfig; @@ -59,14 +59,12 @@ public class CopyRestTestsTask extends DefaultTask { private final PatternFilterable xpackPatternSet; private final ProjectLayout projectLayout; private final FileSystemOperations fileSystemOperations; - private final ArchiveOperations archiveOperations; @Inject public CopyRestTestsTask( ProjectLayout projectLayout, Factory patternSetFactory, FileSystemOperations fileSystemOperations, - ArchiveOperations archiveOperations, ObjectFactory objectFactory ) { this.includeCore = objectFactory.listProperty(String.class); @@ -76,7 +74,6 @@ public CopyRestTestsTask( this.xpackPatternSet = patternSetFactory.create(); this.projectLayout = projectLayout; this.fileSystemOperations = fileSystemOperations; - this.archiveOperations = archiveOperations; } @Input @@ -89,6 +86,16 @@ public ListProperty getIncludeXpack() { return includeXpack; } + public void setSubstitutions(Map substitutions) { + this.substitutions = substitutions; + } + + @Input + @Optional + public Map getSubstitutions() { + return substitutions; + } + @SkipWhenEmpty @InputFiles public FileTree getInputDir() { @@ -99,12 +106,8 @@ public FileTree getInputDir() { xpackFileTree = xpackConfigToFileTree.apply(xpackConfig).matching(xpackPatternSet); } if (includeCore.get().isEmpty() == false) { - if (BuildParams.isInternal()) { - corePatternSet.setIncludes(includeCore.get().stream().map(prefix -> prefix + 
"*/**").collect(Collectors.toList())); - coreFileTree = coreConfigToFileTree.apply(coreConfig).matching(corePatternSet); // directory on disk - } else { - coreFileTree = coreConfig.getAsFileTree(); // jar file - } + corePatternSet.setIncludes(includeCore.get().stream().map(prefix -> prefix + "*/**").collect(Collectors.toList())); + coreFileTree = coreConfigToFileTree.apply(coreConfig).matching(corePatternSet); // directory on disk } FileCollection fileCollection = additionalConfig == null ? projectLayout.files(coreFileTree, xpackFileTree) @@ -131,27 +134,15 @@ void copy() { // only copy core tests if explicitly instructed if (includeCore.get().isEmpty() == false) { - if (BuildParams.isInternal()) { - getLogger().debug("Rest tests for project [{}] will be copied to the test resources.", projectPath); - fileSystemOperations.copy(c -> { - c.from(coreConfigToFileTree.apply(coreConfig)); - c.into(restTestOutputDir); - c.include(corePatternSet.getIncludes()); - }); - } else { - getLogger().debug( - "Rest tests for project [{}] will be copied to the test resources from the published jar (version: [{}]).", - projectPath, - VersionProperties.getElasticsearch() - ); - fileSystemOperations.copy(c -> { - c.from(archiveOperations.zipTree(coreConfig.getSingleFile())); // jar file - c.into(outputResourceDir); - c.include( - includeCore.get().stream().map(prefix -> REST_TEST_PREFIX + "/" + prefix + "*/**").collect(Collectors.toList()) - ); - }); - } + getLogger().debug("Rest tests for project [{}] will be copied to the test resources.", projectPath); + fileSystemOperations.copy(c -> { + c.from(coreConfigToFileTree.apply(coreConfig)); + c.into(restTestOutputDir); + c.include(corePatternSet.getIncludes()); + if (substitutions != null) { + c.filter(Map.of("tokens", substitutions), ReplaceTokens.class); + } + }); } // only copy x-pack tests if explicitly instructed if (includeXpack.get().isEmpty() == false) { @@ -160,6 +151,9 @@ void copy() { c.from(xpackConfigToFileTree.apply(xpackConfig)); c.into(restTestOutputDir); c.include(xpackPatternSet.getIncludes()); + if (substitutions != null) { + c.filter(Map.of("tokens", substitutions), ReplaceTokens.class); + } }); } // copy any additional config @@ -167,6 +161,9 @@ void copy() { fileSystemOperations.copy(c -> { c.from(additionalConfigToFileTree.apply(additionalConfig)); c.into(restTestOutputDir); + if (substitutions != null) { + c.filter(Map.of("tokens", substitutions), ReplaceTokens.class); + } }); } } @@ -195,13 +192,4 @@ public void setAdditionalConfigToFileTree(Function add this.additionalConfigToFileTree = additionalConfigToFileTree; } - @Internal - public FileCollection getCoreConfig() { - return coreConfig; - } - - @Internal - public FileCollection getXpackConfig() { - return xpackConfig; - } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java index 1886bb0e6f168..21c4bee730e62 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java @@ -8,21 +8,15 @@ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; -import org.elasticsearch.gradle.internal.InternalTestClustersPlugin; -import org.elasticsearch.gradle.internal.test.RestIntegTestTask; import 
org.elasticsearch.gradle.internal.test.RestTestBasePlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.provider.Provider; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.createTestCluster; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.registerTask; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupDependencies; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.registerTestTask; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupTestDependenciesDefaults; /** * Apply this plugin to run the Java based REST tests. @@ -33,27 +27,19 @@ public class JavaRestTestPlugin implements Plugin { @Override public void apply(Project project) { - project.getPluginManager().apply(ElasticsearchJavaPlugin.class); project.getPluginManager().apply(RestTestBasePlugin.class); - project.getPluginManager().apply(InternalTestClustersPlugin.class); // create source set SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet javaTestSourceSet = sourceSets.create(SOURCE_SET_NAME); - // create the test cluster container - createTestCluster(project, javaTestSourceSet); - // setup the javaRestTest task - Provider javaRestTestTask = registerTask(project, javaTestSourceSet); + registerTestTask(project, javaTestSourceSet); // setup dependencies - setupDependencies(project, javaTestSourceSet); + setupTestDependenciesDefaults(project, javaTestSourceSet); // setup IDE GradleUtils.setupIdeForTestSourceSet(project, javaTestSourceSet); - - // wire this task into check - project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(javaRestTestTask)); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java index 0113246c918c8..c294883f578ff 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java @@ -33,10 +33,6 @@ void restApi(Action spec) { } void restTests(Action spec) { - if (BuildParams.isInternal() == false) { - // TODO: Separate this out into an "internal" plugin so we don't even expose this API to external folks - throw new UnsupportedOperationException("Including tests is not supported from external builds."); - } spec.execute(restTests); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java index f0d70e27e2bad..90ed8cd0baef2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.Plugin; import org.gradle.api.Project; 
import org.gradle.api.artifacts.Configuration; @@ -85,36 +83,28 @@ public void apply(Project project) { RestResourcesExtension extension = project.getExtensions().create(EXTENSION_NAME, RestResourcesExtension.class); SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet defaultSourceSet = sourceSets.getByName(TEST_SOURCE_SET_NAME); + SourceSet defaultSourceSet = sourceSets.maybeCreate(TEST_SOURCE_SET_NAME); // tests Configuration testConfig = project.getConfigurations().create("restTestConfig"); Configuration xpackTestConfig = project.getConfigurations().create("restXpackTestConfig"); - if (BuildParams.isInternal()) { - // core - Dependency restTestdependency = project.getDependencies() + // core + Dependency restTestdependency = project.getDependencies() .project(Map.of("path", ":rest-api-spec", "configuration", "restTests")); - project.getDependencies().add(testConfig.getName(), restTestdependency); - // x-pack - Dependency restXPackTestdependency = project.getDependencies() + project.getDependencies().add(testConfig.getName(), restTestdependency); + // x-pack + Dependency restXPackTestdependency = project.getDependencies() .project(Map.of("path", ":x-pack:plugin", "configuration", "restXpackTests")); - project.getDependencies().add(xpackTestConfig.getName(), restXPackTestdependency); - } else { - Dependency dependency = project.getDependencies() - .create("org.elasticsearch:rest-api-spec:" + VersionProperties.getElasticsearch()); - project.getDependencies().add(testConfig.getName(), dependency); - } + project.getDependencies().add(xpackTestConfig.getName(), restXPackTestdependency); project.getConfigurations().create("restTests"); project.getConfigurations().create("restXpackTests"); Provider copyRestYamlTestTask = project.getTasks() .register(COPY_YAML_TESTS_TASK, CopyRestTestsTask.class, task -> { - if (BuildParams.isInternal()) { - task.dependsOn(testConfig, xpackTestConfig); - task.setCoreConfig(testConfig); - task.setXpackConfig(xpackTestConfig); - } + task.dependsOn(testConfig, xpackTestConfig); + task.setCoreConfig(testConfig); + task.setXpackConfig(xpackTestConfig); // If this is the rest spec project, don't copy the tests again if (project.getPath().equals(":rest-api-spec") == false) { task.getIncludeCore().set(extension.getRestTests().getIncludeCore()); @@ -125,16 +115,9 @@ public void apply(Project project) { // api Configuration specConfig = project.getConfigurations().create("restSpec"); // name chosen for passivity - if (BuildParams.isInternal()) { - Dependency restSpecDependency = project.getDependencies() + Dependency restSpecDependency = project.getDependencies() .project(Map.of("path", ":rest-api-spec", "configuration", "restSpecs")); - project.getDependencies().add(specConfig.getName(), restSpecDependency); - } else { - Dependency dependency = project.getDependencies() - .create("org.elasticsearch:rest-api-spec:" + VersionProperties.getElasticsearch()); - project.getDependencies().add(specConfig.getName(), dependency); - } - + project.getDependencies().add(specConfig.getName(), restSpecDependency); project.getConfigurations().create("restSpecs"); Provider copyRestYamlApiTask = project.getTasks() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java index 28016a0f3edcd..9b83bccaaa168 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java @@ -8,15 +8,11 @@ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.test.RestIntegTestTask; -import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; -import org.elasticsearch.gradle.testclusters.TestClustersPlugin; import org.elasticsearch.gradle.util.GradleUtils; -import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.provider.Provider; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskProvider; @@ -28,26 +24,21 @@ */ public class RestTestUtil { - private RestTestUtil() {} - - public static ElasticsearchCluster createTestCluster(Project project, SourceSet sourceSet) { - // eagerly create the testCluster container so it is easily available for configuration - @SuppressWarnings("unchecked") - NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project - .getExtensions() - .getByName(TestClustersPlugin.EXTENSION_NAME); - return testClusters.create(sourceSet.getName()); + private RestTestUtil() { } /** * Creates a task with the source set name of type {@link RestIntegTestTask} */ - public static Provider registerTask(Project project, SourceSet sourceSet) { + public static Provider registerTestTask(Project project, SourceSet sourceSet) { // lazily create the test task - Provider testProvider = project.getTasks().register(sourceSet.getName(), RestIntegTestTask.class, testTask -> { + return project.getTasks().register(sourceSet.getName(), RestIntegTestTask.class, testTask -> { testTask.setGroup(JavaBasePlugin.VERIFICATION_GROUP); testTask.setDescription("Runs the REST tests against an external cluster"); - testTask.mustRunAfter(project.getTasks().named("test")); + project.getPlugins().withType(JavaPlugin.class, t -> + testTask.mustRunAfter(project.getTasks().named("test")) + ); + testTask.setTestClassesDirs(sourceSet.getOutput().getClassesDirs()); testTask.setClasspath(sourceSet.getRuntimeClasspath()); // if this a module or plugin, it may have an associated zip file with it's contents, add that to the test cluster @@ -61,23 +52,13 @@ public static Provider registerTask(Project project, SourceSe } }); }); - - return testProvider; } /** * Setup the dependencies needed for the REST tests. 
*/ - public static void setupDependencies(Project project, SourceSet sourceSet) { - BuildParams.withInternalBuild( - () -> { project.getDependencies().add(sourceSet.getImplementationConfigurationName(), project.project(":test:framework")); } - ).orElse(() -> { - project.getDependencies() - .add( - sourceSet.getImplementationConfigurationName(), - "org.elasticsearch.test:framework:" + VersionProperties.getElasticsearch() - ); - }); + public static void setupTestDependenciesDefaults(Project project, SourceSet sourceSet) { + project.getDependencies().add(sourceSet.getImplementationConfigurationName(), project.project(":test:framework")); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java index 389d38ac91f51..5a76543bebca1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java @@ -9,20 +9,15 @@ package org.elasticsearch.gradle.internal.test.rest; import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; -import org.elasticsearch.gradle.internal.test.RestIntegTestTask; import org.elasticsearch.gradle.internal.test.RestTestBasePlugin; -import org.elasticsearch.gradle.testclusters.TestClustersPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.provider.Provider; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.createTestCluster; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.registerTask; -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupDependencies; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.registerTestTask; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupTestDependenciesDefaults; /** * Apply this plugin to run the YAML based REST tests. 
@@ -33,24 +28,19 @@ public class YamlRestTestPlugin implements Plugin { @Override public void apply(Project project) { - - project.getPluginManager().apply(ElasticsearchJavaPlugin.class); - project.getPluginManager().apply(TestClustersPlugin.class); project.getPluginManager().apply(RestTestBasePlugin.class); project.getPluginManager().apply(RestResourcesPlugin.class); + ElasticsearchJavaPlugin.configureConfigurations(project); + // create source set SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet yamlTestSourceSet = sourceSets.create(SOURCE_SET_NAME); - // create the test cluster container - createTestCluster(project, yamlTestSourceSet); - - // setup the yamlRestTest task - Provider yamlRestTestTask = registerTask(project, yamlTestSourceSet); + registerTestTask(project, yamlTestSourceSet); // setup the dependencies - setupDependencies(project, yamlTestSourceSet); + setupTestDependenciesDefaults(project, yamlTestSourceSet); // setup the copy for the rest resources project.getTasks().withType(CopyRestApiTask.class).configureEach(copyRestApiTask -> { @@ -81,10 +71,6 @@ public void apply(Project project) { .flatMap(CopyRestTestsTask::getOutputResourceDir) ); - // setup IDE GradleUtils.setupIdeForTestSourceSet(project, yamlTestSourceSet); - - // wire this task into check - project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(yamlRestTestTask)); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestTransformer.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestTransformer.java index 30e2bf688194d..43e0dde175f3c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestTransformer.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/RestTestTransformer.java @@ -157,6 +157,7 @@ private void traverseTest( } } } + traverseTest(testContext, entry.getValue(), entry.getKey(), objectKeyFinders, arrayByObjectKeyFinders); } } }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/headers/InjectHeaders.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/headers/InjectHeaders.java index 27c54a5426cbd..c61a0a860d1ec 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/headers/InjectHeaders.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/headers/InjectHeaders.java @@ -18,6 +18,9 @@ import org.gradle.api.tasks.Internal; import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; /** * A {@link RestTestTransform} that injects HTTP headers into a REST test. This includes adding the necessary values to the "do" section @@ -28,25 +31,37 @@ public class InjectHeaders extends FeatureInjector implements RestTestTransformB private static JsonNodeFactory jsonNodeFactory = JsonNodeFactory.withExactBigDecimals(false); private final Map headers; + private final Set> applyConditions; /** * @param headers The headers to inject + * @param applyConditions a set of conditions that has to be satisfied in order to apply headers + * If the Set is empty then headers are always applied. 
*/ - public InjectHeaders(Map headers) { + public InjectHeaders(Map headers, Set> applyConditions) { this.headers = headers; + this.applyConditions = applyConditions; } @Override public void transformTest(ObjectNode doNodeParent) { ObjectNode doNodeValue = (ObjectNode) doNodeParent.get(getKeyToFind()); - ObjectNode headersNode = (ObjectNode) doNodeValue.get("headers"); - if (headersNode == null) { - headersNode = new ObjectNode(jsonNodeFactory); - } - for (Map.Entry entry : headers.entrySet()) { - headersNode.set(entry.getKey(), TextNode.valueOf(entry.getValue())); + + if (shouldApplyHeaders(doNodeValue)) { + ObjectNode headersNode = (ObjectNode) doNodeValue.get("headers"); + if (headersNode == null) { + headersNode = new ObjectNode(jsonNodeFactory); + } + + for (Map.Entry entry : headers.entrySet()) { + headersNode.set(entry.getKey(), TextNode.valueOf(entry.getValue())); + } + doNodeValue.set("headers", headersNode); } - doNodeValue.set("headers", headersNode); + } + + private boolean shouldApplyHeaders(ObjectNode doNodeValue) { + return applyConditions.stream().allMatch(f -> f.apply(doNodeValue)); } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java index ecd6b6fa7010d..12c403c7ace21 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/text/ReplaceTextual.java @@ -18,22 +18,22 @@ import org.gradle.api.tasks.Optional; /** - * A transformation to replace the flat textual fields. + * A transformation to replace a key/value combination. 
*/ -class ReplaceTextual implements RestTestTransformByParentObject { +public class ReplaceTextual implements RestTestTransformByParentObject { private final String keyToReplaceName; private final String valueToBeReplaced; private final TextNode replacementNode; private final String testName; - ReplaceTextual(String keyToReplaceName, String valueToBeReplaced, TextNode replacementNode) { + public ReplaceTextual(String keyToReplaceName, String valueToBeReplaced, TextNode replacementNode) { this.keyToReplaceName = keyToReplaceName; this.valueToBeReplaced = valueToBeReplaced; this.replacementNode = replacementNode; this.testName = null; } - ReplaceTextual(String keyToReplaceName, String valueToBeReplaced, TextNode replacementNode, String testName) { + public ReplaceTextual(String keyToReplaceName, String valueToBeReplaced, TextNode replacementNode, String testName) { this.keyToReplaceName = keyToReplaceName; this.valueToBeReplaced = valueToBeReplaced; this.replacementNode = replacementNode; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java index 6976ce6cd9443..8bfbadbe86ad3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarnings.java @@ -11,10 +11,13 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; + +import org.elasticsearch.gradle.internal.test.rest.transform.RestTestContext; import org.elasticsearch.gradle.internal.test.rest.transform.RestTestTransformByParentObject; import org.elasticsearch.gradle.internal.test.rest.transform.feature.FeatureInjector; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Internal; +import org.gradle.api.tasks.Optional; import java.util.List; @@ -26,6 +29,7 @@ public class InjectAllowedWarnings extends FeatureInjector implements RestTestTr private static JsonNodeFactory jsonNodeFactory = JsonNodeFactory.withExactBigDecimals(false); private final List allowedWarnings; + private String testName; private final boolean isRegex; /** @@ -40,8 +44,18 @@ public InjectAllowedWarnings(List allowedWarnings) { * @param allowedWarnings The allowed warnings to inject */ public InjectAllowedWarnings(boolean isRegex, List allowedWarnings) { + this(isRegex, allowedWarnings, null); + } + + /** + * @param isRegex true if should inject the regex variant of allowed warnings + * @param allowedWarnings The allowed warnings to inject + * @param testName The testName to inject + */ + public InjectAllowedWarnings(boolean isRegex, List allowedWarnings, String testName) { this.isRegex = isRegex; this.allowedWarnings = allowedWarnings; + this.testName = testName; } @Override @@ -52,7 +66,7 @@ public void transformTest(ObjectNode doNodeParent) { arrayWarnings = new ArrayNode(jsonNodeFactory); doNodeValue.set(getSkipFeatureName(), arrayWarnings); } - allowedWarnings.forEach(arrayWarnings::add); + this.allowedWarnings.forEach(arrayWarnings::add); } @Override @@ -71,4 +85,15 @@ public String getSkipFeatureName() { public List getAllowedWarnings() { return allowedWarnings; } + + @Override + public boolean shouldApply(RestTestContext testContext) { 
+ return testName == null || testContext.getTestName().equals(testName); + } + + @Input + @Optional + public String getTestName() { + return testName; + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java index a1d4e6d206924..bd816f6a680f8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarnings.java @@ -10,9 +10,12 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; + +import org.elasticsearch.gradle.internal.test.rest.transform.RestTestContext; import org.elasticsearch.gradle.internal.test.rest.transform.RestTestTransformByParentObject; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Internal; +import org.gradle.api.tasks.Optional; import java.util.ArrayList; import java.util.List; @@ -28,6 +31,7 @@ public class RemoveWarnings implements RestTestTransformByParentObject { private final Set warnings; + private String testName; /** * @param warnings The allowed warnings to inject @@ -35,6 +39,14 @@ public class RemoveWarnings implements RestTestTransformByParentObject { public RemoveWarnings(Set warnings) { this.warnings = warnings; } + /** + * @param warnings The allowed warnings to inject + * @param testName The testName to inject + */ + public RemoveWarnings(Set warnings, String testName) { + this.warnings = warnings; + this.testName = testName; + } @Override public void transformTest(ObjectNode doNodeParent) { @@ -66,4 +78,15 @@ public String getKeyToFind() { public Set getWarnings() { return warnings; } + + @Override + public boolean shouldApply(RestTestContext testContext) { + return testName == null || testContext.getTestName().equals(testName); + } + + @Input + @Optional + public String getTestName() { + return testName; + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index 8a1c758706100..d9ad10f866ebc 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -13,7 +13,7 @@ import com.avast.gradle.dockercompose.tasks.ComposeDown; import com.avast.gradle.dockercompose.tasks.ComposePull; import com.avast.gradle.dockercompose.tasks.ComposeUp; -import org.elasticsearch.gradle.internal.test.SystemPropertyCommandLineArgumentProvider; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportService; import org.elasticsearch.gradle.internal.info.BuildParams; diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json new file mode 100644 index 0000000000000..a2dfc5ecd306f --- /dev/null +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -0,0 +1,234 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": 
"https://github.com/elastic/elasticsearch/tree/master/docs/changelog", + "$ref": "#/definitions/Changelog", + "definitions": { + "Changelog": { + "type": "object", + "properties": { + "pr": { + "type": "integer" + }, + "issues": { + "type": "array", + "items": { + "type": "integer" + } + }, + "area": { + "type": "string", + "enum": [ + "Aggregations", + "Allocation", + "Analysis", + "Audit", + "Authentication", + "Authorization", + "Autoscaling", + "CCR", + "CRUD", + "Client", + "Cluster Coordination", + "Discovery-Plugins", + "Distributed", + "EQL", + "Engine", + "FIPS", + "Features/CAT APIs", + "Features/Data streams", + "Features/Features", + "Features/ILM+SLM", + "Features/Indices APIs", + "Features/Ingest", + "Features/Java High Level REST Client", + "Features/Java Low Level REST Client", + "Features/Monitoring", + "Features/Stats", + "Features/Watcher", + "Geo", + "Graph", + "Highlighting", + "IdentityProvider", + "Infra/CLI", + "Infra/Circuit Breakers", + "Infra/Core", + "Infra/Logging", + "Infra/Node Lifecycle", + "Infra/Plugins", + "Infra/REST API", + "Infra/Resiliency", + "Infra/Scripting", + "Infra/Settings", + "Infra/Transport API", + "License", + "Machine Learning", + "Mapping", + "Network", + "Packaging", + "Percolator", + "Performance", + "Query Languages", + "Ranking", + "Recovery", + "Reindex", + "Rollup", + "SQL", + "Search", + "Security", + "Snapshot/Restore", + "Store", + "Suggesters", + "TLS", + "Task Management", + "Transform" + ] + }, + "type": { + "type": "string", + "enum": [ + "breaking", + "breaking-java", + "bug", + "deprecation", + "enhancement", + "feature", + "known-issue", + "new-aggregation", + "regression", + "security", + "upgrade" + ] + }, + "summary": { + "type": "string", + "minLength": 1 + }, + "versions": { + "type": "array", + "items": { + "type": "string", + "pattern": "^v?\\d+\\.\\d+\\.\\d+$", + "minItems": 1 + } + }, + "highlight": { + "$ref": "#/definitions/Highlight" + }, + "breaking": { + "$ref": "#/definitions/Breaking" + }, + "deprecation": { + "$ref": "#/definitions/Deprecation" + } + }, + "required": [ + "type", + "summary", + "versions" + ] + }, + "Highlight": { + "properties": { + "notable": { + "type": "boolean" + }, + "title": { + "type": "string", + "minLength": 1 + }, + "body": { + "type": "string", + "minLength": 1 + } + }, + "required": [ + "title", + "body" + ], + "additionalProperties": false + }, + "Breaking": { + "properties": { + "area": { + "$ref": "#/definitions/breakingArea" + }, + "title": { + "type": "string", + "minLength": 1 + }, + "details": { + "type": "string", + "minLength": 1 + }, + "impact": { + "type": "string", + "minLength": 1 + }, + "notable": { + "type": "boolean" + } + }, + "required": [ + "area", + "title", + "details", + "impact" + ], + "additionalProperties": false + }, + "Deprecation": { + "properties": { + "area": { + "$ref": "#/definitions/breakingArea" + }, + "title": { + "type": "string", + "minLength": 1 + }, + "body": { + "type": "string", + "minLength": 1 + } + }, + "required": [ + "area", + "title", + "body" + ], + "additionalProperties": false + }, + "breakingArea": { + "type": "string", + "enum": [ + "API", + "Aggregation", + "Allocation", + "Authentication", + "CCR", + "Cluster", + "Discovery", + "Engine", + "HTTP", + "Highlighters", + "Indices", + "Java", + "License Information", + "Logging", + "Machine Learning", + "Mappings", + "Networking", + "Packaging", + "Plugins", + "Script Cache", + "Search Changes", + "Search", + "Security", + "Settings", + "Snapshot and Restore", + "Transform", + 
"Transport" + ] + }, + "additionalProperties": false + } +} diff --git a/build-tools-internal/src/main/resources/checkstyle_ide_fragment.xml b/build-tools-internal/src/main/resources/checkstyle_ide_fragment.xml index 9441fa9cd77c3..6aeae3712aaf9 100644 --- a/build-tools-internal/src/main/resources/checkstyle_ide_fragment.xml +++ b/build-tools-internal/src/main/resources/checkstyle_ide_fragment.xml @@ -29,7 +29,7 @@ - + diff --git a/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt index 7a5126457a42b..64c05ad953ab2 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt @@ -10,7 +10,6 @@ java.nio.file.Path#of(java.lang.String, java.lang.String[]) @ Use org.elasticsea java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.core.PathUtils.getDefaultFileSystem() instead. java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 -java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057 @defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness java.util.Random#() diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index 986084f369cdd..ef09838cb29a9 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -7.1 \ No newline at end of file +7.1.1 \ No newline at end of file diff --git a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc new file mode 100644 index 0000000000000..38573747863e9 --- /dev/null +++ b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc @@ -0,0 +1,102 @@ +[[migrating-${majorDotMinor}]] +== Migrating to ${majorDotMinor} +++++ +${majorDotMinor} +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} ${majorDotMinor}. + +See also <> and <>. +<% if (isElasticsearchSnapshot) { %> +coming[${version}] +<% } %> +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide +<% if (breakingChangesByNotabilityByArea.isEmpty() == false) { %> +[discrete] +[[breaking-changes-${majorDotMinor}]] +=== Breaking changes + +The following changes in {es} ${majorDotMinor} might affect your applications +and prevent them from operating normally. +Before upgrading to ${majorDotMinor} review these changes and take the described steps +to mitigate the impact. + +NOTE: Breaking changes introduced in minor versions are +normally limited to security and bug fixes. +Significant changes in behavior are deprecated in a minor release and +the old behavior is supported until the next major release. +To find out if you are using any deprecated functionality, +enable <>. 
+<% +[true, false].each { isNotable -> + def breakingChangesByArea = breakingChangesByNotabilityByArea.getOrDefault(isNotable, []) + + breakingChangesByArea.eachWithIndex { area, breakingChanges, i -> + print "\n" + + if (isNotable) { + print "// tag::notable-breaking-changes[]\n" + } + + print "[discrete]\n" + print "[[breaking_${majorMinor}_${ area.toLowerCase().replaceAll("[^a-z0-9]+", "_") }]]\n" + print "==== ${area}\n" + + for (breaking in breakingChanges) { %> +[[${ breaking.anchor }]] +.${breaking.title} +[%collapsible] +==== +*Details* + +${breaking.details.trim()} + +*Impact* + +${breaking.impact.trim()} +==== +<% + } + + if (isNotable) { + print "// end::notable-breaking-changes[]\n" + } + } +} +} +if (deprecationsByArea.empty == false) { %> + +[discrete] +[[deprecated-${majorDotMinor}]] +=== Deprecations + +The following functionality has been deprecated in {es} ${majorDotMinor} +and will be removed in ${nextMajor}. +While this won't have an immediate impact on your applications, +we strongly encourage you take the described steps to update your code +after upgrading to ${majorDotMinor}. + +NOTE: Significant changes in behavior are deprecated in a minor release and +the old behavior is supported until the next major release. +To find out if you are using any deprecated functionality, +enable <>." + +<% +deprecationsByArea.eachWithIndex { area, deprecations, i -> + print "\n[discrete]\n" + print "[[deprecations_${majorMinor}_${ area.toLowerCase().replaceAll("[^a-z0-9]+", "_") }]]" + print "==== ${area} deprecations" + + for (deprecation in deprecations) { %> + +[[${ deprecation.anchor }]] +.${deprecation.title} +[%collapsible] +==== +*Details* + +${deprecation.body.trim()} +==== +<% +} +} +} %> diff --git a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc new file mode 100644 index 0000000000000..40b828d609745 --- /dev/null +++ b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc @@ -0,0 +1,35 @@ +[[release-highlights]] +== What's new in {minor-version} + +coming::[{minor-version}] + +Here are the highlights of what's new and improved in {es} {minor-version}! +ifeval::[\\{release-state}\\"!=\\"unreleased\\"] +For detailed information about this release, see the <> and +<>. +endif::[] +<% if (priorVersions.size > 0) { %> +// Add previous release to the list +Other versions: + +<% +print priorVersions.join("\n| ") +print "\n" +} + +if (notableHighlights.empty == false) { %> +// tag::notable-highlights[] +<% for (highlight in notableHighlights) { %> +[discrete] +[[${ highlight.anchor }]] +=== ${highlight.title} +${highlight.body.trim()} +<% } %> +// end::notable-highlights[] +<% } %> +<% for (highlight in nonNotableHighlights) { %> +[discrete] +[[${ highlight.anchor }]] +=== ${highlight.title} +${highlight.body.trim()} +<% } %> diff --git a/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc b/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc new file mode 100644 index 0000000000000..0b62b9b3f1e01 --- /dev/null +++ b/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc @@ -0,0 +1,12 @@ +[[es-release-notes]] += Release notes + +[partintro] +-- + +This section summarizes the changes in each release. 
+ +<% existingVersions.each { print "* <>\n" } %> +-- + +<% existingIncludes.each { print "include::release-notes/${ it }.asciidoc[]\n" } %> diff --git a/build-tools-internal/src/main/resources/templates/release-notes.asciidoc b/build-tools-internal/src/main/resources/templates/release-notes.asciidoc new file mode 100644 index 0000000000000..35384c8f4ce66 --- /dev/null +++ b/build-tools-internal/src/main/resources/templates/release-notes.asciidoc @@ -0,0 +1,45 @@ +<% for (version in changelogsByVersionByTypeByArea.keySet()) { +%>[[release-notes-$version]] +== {es} version $version +<% if (version.qualifier == "SNAPSHOT") { %> +coming[$version] +<% } %> +Also see <>. +<% if (changelogsByVersionByTypeByArea[version]["security"] != null) { %> +[discrete] +[[security-updates-${version}]] +=== Security updates + +<% for (change in changelogsByVersionByTypeByArea[version].remove("security").remove("_all_")) { + print "* ${change.summary}\n" +} +} +if (changelogsByVersionByTypeByArea[version]["known-issue"] != null) { %> +[discrete] +[[known-issues-${version}]] +=== Known issues + +<% for (change in changelogsByVersionByTypeByArea[version].remove("known-issue").remove("_all_")) { + print "* ${change.summary}\n" +} +} +for (changeType in changelogsByVersionByTypeByArea[version].keySet()) { %> +[[${ changeType }-${ version }]] +[float] +=== ${ TYPE_LABELS[changeType] } +<% for (team in changelogsByVersionByTypeByArea[version][changeType].keySet()) { + print "\n${team}::\n"; + + for (change in changelogsByVersionByTypeByArea[version][changeType][team]) { + print "* ${change.summary} {es-pull}${change.pr}[#${change.pr}]" + if (change.issues != null && change.issues.empty == false) { + print change.issues.size() == 1 ? " (issue: " : " (issues: " + print change.issues.collect { "{es-issue}${it}[#${it}]" }.join(", ") + print ")" + } + print "\n" + } +} +} +} +%> diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java index 75b4c8d09821a..ef3a8d9b701f4 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java @@ -59,10 +59,9 @@ protected void checkBwc( Version version, ElasticsearchDistributionType type, ElasticsearchDistribution.Platform platform, - BwcVersions bwcVersions, - boolean isInternal + BwcVersions bwcVersions ) { - Project project = createProject(bwcVersions, isInternal); + Project project = createProject(bwcVersions); Project archiveProject = ProjectBuilder.builder().withParent(bwcProject).withName(projectName).build(); archiveProject.getConfigurations().create(config); archiveProject.getArtifacts().add(config, new File("doesnotmatter")); @@ -94,9 +93,8 @@ protected ElasticsearchDistribution createDistro( }).maybeFreeze(); } - protected Project createProject(BwcVersions bwcVersions, boolean isInternal) { + protected Project createProject(BwcVersions bwcVersions) { rootProject = ProjectBuilder.builder().build(); - BuildParams.init(params -> params.setIsInternal(isInternal)); Project distributionProject = ProjectBuilder.builder().withParent(rootProject).withName("distribution").build(); archivesProject = ProjectBuilder.builder().withParent(distributionProject).withName("archives").build(); packagesProject = 
ProjectBuilder.builder().withParent(distributionProject).withName("packages").build(); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java index 769e3dd0561f3..7da15e42a03c3 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java @@ -21,7 +21,7 @@ public class DistributionDownloadPluginTests extends AbstractDistributionDownloa public void testVersionDefault() { ElasticsearchDistribution distro = checkDistro( - createProject(null, false), + createProject(null), "testdistro", null, ElasticsearchDistributionTypes.ARCHIVE, @@ -33,7 +33,7 @@ public void testVersionDefault() { public void testBadVersionFormat() { assertDistroError( - createProject(null, false), + createProject(null), "testdistro", "badversion", ElasticsearchDistributionTypes.ARCHIVE, @@ -44,13 +44,13 @@ public void testBadVersionFormat() { } public void testTypeDefault() { - ElasticsearchDistribution distro = checkDistro(createProject(null, false), "testdistro", "5.0.0", null, Platform.LINUX, true); + ElasticsearchDistribution distro = checkDistro(createProject(null), "testdistro", "5.0.0", null, Platform.LINUX, true); assertEquals(distro.getType(), ElasticsearchDistributionTypes.ARCHIVE); } public void testPlatformDefault() { ElasticsearchDistribution distro = checkDistro( - createProject(null, false), + createProject(null), "testdistro", "5.0.0", ElasticsearchDistributionTypes.ARCHIVE, @@ -62,7 +62,7 @@ public void testPlatformDefault() { public void testPlatformForIntegTest() { assertDistroError( - createProject(null, false), + createProject(null), "testdistro", "5.0.0", ElasticsearchDistributionTypes.INTEG_TEST_ZIP, @@ -74,7 +74,7 @@ public void testPlatformForIntegTest() { public void testBundledJdkDefault() { ElasticsearchDistribution distro = checkDistro( - createProject(null, false), + createProject(null), "testdistro", "5.0.0", ElasticsearchDistributionTypes.ARCHIVE, @@ -86,7 +86,7 @@ public void testBundledJdkDefault() { public void testBundledJdkForIntegTest() { assertDistroError( - createProject(null, false), + createProject(null), "testdistro", "5.0.0", ElasticsearchDistributionTypes.INTEG_TEST_ZIP, @@ -97,7 +97,7 @@ public void testBundledJdkForIntegTest() { } public void testLocalCurrentVersionIntegTestZip() { - Project project = createProject(BWC_MINOR, true); + Project project = createProject(BWC_MINOR); Project archiveProject = ProjectBuilder.builder().withParent(archivesProject).withName("integ-test-zip").build(); archiveProject.getConfigurations().create("default"); archiveProject.getArtifacts().add("default", new File("doesnotmatter")); @@ -108,7 +108,7 @@ public void testLocalCurrentVersionArchives() { for (Platform platform : Platform.values()) { for (boolean bundledJdk : new boolean[] { true, false }) { // create a new project in each iteration, so that we know we are resolving the only additional project being created - Project project = createProject(BWC_MINOR, true); + Project project = createProject(BWC_MINOR); String projectName = projectName(platform.toString(), bundledJdk); projectName += (platform == Platform.WINDOWS ? 
"-zip" : "-tar"); Project archiveProject = ProjectBuilder.builder().withParent(archivesProject).withName(projectName).build(); @@ -132,10 +132,10 @@ public void testLocalBwcArchives() { String configName = projectName(platform.toString(), true); configName += (platform == Platform.WINDOWS ? "-zip" : "-tar"); ElasticsearchDistributionType archiveType = ElasticsearchDistributionTypes.ARCHIVE; - checkBwc("minor", configName, BWC_MINOR_VERSION, archiveType, platform, BWC_MINOR, true); - checkBwc("staged", configName, BWC_STAGED_VERSION, archiveType, platform, BWC_STAGED, true); - checkBwc("bugfix", configName, BWC_BUGFIX_VERSION, archiveType, platform, BWC_BUGFIX, true); - checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION, archiveType, platform, BWC_MAINTENANCE, true); + checkBwc("minor", configName, BWC_MINOR_VERSION, archiveType, platform, BWC_MINOR); + checkBwc("staged", configName, BWC_STAGED_VERSION, archiveType, platform, BWC_STAGED); + checkBwc("bugfix", configName, BWC_BUGFIX_VERSION, archiveType, platform, BWC_BUGFIX); + checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION, archiveType, platform, BWC_MAINTENANCE); } } diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/VersionTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/VersionTests.java deleted file mode 100644 index 37aa5cf9d21da..0000000000000 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ /dev/null @@ -1,115 +0,0 @@ -package org.elasticsearch.gradle; - -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; -import org.junit.Rule; -import org.junit.rules.ExpectedException; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -public class VersionTests extends GradleUnitTestCase { - - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - - public void testVersionParsing() { - assertVersionEquals("7.0.1", 7, 0, 1); - assertVersionEquals("7.0.1-alpha2", 7, 0, 1); - assertVersionEquals("5.1.2-rc3", 5, 1, 2); - assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2); - assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2); - assertVersionEquals("17.03.11", 17, 3, 11); - } - - public void testRelaxedVersionParsing() { - assertVersionEquals("6.1.2", 6, 1, 2, Version.Mode.RELAXED); - assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2, Version.Mode.RELAXED); - assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2, Version.Mode.RELAXED); - assertVersionEquals("6.1.2-foo", 6, 1, 2, Version.Mode.RELAXED); - assertVersionEquals("6.1.2-foo-bar", 6, 1, 2, Version.Mode.RELAXED); - assertVersionEquals("16.01.22", 16, 1, 22, Version.Mode.RELAXED); - } - - public void testCompareWithStringVersions() { - assertTrue("1.10.20 is not interpreted as before 2.0.0", Version.fromString("1.10.20").before("2.0.0")); - assertTrue( - "7.0.0-alpha1 should be equal to 7.0.0-alpha1", - Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1")) - ); - assertTrue( - "7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT", - Version.fromString("7.0.0-SNAPSHOT").equals(Version.fromString("7.0.0-SNAPSHOT")) - ); - } - - public void testCollections() { - assertTrue( - Arrays.asList( - Version.fromString("5.2.0"), - Version.fromString("5.2.1-SNAPSHOT"), - Version.fromString("6.0.0"), - Version.fromString("6.0.1"), - Version.fromString("6.1.0") - ).containsAll(Arrays.asList(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))) - ); - Set versions = new HashSet<>(); - versions.addAll( - Arrays.asList( - Version.fromString("5.2.0"), - Version.fromString("5.2.1-SNAPSHOT"), - Version.fromString("6.0.0"), - Version.fromString("6.0.1"), - Version.fromString("6.1.0") - ) - ); - Set subset = new HashSet<>(); - subset.addAll(Arrays.asList(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))); - assertTrue(versions.containsAll(subset)); - } - - public void testToString() { - assertEquals("7.0.1", new Version(7, 0, 1).toString()); - } - - public void testCompareVersions() { - assertEquals(0, new Version(7, 0, 0).compareTo(new Version(7, 0, 0))); - assertOrder(Version.fromString("19.0.1"), Version.fromString("20.0.3")); - } - - public void testExceptionEmpty() { - expectedEx.expect(IllegalArgumentException.class); - expectedEx.expectMessage("Invalid version format"); - Version.fromString(""); - } - - public void testExceptionSyntax() { - expectedEx.expect(IllegalArgumentException.class); - expectedEx.expectMessage("Invalid version format"); - Version.fromString("foo.bar.baz"); - } - - private void assertOrder(Version smaller, Version bigger) { - assertEquals(smaller + " should be smaller than " + bigger, -1, smaller.compareTo(bigger)); - } - - private void assertVersionEquals(String stringVersion, int major, int minor, int revision) { - assertVersionEquals(stringVersion, major, minor, revision, Version.Mode.STRICT); - } - - private void assertVersionEquals(String stringVersion, int major, int minor, int revision, Version.Mode mode) { - Version version = Version.fromString(stringVersion, mode); - 
assertEquals(major, version.getMajor()); - assertEquals(minor, version.getMinor()); - assertEquals(revision, version.getRevision()); - } - -} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java index c192809de71f0..12acc036782c6 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java @@ -23,7 +23,7 @@ public void testLocalCurrentVersionPackages() { ElasticsearchDistributionType[] types = { InternalElasticsearchDistributionTypes.RPM, InternalElasticsearchDistributionTypes.DEB }; for (ElasticsearchDistributionType packageType : types) { for (boolean bundledJdk : new boolean[] { true, false }) { - Project project = createProject(BWC_MINOR, true); + Project project = createProject(BWC_MINOR); String projectName = projectName(packageType.toString(), bundledJdk); Project packageProject = ProjectBuilder.builder().withParent(packagesProject).withName(projectName).build(); packageProject.getConfigurations().create("default"); @@ -38,10 +38,10 @@ public void testLocalBwcPackages() { for (ElasticsearchDistributionType packageType : types) { // note: no non bundled jdk for bwc String configName = projectName(packageType.toString(), true); - checkBwc("minor", configName, BWC_MINOR_VERSION, packageType, null, BWC_MINOR, true); - checkBwc("staged", configName, BWC_STAGED_VERSION, packageType, null, BWC_STAGED, true); - checkBwc("bugfix", configName, BWC_BUGFIX_VERSION, packageType, null, BWC_BUGFIX, true); - checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION, packageType, null, BWC_MAINTENANCE, true); + checkBwc("minor", configName, BWC_MINOR_VERSION, packageType, null, BWC_MINOR); + checkBwc("staged", configName, BWC_STAGED_VERSION, packageType, null, BWC_STAGED); + checkBwc("bugfix", configName, BWC_BUGFIX_VERSION, packageType, null, BWC_BUGFIX); + checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION, packageType, null, BWC_MAINTENANCE); } } } diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java index c3f13b1a02c4c..b0a5e02e1b93b 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java @@ -17,21 +17,23 @@ import com.fasterxml.jackson.databind.node.TextNode; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import com.fasterxml.jackson.dataformat.yaml.YAMLParser; + import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.elasticsearch.gradle.internal.test.rest.transform.headers.InjectHeaders; import org.hamcrest.CoreMatchers; +import org.hamcrest.Matchers; import org.hamcrest.core.IsCollectionContaining; import org.junit.Before; import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Set; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; import java.util.stream.Collectors; @@ -125,8 +127,8 @@ protected List getKnownFeatures() { protected List> getTransformations() { List> transformations = new ArrayList<>(); - transformations.add(new InjectHeaders(headers1)); - transformations.add(new InjectHeaders(headers2)); + transformations.add(new InjectHeaders(headers1, Collections.emptySet())); + transformations.add(new InjectHeaders(headers2, Collections.emptySet())); return transformations; } @@ -180,11 +182,11 @@ protected ObjectNode getSkipNode(ArrayNode setupNodeValue) { return null; } - protected void validateBodyHasWarnings(String featureName, List tests, Set expectedWarnings) { + protected void validateBodyHasWarnings(String featureName, List tests, Collection expectedWarnings) { validateBodyHasWarnings(featureName, null, tests, expectedWarnings); } - protected void validateBodyHasWarnings(String featureName, String testName, List tests, Set expectedWarnings) { + protected void validateBodyHasWarnings(String featureName, String testName, List tests, Collection expectedWarnings) { AtomicBoolean actuallyDidSomething = new AtomicBoolean(false); tests.forEach(test -> { Iterator> testsIterator = test.fields(); @@ -200,13 +202,10 @@ protected void validateBodyHasWarnings(String featureName, String testName, List ObjectNode doSection = (ObjectNode) testSection.get("do"); assertThat(doSection.get(featureName), CoreMatchers.notNullValue()); ArrayNode warningsNode = (ArrayNode) doSection.get(featureName); - LongAdder assertions = new LongAdder(); - warningsNode.forEach(warning -> { - if (expectedWarnings.contains(warning.asText())) { - assertions.increment(); - } - }); - assertThat(assertions.intValue(), CoreMatchers.equalTo(expectedWarnings.size())); + List actual = new ArrayList<>(); + warningsNode.forEach(node -> actual.add(node.asText())); + String[] expected = expectedWarnings.toArray(new String[]{}); + assertThat(actual, Matchers.containsInAnyOrder(expected)); actuallyDidSomething.set(true); } }); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/header/InjectHeaderTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/header/InjectHeaderTests.java index 34d1273a086a2..07881a0f48678 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/header/InjectHeaderTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/header/InjectHeaderTests.java @@ -14,9 +14,12 @@ import org.elasticsearch.gradle.internal.test.rest.transform.headers.InjectHeaders; import org.junit.Test; +import java.util.ArrayList; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; public class InjectHeaderTests extends InjectFeatureTests { @@ -57,6 +60,34 @@ public void testInjectHeadersWithPreExisting() throws Exception { validateBodyHasHeaders(transformedTests, headers); } + + @Test + public void testNotInjectingHeaders() throws Exception { + String testName = "/rest/transform/header/with_operation_to_skip_adding_headers.yml"; + List tests = getTests(testName); + validateSetupExist(tests); + validateBodyHasHeaders(tests, Map.of("foo", "bar")); + + List> transformations = + Collections.singletonList(new InjectHeaders(headers, Set.of(InjectHeaderTests::applyCondition))); + List transformedTests = 
transformTests(tests, transformations); + printTest(testName, transformedTests); + validateSetupAndTearDown(transformedTests); + validateBodyHasHeaders(tests, Map.of("foo", "bar")); + validateBodyHasHeaders(transformedTests, Map.of("foo", "bar")); + } + + private static boolean applyCondition(ObjectNode doNodeValue) { + final Iterator fieldNamesIterator = doNodeValue.fieldNames(); + while (fieldNamesIterator.hasNext()) { + final String fieldName = fieldNamesIterator.next(); + if (fieldName.startsWith("something_to_skip")) { + return false; + } + } + return true; + } + @Override protected List getKnownFeatures() { return Collections.singletonList("headers"); @@ -64,7 +95,7 @@ protected List getKnownFeatures() { @Override protected List> getTransformations() { - return Collections.singletonList(new InjectHeaders(headers)); + return Collections.singletonList(new InjectHeaders(headers, Collections.emptySet())); } @Override diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsRegexTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsRegexTests.java index 3deddc8309a47..9d24b57f5689e 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsRegexTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsRegexTests.java @@ -49,8 +49,7 @@ public void testInjectAllowedWarningsWithPreExisting() throws Exception { List transformedTests = transformTests(tests); printTest(testName, transformedTests); validateSetupAndTearDown(transformedTests); - validateBodyHasWarnings(ALLOWED_WARNINGS_REGEX, tests, Set.of("c", "d")); - validateBodyHasWarnings(ALLOWED_WARNINGS_REGEX, tests, addWarnings); + validateBodyHasWarnings(ALLOWED_WARNINGS_REGEX, tests, Set.of("c", "d", "added warning")); } @Override diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsTests.java index 580204d6e6819..b958b07773bca 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectAllowedWarningsTests.java @@ -49,8 +49,24 @@ public void testInjectAllowedWarningsWithPreExisting() throws Exception { List transformedTests = transformTests(tests); printTest(testName, transformedTests); validateSetupAndTearDown(transformedTests); + validateBodyHasWarnings(ALLOWED_WARNINGS, transformedTests, List.of("a", "b", "added warning")); + } + + @Test + public void testInjectAllowedWarningsWithPreExistingForSingleTest() throws Exception { + String testName = "/rest/transform/warnings/with_existing_allowed_warnings.yml"; + List tests = getTests(testName); + validateSetupExist(tests); validateBodyHasWarnings(ALLOWED_WARNINGS, tests, Set.of("a", "b")); - validateBodyHasWarnings(ALLOWED_WARNINGS, tests, addWarnings); + List transformedTests = transformTests(tests, getTransformationsForTest("Test with existing allowed warnings")); + printTest(testName, transformedTests); + validateSetupAndTearDown(transformedTests); + 
validateBodyHasWarnings(ALLOWED_WARNINGS, "Test with existing allowed warnings", transformedTests, Set.of("a", "b", "added warning")); + validateBodyHasWarnings(ALLOWED_WARNINGS, "Test with existing allowed warnings not to change", transformedTests, Set.of("a", "b")); + } + + private List> getTransformationsForTest(String testName) { + return Collections.singletonList(new InjectAllowedWarnings(false, new ArrayList<>(addWarnings), testName)); } @Override diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java index ad95016add15c..6107387d796fc 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java @@ -67,8 +67,8 @@ public void testInjectWarningsWithPreExisting() throws Exception { List transformedTests = transformTests(tests); printTest(testName, transformedTests); validateSetupAndTearDown(transformedTests); - validateBodyHasWarnings(WARNINGS_REGEX, tests, Set.of("c", "d")); - validateBodyHasWarnings(WARNINGS_REGEX, "Test warnings", tests, addWarnings); + validateBodyHasWarnings(WARNINGS_REGEX, "Not the test to change", tests, Set.of("c", "d")); + validateBodyHasWarnings(WARNINGS_REGEX, "Test warnings", tests, Set.of("c", "d", "added warning")); } @Override diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java index cd338502e00d0..a4dbc1c0f49a2 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java @@ -66,8 +66,8 @@ public void testInjectWarningsWithPreExisting() throws Exception { List transformedTests = transformTests(tests); printTest(testName, transformedTests); validateSetupAndTearDown(transformedTests); - validateBodyHasWarnings(WARNINGS, tests, Set.of("a", "b")); - validateBodyHasWarnings(WARNINGS, "Test warnings", tests, addWarnings); + validateBodyHasWarnings(WARNINGS, "Not the test to change", tests, Set.of("a", "b")); + validateBodyHasWarnings(WARNINGS, "Test warnings", tests, Set.of("a", "b", "added warning")); } @Override diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarningsTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarningsTests.java index 8f4b4d8066853..cc6e1c256a15a 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarningsTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/RemoveWarningsTests.java @@ -50,6 +50,23 @@ public void testRemoveWarningWithPreExisting() throws Exception { validateBodyHasWarnings(WARNINGS, tests, Set.of("b")); } + @Test + public void testRemoveWarningWithPreExistingFromSingleTest() throws Exception { + String testName = 
"/rest/transform/warnings/with_existing_warnings.yml"; + List tests = getTests(testName); + validateSetupExist(tests); + validateBodyHasWarnings(WARNINGS, tests, Set.of("a", "b")); + List transformedTests = transformTests(tests, getTransformationsForTest("Test warnings")); + printTest(testName, transformedTests); + validateSetupAndTearDown(transformedTests); + validateBodyHasWarnings(WARNINGS, "Test warnings", tests, Set.of("b")); + validateBodyHasWarnings(WARNINGS, "Not the test to change", tests, Set.of("a", "b")); + } + + private List> getTransformationsForTest(String testName) { + return Collections.singletonList(new RemoveWarnings(Set.of("a"), testName)); + } + /** * test file has preexisting single warning */ diff --git a/build-tools-internal/src/test/resources/rest/transform/header/with_operation_to_skip_adding_headers.yml b/build-tools-internal/src/test/resources/rest/transform/header/with_operation_to_skip_adding_headers.yml new file mode 100644 index 0000000000000..9387dbe2d6b31 --- /dev/null +++ b/build-tools-internal/src/test/resources/rest/transform/header/with_operation_to_skip_adding_headers.yml @@ -0,0 +1,13 @@ +--- +setup: + - skip: + features: headers +--- +"Test without a setup": + - do: + headers: + foo: "bar" + something_to_skip: + id: "something" + - match: { acknowledged: true } + diff --git a/build-tools-internal/src/test/resources/rest/transform/text/text_replace_original.yml b/build-tools-internal/src/test/resources/rest/transform/text/text_replace_original.yml index 78ce5c33b6c66..d4ad747e31baa 100644 --- a/build-tools-internal/src/test/resources/rest/transform/text/text_replace_original.yml +++ b/build-tools-internal/src/test/resources/rest/transform/text/text_replace_original.yml @@ -18,6 +18,17 @@ teardown: - do: and: again + - do: + deep: + key: "below" + body: + actions: + output: + webhook: + a: "b" + key_to_replace: "value_to_replace" + + - key_not_to_replace: { copied.from.real.test.total: 1 } - key_not_to_replace: { hits.hits.0._index: "single_doc_index"} - key_not_to_replace: { _shards.total: 2 } diff --git a/build-tools-internal/src/test/resources/rest/transform/text/text_replace_transformed.yml b/build-tools-internal/src/test/resources/rest/transform/text/text_replace_transformed.yml index 3295445dd0bf9..e6146fa2e47c7 100644 --- a/build-tools-internal/src/test/resources/rest/transform/text/text_replace_transformed.yml +++ b/build-tools-internal/src/test/resources/rest/transform/text/text_replace_transformed.yml @@ -15,6 +15,17 @@ First test: that_is: true - do: and: "again" + + - do: + deep: + key: "below" + body: + actions: + output: + webhook: + a: "b" + key_to_replace: "_replaced_value" + - key_not_to_replace: copied.from.real.test.total: 1 - key_not_to_replace: diff --git a/build-tools-internal/src/test/resources/rest/transform/warnings/with_existing_allowed_warnings.yml b/build-tools-internal/src/test/resources/rest/transform/warnings/with_existing_allowed_warnings.yml index 2e07c956239fd..baf19b6fe185f 100644 --- a/build-tools-internal/src/test/resources/rest/transform/warnings/with_existing_allowed_warnings.yml +++ b/build-tools-internal/src/test/resources/rest/transform/warnings/with_existing_allowed_warnings.yml @@ -17,3 +17,15 @@ setup: id: "something" - match: { acknowledged: true } +--- +"Test with existing allowed warnings not to change": + - do: + allowed_warnings: + - "a" + - "b" + allowed_warnings_regex: + - "c" + - "d" + something: + id: "something_else" + - match: { acknowledged: true } diff --git 
a/build-tools-internal/src/testKit/elasticsearch.build/build.gradle b/build-tools-internal/src/testKit/elasticsearch.build/build.gradle index 0f379b6127b36..855d767448719 100644 --- a/build-tools-internal/src/testKit/elasticsearch.build/build.gradle +++ b/build-tools-internal/src/testKit/elasticsearch.build/build.gradle @@ -2,8 +2,6 @@ plugins { id 'java' id 'elasticsearch.global-build-info' } -import org.elasticsearch.gradle.internal.info.BuildParams -BuildParams.init { it.setIsInternal(true) } apply plugin:'elasticsearch.build' diff --git a/build-tools-internal/src/testKit/thirdPartyAudit/build.gradle b/build-tools-internal/src/testKit/thirdPartyAudit/build.gradle index f7f032ca3f8a9..6324a53958d47 100644 --- a/build-tools-internal/src/testKit/thirdPartyAudit/build.gradle +++ b/build-tools-internal/src/testKit/thirdPartyAudit/build.gradle @@ -28,6 +28,10 @@ repositories { mavenCentral() } +project("libs:elasticsearch-core") { + apply plugin:'java' +} + dependencies { forbiddenApisCliJar 'de.thetaphi:forbiddenapis:2.7' jdkJarHell 'org.elasticsearch:elasticsearch-core:current' diff --git a/build-tools-internal/src/testKit/thirdPartyAudit/settings.gradle b/build-tools-internal/src/testKit/thirdPartyAudit/settings.gradle index 093d6276018b0..66d73ac4a5a26 100644 --- a/build-tools-internal/src/testKit/thirdPartyAudit/settings.gradle +++ b/build-tools-internal/src/testKit/thirdPartyAudit/settings.gradle @@ -1 +1,2 @@ -include 'sample_jars' \ No newline at end of file +include 'sample_jars' +include 'libs:elasticsearch-core' \ No newline at end of file diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 6cb185ef14b47..5e5a2a420ec2b 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,8 +1,8 @@ elasticsearch = 8.0.0 -lucene = 8.9.0-snapshot-ddc238e5df8 +lucene = 8.9.0 -bundled_jdk_vendor = adoptopenjdk -bundled_jdk = 16.0.1+9 +bundled_jdk_vendor = openjdk +bundled_jdk = 16.0.2+7@d4a915d82b4c4fbb9bde534da945d746 checkstyle = 8.42 @@ -21,7 +21,7 @@ ecsLogging = 0.1.3 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.7.0-1 -netty = 4.1.63.Final +netty = 4.1.66.Final joda = 2.10.10 commons_lang3 = 3.9 @@ -53,4 +53,4 @@ jimfs = 1.2 jimfs_guava = 30.1-jre # test framework -networknt_json_schema_validator = 1.0.48 \ No newline at end of file +networknt_json_schema_validator = 1.0.48 diff --git a/build-tools/build.gradle b/build-tools/build.gradle index 80e5cc938eb82..6743306dafe22 100644 --- a/build-tools/build.gradle +++ b/build-tools/build.gradle @@ -12,8 +12,7 @@ plugins { id 'groovy' id 'java-test-fixtures' id 'elasticsearch.publish' - id 'elasticsearch.internal-licenseheaders' - id 'elasticsearch.basic-build-tool-conventions' + id 'elasticsearch.build-tools' } description = "The elasticsearch build tools" @@ -21,11 +20,11 @@ description = "The elasticsearch build tools" // we update the version property to reflect if we are building a snapshot or a release build // we write this back out below to load it in the Build.java which will be shown in rest main action // to indicate this being a snapshot build or a release build. 
-Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('../build-tools-internal/version.properties')) +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('../build-tools-internal/version.properties'), project.getProviders()) def minRuntimeJava = JavaVersion.toVersion(file('../build-tools-internal/src/main/resources/minimumRuntimeVersion').text) allprojects { - group = "org.elasticsearch" + group = "org.elasticsearch.gradle" version = props.getProperty("elasticsearch") apply plugin: 'java' @@ -55,6 +54,10 @@ gradlePlugin { id = 'elasticsearch.reaper' implementationClass = 'org.elasticsearch.gradle.ReaperPlugin' } + testpermissions { + id = 'elasticsearch.test-gradle-policy' + implementationClass = 'org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin' + } } } @@ -66,7 +69,6 @@ def generateVersionProperties = tasks.register("generateVersionProperties", Writ tasks.named("processResources").configure { from(generateVersionProperties) - exclude 'buildSrc.marker' into('META-INF') { from configurations.reaper } @@ -107,9 +109,6 @@ dependencies { api 'org.apache.commons:commons-compress:1.19' api 'org.apache.ant:ant:1.10.8' api 'commons-io:commons-io:2.2' - api 'gradle.plugin.com.github.jengelman.gradle.plugins:shadow:7.0.0' - - runtimeOnly project(":reaper") testFixturesApi "junit:junit:${props.getProperty('junit')}" testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" @@ -216,4 +215,4 @@ tasks.named("check").configure { dependsOn("integTest") } // baseClass 'spock.lang.Specification' // } // } -// } \ No newline at end of file +// } diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy new file mode 100644 index 0000000000000..6d72dc0a611e5 --- /dev/null +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/GradleTestPolicySetupPluginFuncTest.groovy @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.test + +import org.elasticsearch.gradle.fixtures.AbstractGradleFuncTest +import org.gradle.testkit.runner.TaskOutcome + +class GradleTestPolicySetupPluginFuncTest extends AbstractGradleFuncTest { + + def "configures test tasks"() { + given: + file("src/test/java/org/acme/SysPropTest.java") << """ + package org.acme; + + import static org.junit.Assert.*; + import org.junit.After; + import org.junit.Before; + import org.junit.Test; + + public class SysPropTest { + @Test + public void verifySysProps() { + assertNotNull(System.getProperty("gradle.dist.lib")); + assertNotNull(System.getProperty("gradle.worker.jar")); + assertEquals(System.getProperty("tests.gradle"), "true"); + assertEquals(System.getProperty("tests.task"), ":test"); + } + } + """ + + buildFile << """ + plugins { + id "elasticsearch.test-gradle-policy" + id "java" + } + + repositories { + mavenCentral() + } + + dependencies { + testImplementation "junit:junit:4.13" + } + """ + + when: + def result = gradleRunner('test', '-g', "guh1").build() + + then: + result.task(":test").outcome == TaskOutcome.SUCCESS + + when: // changing gradle user home + result = gradleRunner('test', '-g', "guh2").build() + then: // still up-to-date + result.task(":test").outcome == TaskOutcome.UP_TO_DATE + } +} \ No newline at end of file diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java b/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java index da65583818e3b..ece27cef7b66f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java @@ -149,7 +149,6 @@ private Path locateReaperJar() { OutputStream out = Files.newOutputStream(jarPath); InputStream jarInput = this.getClass().getResourceAsStream("/META-INF/reaper.jar"); ) { - System.out.println("jarInput = " + jarInput); logger.info("Copying reaper.jar..."); jarInput.transferTo(out); } catch (IOException e) { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/Version.java b/build-tools/src/main/java/org/elasticsearch/gradle/Version.java index 8ac6193bdc3f9..a86e16ad740fd 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/Version.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/Version.java @@ -19,6 +19,7 @@ public final class Version implements Comparable { private final int minor; private final int revision; private final int id; + private final String qualifier; /** * Specifies how a version string should be parsed. 
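// ---------------------------------------------------------------------------
// Editor's sketch, not part of this change set: how the reworked version
// patterns in the hunks that follow behave. The two Pattern literals are
// copied from the diff below; the class name and println calls are
// illustrative only.
// ---------------------------------------------------------------------------
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class VersionPatternSketch {
    // strict mode: qualifier restricted to alphaN / betaN / rcN / SNAPSHOT
    private static final Pattern STRICT =
        Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(?:-(alpha\\d+|beta\\d+|rc\\d+|SNAPSHOT))?");
    // relaxed mode: optional leading "v" and free-form, possibly multi-part qualifiers
    private static final Pattern RELAXED =
        Pattern.compile("v?(\\d+)\\.(\\d+)\\.(\\d+)(?:-([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?");

    public static void main(String[] args) {
        Matcher strict = STRICT.matcher("1.2.3-rc1");
        if (strict.matches()) {
            System.out.println(strict.group(4)); // group 4 is the new qualifier group -> "rc1"
        }
        Matcher relaxed = RELAXED.matcher("v1.2.3-SNAPSHOT-EXTRA");
        if (relaxed.matches()) {
            System.out.println(relaxed.group(4)); // -> "SNAPSHOT-EXTRA", as asserted in VersionTests.testQualifiers
        }
    }
}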
@@ -36,27 +37,23 @@ public enum Mode { RELAXED } - private static final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?"); + private static final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(?:-(alpha\\d+|beta\\d+|rc\\d+|SNAPSHOT))?"); - private static final Pattern relaxedPattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(-[a-zA-Z0-9_]+)*?"); + private static final Pattern relaxedPattern = Pattern.compile("v?(\\d+)\\.(\\d+)\\.(\\d+)(?:-([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?"); public Version(int major, int minor, int revision) { - Objects.requireNonNull(major, "major version can't be null"); - Objects.requireNonNull(minor, "minor version can't be null"); - Objects.requireNonNull(revision, "revision version can't be null"); + this(major, minor, revision, null); + } + + public Version(int major, int minor, int revision, String qualifier) { this.major = major; this.minor = minor; this.revision = revision; - // currently snapshot is not taken into account + // currently qualifier is not taken into account this.id = major * 10000000 + minor * 100000 + revision * 1000; - } - private static int parseSuffixNumber(String substring) { - if (substring.isEmpty()) { - throw new IllegalArgumentException("Invalid suffix, must contain a number e.x. alpha2"); - } - return Integer.parseInt(substring); + this.qualifier = qualifier; } public static Version fromString(final String s) { @@ -68,17 +65,24 @@ public static Version fromString(final String s, final Mode mode) { Matcher matcher = mode == Mode.STRICT ? pattern.matcher(s) : relaxedPattern.matcher(s); if (matcher.matches() == false) { String expected = mode == Mode.STRICT - ? "major.minor.revision[-(alpha|beta|rc)Number][-SNAPSHOT]" + ? "major.minor.revision[-(alpha|beta|rc)Number|-SNAPSHOT]" : "major.minor.revision[-extra]"; throw new IllegalArgumentException("Invalid version format: '" + s + "'. Should be " + expected); } - return new Version(Integer.parseInt(matcher.group(1)), parseSuffixNumber(matcher.group(2)), parseSuffixNumber(matcher.group(3))); + String qualifier = matcher.group(4); + + return new Version( + Integer.parseInt(matcher.group(1)), + Integer.parseInt(matcher.group(2)), + Integer.parseInt(matcher.group(3)), + qualifier + ); } @Override public String toString() { - return String.valueOf(getMajor()) + "." + String.valueOf(getMinor()) + "." + String.valueOf(getRevision()); + return getMajor() + "." + getMinor() + "." 
+ getRevision(); } public boolean before(Version compareTo) { @@ -146,6 +150,10 @@ protected int getId() { return id; } + public String getQualifier() { + return qualifier; + } + @Override public int compareTo(Version other) { return Integer.compare(getId(), other.getId()); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java index 5c367eb339111..54674d5476f95 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java @@ -8,14 +8,15 @@ package org.elasticsearch.gradle.plugin; -import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin; import groovy.lang.Closure; + import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin; import org.elasticsearch.gradle.jarhell.JarHellPlugin; +import org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; import org.elasticsearch.gradle.testclusters.RunTask; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; @@ -59,6 +60,7 @@ public void apply(final Project project) { project.getPluginManager().apply(TestClustersPlugin.class); project.getPluginManager().apply(CompileOnlyResolvePlugin.class); project.getPluginManager().apply(JarHellPlugin.class); + project.getPluginManager().apply(GradleTestPolicySetupPlugin.class); var extension = project.getExtensions().create(PLUGIN_EXTENSION_NAME, PluginPropertiesExtension.class, project); configureDependencies(project); @@ -208,7 +210,7 @@ public void execute(Task task) { */ zip.from(new Closure(null, null) { public Object doCall(Object it) { - return project.getPlugins().hasPlugin(ShadowPlugin.class) + return project.getPluginManager().hasPlugin("com.github.johnrengelman.shadow") ? project.getTasks().named("shadowJar") : project.getTasks().named("jar"); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java new file mode 100644 index 0000000000000..a1da860abe26a --- /dev/null +++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.test; + +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.invocation.Gradle; +import org.gradle.api.tasks.testing.Test; + +public class GradleTestPolicySetupPlugin implements Plugin { + + @Override + public void apply(Project project) { + Gradle gradle = project.getGradle(); + project.getTasks().withType(Test.class).configureEach(test -> { + test.systemProperty("tests.gradle", true); + test.systemProperty("tests.task", test.getPath()); + + SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider(); + // don't track these as inputs since they contain absolute paths and break cache relocatability + nonInputProperties.systemProperty("gradle.dist.lib", gradle.getGradleHomeDir().getAbsolutePath() + "/lib"); + nonInputProperties.systemProperty( + "gradle.worker.jar", + gradle.getGradleUserHomeDir().getAbsolutePath() + "/caches/" + gradle.getGradleVersion() + "/workerMain/gradle-worker.jar" + ); + test.getJvmArgumentProviders().add(nonInputProperties); + }); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/SystemPropertyCommandLineArgumentProvider.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/SystemPropertyCommandLineArgumentProvider.java similarity index 97% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/SystemPropertyCommandLineArgumentProvider.java rename to build-tools/src/main/java/org/elasticsearch/gradle/test/SystemPropertyCommandLineArgumentProvider.java index de00c0cd4f643..e5f4e7254d610 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/SystemPropertyCommandLineArgumentProvider.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/SystemPropertyCommandLineArgumentProvider.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.gradle.internal.test; +package org.elasticsearch.gradle.test; import org.gradle.api.tasks.Input; import org.gradle.process.CommandLineArgumentProvider; diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 557f915a9a5e0..5c94a842706c6 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.gradle.testclusters; +import org.gradle.api.Action; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.tasks.Nested; @@ -24,11 +25,9 @@ default void useCluster(ElasticsearchCluster cluster) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); } - cluster.getNodes() - .stream() - .flatMap(node -> node.getDistributions().stream()) - .forEach(distro -> dependsOn(getProject().provider(() -> distro.maybeFreeze()))); - cluster.getNodes().forEach(node -> dependsOn((Callable>) node::getPluginAndModuleConfigurations)); + cluster.getNodes().all(node -> node.getDistributions().stream() + .forEach(distro -> dependsOn(getProject().provider(() -> distro.maybeFreeze())))); + cluster.getNodes().all(node -> dependsOn((Callable>) node::getPluginAndModuleConfigurations)); getClusters().add(cluster); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 4690efee85d61..1e1ef8f17f157 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.ReaperPlugin; import org.elasticsearch.gradle.ReaperService; import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -120,6 +121,7 @@ private NamedDomainObjectContainer createTestClustersConta ); }); project.getExtensions().add(EXTENSION_NAME, container); + container.all(cluster -> cluster.systemProperty("ingest.geoip.downloader.enabled.default", "false")); return container; } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java index 03016e08a74cb..b2b6ba6658ba7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java @@ -19,6 +19,7 @@ import org.gradle.api.plugins.JavaBasePlugin; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.Provider; import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceRegistration; @@ -46,26 +47,11 @@ public static Action noop() { } public static SourceSetContainer getJavaSourceSets(Project project) { - return project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); - } - - public static TaskProvider maybeRegister(TaskContainer tasks, String name, Class 
clazz, Action action) { - try { - return tasks.named(name, clazz); - } catch (UnknownTaskException e) { - return tasks.register(name, clazz, action); - } + return project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets(); } public static void maybeConfigure(TaskContainer tasks, String name, Action config) { - TaskProvider task; - try { - task = tasks.named(name); - } catch (UnknownTaskException e) { - return; - } - - task.configure(config); + tasks.matching(t -> t.getName().equals(name)).configureEach( t-> config.execute(t)); } public static void maybeConfigure( @@ -219,8 +205,7 @@ public static String getProjectPathFromTask(String taskPath) { public static boolean isModuleProject(String projectPath) { return projectPath.contains("modules:") - || projectPath.startsWith(":x-pack:plugin") - || projectPath.startsWith(":x-pack:quota-aware-fs"); + || projectPath.startsWith(":x-pack:plugin"); } public static void disableTransitiveDependencies(Configuration config) { diff --git a/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java b/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java index 37aa5cf9d21da..2dae3d9f70900 100644 --- a/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ b/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java @@ -12,10 +12,15 @@ import org.junit.Rule; import org.junit.rules.ExpectedException; -import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Set; +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.nullValue; + public class VersionTests extends GradleUnitTestCase { @Rule @@ -26,7 +31,6 @@ public void testVersionParsing() { assertVersionEquals("7.0.1-alpha2", 7, 0, 1); assertVersionEquals("5.1.2-rc3", 5, 1, 2); assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2); - assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2); assertVersionEquals("17.03.11", 17, 3, 11); } @@ -41,29 +45,30 @@ public void testRelaxedVersionParsing() { public void testCompareWithStringVersions() { assertTrue("1.10.20 is not interpreted as before 2.0.0", Version.fromString("1.10.20").before("2.0.0")); - assertTrue( + assertEquals( "7.0.0-alpha1 should be equal to 7.0.0-alpha1", - Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1")) + Version.fromString("7.0.0-alpha1"), + Version.fromString("7.0.0-alpha1") ); - assertTrue( + assertEquals( "7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT", - Version.fromString("7.0.0-SNAPSHOT").equals(Version.fromString("7.0.0-SNAPSHOT")) + Version.fromString("7.0.0-SNAPSHOT"), + Version.fromString("7.0.0-SNAPSHOT") ); } public void testCollections() { - assertTrue( - Arrays.asList( - Version.fromString("5.2.0"), - Version.fromString("5.2.1-SNAPSHOT"), - Version.fromString("6.0.0"), - Version.fromString("6.0.1"), - Version.fromString("6.1.0") - ).containsAll(Arrays.asList(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))) + List aList = asList( + Version.fromString("5.2.0"), + Version.fromString("5.2.1-SNAPSHOT"), + Version.fromString("6.0.0"), + Version.fromString("6.0.1"), + Version.fromString("6.1.0") ); - Set versions = new HashSet<>(); - versions.addAll( - Arrays.asList( + assertThat(aList, hasItems(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))); + + Set aSet = new HashSet<>( + asList( Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), 
Version.fromString("6.0.0"), @@ -71,9 +76,7 @@ public void testCollections() { Version.fromString("6.1.0") ) ); - Set subset = new HashSet<>(); - subset.addAll(Arrays.asList(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))); - assertTrue(versions.containsAll(subset)); + assertThat(aSet, hasItems(Version.fromString("6.0.1"), Version.fromString("5.2.1-SNAPSHOT"))); } public void testToString() { @@ -97,6 +100,20 @@ public void testExceptionSyntax() { Version.fromString("foo.bar.baz"); } + public void testQualifiers() { + Version v = Version.fromString("1.2.3"); + assertThat(v.getQualifier(), nullValue()); + + v = Version.fromString("1.2.3-rc1"); + assertThat(v.getQualifier(), equalTo("rc1")); + + v = Version.fromString("1.2.3-SNAPSHOT"); + assertThat(v.getQualifier(), equalTo("SNAPSHOT")); + + v = Version.fromString("1.2.3-SNAPSHOT-EXTRA", Version.Mode.RELAXED); + assertThat(v.getQualifier(), equalTo("SNAPSHOT-EXTRA")); + } + private void assertOrder(Version smaller, Version bigger) { assertEquals(smaller + " should be smaller than " + bigger, -1, smaller.compareTo(bigger)); } diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 5d5935b00cc04..5791744a31b31 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -101,8 +101,6 @@ abstract class AbstractGradleFuncTest extends Specification { import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.internal.info.BuildParams - BuildParams.init { it.setIsInternal(true) } - import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.Version @@ -113,7 +111,7 @@ abstract class AbstractGradleFuncTest extends Specification { ) BwcVersions versions = new BwcVersions(new TreeSet<>(versionList), currentVersion) - BuildParams.init { it.setBwcVersions(versions) } + BuildParams.init { it.setBwcVersions(provider(() -> versions)) } """ } diff --git a/build.gradle b/build.gradle index 62e8d8eb78db8..d567e231f9f65 100644 --- a/build.gradle +++ b/build.gradle @@ -7,24 +7,25 @@ */ import com.avast.gradle.dockercompose.tasks.ComposePull +import com.fasterxml.jackson.databind.JsonNode +import com.fasterxml.jackson.databind.ObjectMapper import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin -import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.internal.BuildPlugin import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.AccessRule -import org.gradle.plugins.ide.eclipse.model.SourceFolder import org.gradle.util.DistributionLocator import org.gradle.util.GradleVersion import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure import org.gradle.plugins.ide.eclipse.model.ProjectDependency -import org.elasticsearch.gradle.testclusters.TestClustersPlugin -import org.elasticsearch.gradle.internal.test.RestTestBasePlugin import org.elasticsearch.gradle.internal.InternalPluginBuildPlugin -import 
org.elasticsearch.gradle.internal.InternalTestClustersPlugin +import org.elasticsearch.gradle.internal.ResolveAllDependencies +import java.nio.file.Files +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING plugins { id 'lifecycle-base' @@ -42,17 +43,10 @@ plugins { id 'elasticsearch.fips' id 'elasticsearch.internal-testclusters' id 'elasticsearch.run' + id 'elasticsearch.release-tools' id "com.diffplug.spotless" version "5.12.5" apply false } - -// common maven publishing configuration -allprojects { - group = 'org.elasticsearch' - version = VersionProperties.elasticsearch - description = "Elasticsearch subproject ${project.path}" -} - String licenseCommit if (VersionProperties.elasticsearch.toString().endsWith('-SNAPSHOT')) { licenseCommit = BuildParams.gitRevision ?: "master" // leniency for non git builds @@ -60,31 +54,6 @@ if (VersionProperties.elasticsearch.toString().endsWith('-SNAPSHOT')) { licenseCommit = "v${version}" } -subprojects { - // We disable this plugin for now till we shaked out the issues we see - // e.g. see https://github.com/elastic/elasticsearch/issues/72169 - // apply plugin:'elasticsearch.internal-test-rerun' - - plugins.withType(BuildPlugin).whenPluginAdded { - project.licenseFile = project.rootProject.file('licenses/SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - project.noticeFile = project.rootProject.file('NOTICE.txt') - } - - plugins.withType(InternalPluginBuildPlugin).whenPluginAdded { - project.dependencies { - compileOnly project(":server") - testImplementation project(":test:framework") - } - } - - // Ultimately the RestTestBase Plugin should apply the InternalTestClusters Plugin itself instead of TestClusters - // but this requires major rework on the func test infrastructure. - // TODO: This will be addressed once we have https://github.com/elastic/elasticsearch/issues/71593 resolved - project.plugins.withType(RestTestBasePlugin) { - project.plugins.apply(InternalTestClustersPlugin) - } -} - /** * This is a convenient method for declaring test artifact dependencies provided by the internal * test artifact plugin. It replaces basically the longer dependency notation with explicit capability @@ -115,29 +84,6 @@ tasks.register("updateCIBwcVersions") { } } -// build metadata from previous build, contains eg hashes for bwc builds -String buildMetadataValue = System.getenv('BUILD_METADATA') -if (buildMetadataValue == null) { - buildMetadataValue = '' -} -Map buildMetadataMap = buildMetadataValue.tokenize(';').collectEntries { - def (String key, String value) = it.split('=') - return [key, value] -} - -// injecting groovy property variables into all projects -allprojects { - project.ext { - // for ide hacks... 
- isEclipse = System.getProperty("eclipse.launcher") != null || // Detects gradle launched from Eclipse's IDE - System.getProperty("eclipse.application") != null || // Detects gradle launched from the Eclipse compiler server - gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff - gradle.startParameter.taskNames.contains('cleanEclipse') - - buildMetadata = buildMetadataMap - } -} - tasks.register("verifyVersions") { doLast { if (gradle.startParameter.isOffline()) { @@ -160,6 +106,28 @@ tasks.register("verifyVersions") { throw new Exception(".ci/bwcVersions is outdated, run `./gradlew updateCIBwcVersions` and check in the results"); } } + + // Make sure backport bot config file is up to date + JsonNode backportConfig = new ObjectMapper().readTree(file(".backportrc.json")) + List unreleased = BuildParams.bwcVersions.unreleased.collect { BuildParams.bwcVersions.unreleasedInfo(it) } + unreleased.each { unreleasedVersion -> + boolean valid = backportConfig.get("targetBranchChoices").elements().any { branchChoice -> + if (branchChoice.isObject()) { + return branchChoice.get("name").textValue() == unreleasedVersion.branch + } else { + return branchChoice.textValue() == unreleasedVersion.branch + } + } + if (valid == false) { + throw new GradleException("No branch choice exists for development branch ${unreleasedVersion.branch} in .backportrc.json.") + } + } + BwcVersions.UnreleasedVersionInfo nextMinor = unreleased.find { it.branch.endsWith("x") } + String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == nextMinor.branch }.key + if (versionMapping != "^v${nextMinor.version}\$") { + throw new GradleException("Backport label mapping for branch ${nextMinor.branch} is '${versionMapping}' but should be " + + "'^v${nextMinor.version}\$'. Update .backportrc.json.") + } } } @@ -197,25 +165,38 @@ if (project.gradle.startParameter.taskNames.find { it.startsWith("checkPart") } bwc_tests_enabled = false } -subprojects { - ext.bwc_tests_enabled = bwc_tests_enabled -} +allprojects { + // common maven publishing configuration + group = 'org.elasticsearch' + version = VersionProperties.elasticsearch + description = "Elasticsearch subproject ${project.path}" -tasks.register("verifyBwcTestsEnabled") { - doLast { - if (bwc_tests_enabled == false) { - throw new GradleException('Bwc tests are disabled. They must be re-enabled after completing backcompat behavior backporting.') + // We disable this plugin for now till we shaked out the issues we see + // e.g. see https://github.com/elastic/elasticsearch/issues/72169 + // apply plugin:'elasticsearch.internal-test-rerun' + plugins.withType(BuildPlugin).whenPluginAdded { + project.licenseFile = project.rootProject.file('licenses/SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + project.noticeFile = project.rootProject.file('NOTICE.txt') + } + + plugins.withType(InternalPluginBuildPlugin).whenPluginAdded { + project.dependencies { + compileOnly project(":server") + testImplementation project(":test:framework") } } -} -tasks.register("branchConsistency") { - description 'Ensures this branch is internally consistent. For example, that versions constants match released versions.' - group 'Verification' - dependsOn ":verifyVersions", ":verifyBwcTestsEnabled" -} + // injecting groovy property variables into all projects + project.ext { + // for ide hacks... 
+ isEclipse = providers.systemProperty("eclipse.launcher").forUseAtConfigurationTime().isPresent() || // Detects gradle launched from Eclipse's IDE + providers.systemProperty("eclipse.application").forUseAtConfigurationTime().isPresent() || // Detects gradle launched from the Eclipse compiler server + gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff + gradle.startParameter.taskNames.contains('cleanEclipse') + } + + ext.bwc_tests_enabled = bwc_tests_enabled -allprojects { // ignore missing javadocs tasks.withType(Javadoc).configureEach { Javadoc javadoc -> // the -quiet here is because of a bug in gradle, in that adding a string option @@ -227,7 +208,91 @@ allprojects { javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') } + // eclipse configuration + apply plugin: 'elasticsearch.eclipse' + + /* + * Allow accessing com/sun/net/httpserver in projects that have + * configured forbidden apis to allow it. + */ + plugins.withType(ForbiddenApisPlugin) { + eclipse.classpath.file.whenMerged { classpath -> + if (false == forbiddenApisTest.bundledSignatures.contains('jdk-non-portable')) { + classpath.entries + .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } + .each { + it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) + } + } + } + } + + tasks.register('resolveAllDependencies', ResolveAllDependencies) { + configs = project.configurations + if (project.path.contains("fixture")) { + dependsOn tasks.withType(ComposePull) + } + } + + def checkPart1 = tasks.register('checkPart1') + def checkPart2 = tasks.register('checkPart2') + plugins.withId('lifecycle-base') { + if (project.path.startsWith(":x-pack:")) { + checkPart2.configure { dependsOn 'check' } + } else { + checkPart1.configure { dependsOn 'check' } + } + } + + project.ext.disableTasks = { String... tasknames -> + for (String taskname : tasknames) { + project.tasks.named(taskname).configure { enabled = false } + } + } + + /* + * Remove assemble/dependenciesInfo on all qa projects because we don't + * need to publish artifacts for them. + */ + if (project.name.equals('qa') || project.path.contains(':qa:')) { + maybeConfigure(project.tasks, 'assemble') { + it.enabled = false + } + maybeConfigure(project.tasks, 'dependenciesInfo') { + it.enabled = false + } + maybeConfigure(project.tasks, 'dependenciesGraph') { + it.enabled = false + } + } + project.afterEvaluate { + // Ensure similar tasks in dependent projects run first. The projectsEvaluated here is + // important because, while dependencies.all will pickup future dependencies, + // it is not necessarily true that the task exists in both projects at the time + // the dependency is added. 
+ if (project.path == ':test:framework') { + // :test:framework:test cannot run before and after :server:test + return + } + tasks.matching { it.name.equals('integTest')}.configureEach {integTestTask -> + integTestTask.mustRunAfter tasks.matching { it.name.equals("test") } + } + + configurations.matching { it.canBeResolved }.all { Configuration configuration -> + dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> + Project upstreamProject = dep.dependencyProject + if (project.path != upstreamProject?.path) { + for (String taskName : ['test', 'integTest']) { + project.tasks.matching { it.name == taskName }.configureEach {task -> + task.shouldRunAfter(upstreamProject.tasks.matching { upStreamTask -> upStreamTask.name == taskName }) + } + } + } + } + } + + // Handle javadoc dependencies across projects. Order matters: the linksOffline for // org.elasticsearch:elasticsearch must be the last one or all the links for the // other packages (e.g org.elasticsearch.client) will point to server rather than @@ -265,72 +330,38 @@ allprojects { } boolean hasShadow = project.plugins.hasPlugin(ShadowPlugin) project.configurations.compileClasspath.dependencies - .findAll() - .toSorted(sortClosure) - .each({ c -> depJavadocClosure(hasShadow, c) }) + .findAll() + .toSorted(sortClosure) + .each({ c -> depJavadocClosure(hasShadow, c) }) project.configurations.compileOnly.dependencies - .findAll() - .toSorted(sortClosure) - .each({ c -> depJavadocClosure(false, c) }) + .findAll() + .toSorted(sortClosure) + .each({ c -> depJavadocClosure(false, c) }) if (hasShadow) { // include any dependencies for shadow JAR projects that are *not* bundled in the shadow JAR project.configurations.shadow.dependencies - .findAll() - .toSorted(sortClosure) - .each({ c -> depJavadocClosure(false, c) }) + .findAll() + .toSorted(sortClosure) + .each({ c -> depJavadocClosure(false, c) }) } } } + } -// Ensure similar tasks in dependent projects run first. The projectsEvaluated here is -// important because, while dependencies.all will pickup future dependencies, -// it is not necessarily true that the task exists in both projects at the time -// the dependency is added. -gradle.projectsEvaluated { - allprojects { - if (project.path == ':test:framework') { - // :test:framework:test cannot run before and after :server:test - return - } - tasks.matching { it.name.equals('integTest')}.configureEach {integTestTask -> - integTestTask.mustRunAfter tasks.matching { it.name.equals("test") } - } - configurations.matching { it.canBeResolved }.all { Configuration configuration -> - dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> - Project upstreamProject = dep.dependencyProject - if (project.path != upstreamProject?.path) { - for (String taskName : ['test', 'integTest']) { - project.tasks.matching { it.name == taskName }.configureEach {task -> - task.shouldRunAfter(upstreamProject.tasks.matching { upStreamTask -> upStreamTask.name == taskName }) - } - } - } - } +tasks.register("verifyBwcTestsEnabled") { + doLast { + if (bwc_tests_enabled == false) { + throw new GradleException('Bwc tests are disabled. They must be re-enabled after completing backcompat behavior backporting.') } } } -// eclipse configuration -allprojects { - apply plugin: 'elasticsearch.eclipse' - - /* - * Allow accessing com/sun/net/httpserver in projects that have - * configured forbidden apis to allow it. 
- */ - plugins.withType(ForbiddenApisPlugin) { - eclipse.classpath.file.whenMerged { classpath -> - if (false == forbiddenApisTest.bundledSignatures.contains('jdk-non-portable')) { - classpath.entries - .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } - .each { - it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) - } - } - } - } +tasks.register("branchConsistency") { + description 'Ensures this branch is internally consistent. For example, that versions constants match released versions.' + group 'Verification' + dependsOn ":verifyVersions", ":verifyBwcTestsEnabled" } tasks.named("wrapper").configure { @@ -343,6 +374,9 @@ tasks.named("wrapper").configure { final String sha256Sum = new String(sha256Uri.toURL().bytes) wrapper.getPropertiesFile() << "distributionSha256Sum=${sha256Sum}\n" println "Added checksum to wrapper properties" + // copy wrapper properties file to build-tools-internal to allow seamless idea integration + def file = new File("build-tools-internal/gradle/wrapper/gradle-wrapper.properties") + Files.copy(wrapper.getPropertiesFile().toPath(), file.toPath(), REPLACE_EXISTING) // Update build-tools to reflect the Gradle upgrade // TODO: we can remove this once we have tests to make sure older versions work. project.file('build-tools-internal/src/main/resources/minimumGradleVersion').text = gradleVersion @@ -351,23 +385,6 @@ tasks.named("wrapper").configure { } gradle.projectsEvaluated { - subprojects { - /* - * Remove assemble/dependenciesInfo on all qa projects because we don't - * need to publish artifacts for them. - */ - if (project.name.equals('qa') || project.path.contains(':qa:')) { - maybeConfigure(project.tasks, 'assemble') { - it.enabled = false - } - maybeConfigure(project.tasks, 'dependenciesInfo') { - it.enabled = false - } - maybeConfigure(project.tasks, 'dependenciesGraph') { - it.enabled = false - } - } - } // Having the same group and name for distinct projects causes Gradle to consider them equal when resolving // dependencies leading to hard to debug failures. Run a check across all project to prevent this from happening. 
// see: https://github.com/gradle/gradle/issues/847 @@ -385,50 +402,7 @@ gradle.projectsEvaluated { } } -allprojects { - tasks.register('resolveAllDependencies', org.elasticsearch.gradle.internal.ResolveAllDependencies) { - configs = project.configurations - if (project.path.contains("fixture")) { - dependsOn tasks.withType(ComposePull) - } - } - - // helper task to print direct dependencies of a single task - project.tasks.addRule("Pattern: Dependencies") { String taskName -> - if (taskName.endsWith("Dependencies") == false) { - return - } - if (project.tasks.findByName(taskName) != null) { - return - } - String realTaskName = taskName.substring(0, taskName.length() - "Dependencies".length()) - Task realTask = project.tasks.findByName(realTaskName) - if (realTask == null) { - return - } - project.tasks.register(taskName) { - doLast { - println("${realTask.path} dependencies:") - for (Task dep : realTask.getTaskDependencies().getDependencies(realTask)) { - println(" - ${dep.path}") - } - } - } - } - - def checkPart1 = tasks.register('checkPart1') - def checkPart2 = tasks.register('checkPart2') - plugins.withId('lifecycle-base') { - if (project.path.startsWith(":x-pack:")) { - checkPart2.configure { dependsOn 'check' } - } else { - checkPart1.configure { dependsOn 'check' } - } - } -} - - -tasks.register("precommit") { +tasks.named("precommit") { dependsOn gradle.includedBuild('build-tools').task(':precommit') dependsOn gradle.includedBuild('build-tools-internal').task(':precommit') } @@ -453,19 +427,3 @@ tasks.named("eclipse").configure { dependsOn gradle.includedBuild('build-tools').task(':eclipse') dependsOn gradle.includedBuild('build-tools-internal').task(':eclipse') } - -subprojects { - project.ext.disableTasks = { String... tasknames -> - for (String taskname : tasknames) { - project.tasks.named(taskname).configure { enabled = false } - } - } -} - -subprojects { Project subproj -> - plugins.withType(TestClustersPlugin).whenPluginAdded { - testClusters.all { - systemProperty "ingest.geoip.downloader.enabled.default", "false" - } - } -} diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index cc2a248f65e0f..2c258d23738f3 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -74,7 +74,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } private static class BulkRestBuilderListener extends RestBuilderListener { - private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, + private final BulkItemResponse ITEM_RESPONSE = BulkItemResponse.success(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "1", 0L, 1L, 1L, DocWriteResponse.Result.CREATED)); private final RestRequest request; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 7022f59350ce5..0535e234e5ec2 100644 --- 
a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.transport.TransportService; public class TransportNoopBulkAction extends HandledTransportAction { - private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocWriteRequest.OpType.UPDATE, + private static final BulkItemResponse ITEM_RESPONSE = BulkItemResponse.success(1, DocWriteRequest.OpType.UPDATE, new UpdateResponse(new ShardId("mock", "", 1), "1", 0L, 1L, 1L, DocWriteResponse.Result.CREATED)); @Inject diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 2317bbd72496a..e4d24e220b29b 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -14,6 +14,7 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.publish' apply plugin: 'com.github.johnrengelman.shadow' apply plugin: 'elasticsearch.rest-resources' +apply plugin: 'elasticsearch.internal-test-artifact' group = 'org.elasticsearch.client' archivesBaseName = 'elasticsearch-rest-high-level-client' @@ -67,20 +68,25 @@ tasks.named('forbiddenApisMain').configure { File nodeCert = file("./testnode.crt") File nodeTrustStore = file("./testnode.jks") File pkiTrustCert = file("./src/test/resources/org/elasticsearch/client/security/delegate_pki/testRootCA.crt") -File httpCaKeystore = file("./httpCa.p12"); -File transportKeystore = file("./transport.p12"); + +def clusterUserNameProvider = providers.systemProperty('tests.rest.cluster.username') + .orElse('test_user') + .forUseAtConfigurationTime() + +def clusterPasswordProvider = providers.systemProperty('tests.rest.cluster.password') + .orElse('test-user-password') + .forUseAtConfigurationTime() tasks.named("integTest").configure { systemProperty 'tests.rest.async', 'false' - systemProperty 'tests.rest.cluster.username', System.getProperty('tests.rest.cluster.username', 'test_user') - systemProperty 'tests.rest.cluster.password', System.getProperty('tests.rest.cluster.password', 'test-user-password') + systemProperty 'tests.rest.cluster.username', clusterUserNameProvider.get() + systemProperty 'tests.rest.cluster.password', clusterPasswordProvider.get() } -// Requires https://github.com/elastic/elasticsearch/pull/64403 to have this moved to task avoidance api. 
TaskProvider asyncIntegTest = tasks.register("asyncIntegTest", RestIntegTestTask) { systemProperty 'tests.rest.async', 'true' - systemProperty 'tests.rest.cluster.username', System.getProperty('tests.rest.cluster.username', 'test_user') - systemProperty 'tests.rest.cluster.password', System.getProperty('tests.rest.cluster.password', 'test-user-password') + systemProperty 'tests.rest.cluster.username', clusterUserNameProvider.get() + systemProperty 'tests.rest.cluster.password', clusterPasswordProvider.get() } tasks.named("check").configure { @@ -110,16 +116,14 @@ testClusters.all { setting 'indices.lifecycle.history_index_enabled', 'false' keystore 'xpack.security.transport.ssl.truststore.secure_password', 'testnode' extraConfigFile 'roles.yml', file('roles.yml') - user username: System.getProperty('tests.rest.cluster.username', 'test_user'), - password: System.getProperty('tests.rest.cluster.password', 'test-user-password'), - role: System.getProperty('tests.rest.cluster.role', 'admin') + user username: clusterUserNameProvider.get(), + password: clusterPasswordProvider.get(), + role: providers.systemProperty('tests.rest.cluster.role').orElse('admin').forUseAtConfigurationTime().get() user username: 'admin_user', password: 'admin-password' extraConfigFile nodeCert.name, nodeCert extraConfigFile nodeTrustStore.name, nodeTrustStore extraConfigFile pkiTrustCert.name, pkiTrustCert - extraConfigFile httpCaKeystore.name, httpCaKeystore - extraConfigFile transportKeystore.name, transportKeystore setting 'xpack.searchable.snapshot.shared_cache.size', '1mb' setting 'xpack.searchable.snapshot.shared_cache.region_size', '16kb' diff --git a/client/rest-high-level/qa/ssl-enabled/build.gradle b/client/rest-high-level/qa/ssl-enabled/build.gradle new file mode 100644 index 0000000000000..164c74e6b292d --- /dev/null +++ b/client/rest-high-level/qa/ssl-enabled/build.gradle @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.internal.info.BuildParams + +apply plugin: 'elasticsearch.java-rest-test' +dependencies { + javaRestTestImplementation(testArtifact(project(':client:rest-high-level'))) +} + +tasks.matching{ it.name == "javaRestTest" }.configureEach { + onlyIf { BuildParams.inFipsJvm == false} + systemProperty 'tests.rest.cluster.username', providers.systemProperty('tests.rest.cluster.username') + .orElse('test_user') + .forUseAtConfigurationTime() + .get() + systemProperty 'tests.rest.cluster.password', providers.systemProperty('tests.rest.cluster.password') + .orElse('test-user-password') + .forUseAtConfigurationTime() + .get() +} + +testClusters.matching { it.name == 'javaRestTest' }.configureEach { + testDistribution = 'DEFAULT' + numberOfNodes = 2 + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.authc.api_key.enabled', 'true' + + extraConfigFile 'httpCa.p12', file('./src/javaRestTest/resources/httpCa.p12') + extraConfigFile 'transport.p12', file('./src/javaRestTest/resources/transport.p12') + + // TBD: sync these settings (which options are set) with the ones we will be generating in #74868 + setting 'xpack.security.http.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.http.ssl.keystore.path', 'httpCa.p12' + setting 'xpack.security.transport.ssl.keystore.path', 'transport.p12' + setting 'xpack.security.transport.ssl.verification_mode', 'certificate' + + + keystore 'xpack.security.http.ssl.keystore.secure_password', 'password' + keystore 'xpack.security.transport.ssl.keystore.secure_password', 'password' + user username: 'admin_user', password: 'admin-password', role: 'superuser' +} diff --git a/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java new file mode 100644 index 0000000000000..5266db022bd4a --- /dev/null +++ b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.client.security.KibanaEnrollmentResponse; +import org.elasticsearch.client.security.NodeEnrollmentResponse; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.PathUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.FileNotFoundException; +import java.net.URL; +import java.nio.file.Path; +import java.util.List; + +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class EnrollmentIT extends ESRestHighLevelClientTestCase { + private static Path httpTrustStore; + + @BeforeClass + public static void findTrustStore() throws Exception { + final URL resource = EnrollmentIT.class.getResource("/httpCa.p12"); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource /httpCa.p12"); + } + httpTrustStore = PathUtils.get(resource.toURI()); + } + + @AfterClass + public static void cleanupStatics() { + httpTrustStore = null; + } + + @Override + protected String getProtocol() { + return "https"; + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .put(TRUSTSTORE_PATH, httpTrustStore) + .put(TRUSTSTORE_PASSWORD, "password") + .build(); + } + + public void testEnrollNode() throws Exception { + final NodeEnrollmentResponse nodeEnrollmentResponse = + execute(highLevelClient().security()::enrollNode, highLevelClient().security()::enrollNodeAsync, RequestOptions.DEFAULT); + assertThat(nodeEnrollmentResponse, notNullValue()); + assertThat(nodeEnrollmentResponse.getHttpCaKey(), endsWith("K2S3vidA=")); + assertThat(nodeEnrollmentResponse.getHttpCaCert(), endsWith("LfkRjirc=")); + assertThat(nodeEnrollmentResponse.getTransportKey(), endsWith("1I+r8vOQ==")); + assertThat(nodeEnrollmentResponse.getTransportCert(), endsWith("OpTdtgJo=")); + List nodesAddresses = nodeEnrollmentResponse.getNodesAddresses(); + assertThat(nodesAddresses.size(), equalTo(2)); + } + + public void testEnrollKibana() throws Exception { + KibanaEnrollmentResponse kibanaResponse = + execute(highLevelClient().security()::enrollKibana, highLevelClient().security()::enrollKibanaAsync, RequestOptions.DEFAULT); + assertThat(kibanaResponse, notNullValue()); + assertThat(kibanaResponse.getHttpCa() + , endsWith("brcNC5xq6YE7C4/06nH7F6le4kE4Uo6c9fpkl4ehOxQxndNLn462tFF+8VBA8IftJ1PPWzqGxLsCTzM6p6w8sa+XhgNYglLfkRjirc=")); + assertNotNull(kibanaResponse.getPassword()); + assertThat(kibanaResponse.getPassword().toString().length(), equalTo(14)); + } +} diff --git a/client/rest-high-level/httpCa.p12 b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/resources/httpCa.p12 similarity index 100% rename from client/rest-high-level/httpCa.p12 rename to client/rest-high-level/qa/ssl-enabled/src/javaRestTest/resources/httpCa.p12 diff --git a/client/rest-high-level/transport.p12 b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/resources/transport.p12 similarity index 100% rename from client/rest-high-level/transport.p12 rename to client/rest-high-level/qa/ssl-enabled/src/javaRestTest/resources/transport.p12 diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index 558c7888bc0e3..550ea1199c74c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -273,5 +273,4 @@ public Cancellable existsComponentTemplateAsync(ComponentTemplatesExistRequest c return restHighLevelClient.performRequestAsync(componentTemplatesRequest, ClusterRequestConverters::componentTemplatesExist, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } - } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 74e51ded1de7b..a742fcb0f1a0f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -2043,7 +2043,7 @@ void runIfNotCancelled(Runnable runnable) { versionCheck.addListener(new ActionListener>() { @Override public void onResponse(Optional validation) { - if (validation.isEmpty()) { + if (validation.isPresent() == false) { // Send the request and propagate cancellation Cancellable call = client.performRequestAsync(request, listener); cancellationForwarder.whenComplete((r, t) -> @@ -2078,7 +2078,7 @@ private Response performClientRequest(Request request) throws IOException { throw new ElasticsearchException(e); } - if (versionValidation.isEmpty()) { + if (versionValidation.isPresent() == false) { return client.performRequest(request); } else { throw new ElasticsearchException(versionValidation.get()); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index 4e00abe2ecda2..e46c4400d16cc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -78,6 +78,8 @@ import org.elasticsearch.client.security.PutRoleResponse; import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.PutUserResponse; +import org.elasticsearch.client.security.KibanaEnrollmentRequest; +import org.elasticsearch.client.security.KibanaEnrollmentResponse; import java.io.IOException; @@ -1299,7 +1301,6 @@ public Cancellable delegatePkiAuthenticationAsync(DelegatePkiAuthenticationReque DelegatePkiAuthenticationResponse::fromXContent, listener, emptySet()); } - /** * Allows a node to join to a cluster with security features enabled using the Enroll Node API. * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -1321,4 +1322,33 @@ public Cancellable enrollNodeAsync(RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + KibanaEnrollmentRequest.INSTANCE, + KibanaEnrollmentRequest::getRequest, options, KibanaEnrollmentResponse::fromXContent, listener, emptySet()); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java index 5ddd567d18a0c..1548347eba8fc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/DeprecationInfoResponse.java @@ -128,16 +128,28 @@ public static class DeprecationIssue { private static final ParseField MESSAGE = new ParseField("message"); private static final ParseField URL = new ParseField("url"); private static final ParseField DETAILS = new ParseField("details"); + private static final ParseField RESOLVE_DURING_ROLLING_UPGRADE = new ParseField("resolve_during_rolling_upgrade"); + private static final ParseField META = new ParseField("_meta"); static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("deprecation_issue", true, - a -> new DeprecationIssue(Level.fromString((String) a[0]), (String) a[1], (String) a[2], (String) a[3])); + new ConstructingObjectParser<>("deprecation_issue", true, args -> { + String logLevel = (String) args[0]; + String message = (String) args[1]; + String url = (String) args[2]; + String details = (String) args[3]; + boolean resolveDuringRollingUpgrade = (boolean) args[4]; + @SuppressWarnings("unchecked") + Map meta = (Map) args[5]; + return new DeprecationIssue(Level.fromString(logLevel), message, url, details, resolveDuringRollingUpgrade, meta); + }); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), LEVEL); PARSER.declareString(ConstructingObjectParser.constructorArg(), MESSAGE); PARSER.declareString(ConstructingObjectParser.constructorArg(), URL); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DETAILS); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), RESOLVE_DURING_ROLLING_UPGRADE); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), META); } public enum Level { @@ -159,12 +171,17 @@ public String toString() { private final String message; private final String url; private final String details; + private final boolean resolveDuringRollingUpgrade; + private final Map meta; - public DeprecationIssue(Level level, String message, String url, @Nullable String details) { + public DeprecationIssue(Level level, String message, String url, @Nullable String details, boolean resolveDuringRollingUpgrade, + @Nullable Map meta) { this.level = level; this.message = message; this.url = url; this.details = details; + this.resolveDuringRollingUpgrade = resolveDuringRollingUpgrade; + this.meta = meta; } public Level getLevel() { @@ -183,6 +200,14 @@ public String getDetails() { return details; } + public boolean isResolveDuringRollingUpgrade() { + return resolveDuringRollingUpgrade; + } + + public Map getMeta() { + return meta; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -195,12 +220,14 @@ public boolean equals(Object o) { return Objects.equals(level, that.level) 
&& Objects.equals(message, that.message) && Objects.equals(url, that.url) && - Objects.equals(details, that.details); + Objects.equals(details, that.details) && + Objects.equals(resolveDuringRollingUpgrade, that.resolveDuringRollingUpgrade) && + Objects.equals(meta, that.meta); } @Override public int hashCode() { - return Objects.hash(level, message, url, details); + return Objects.hash(level, message, url, details, resolveDuringRollingUpgrade, meta); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StartDatafeedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StartDatafeedResponse.java index 25e74500223a5..47a16a95e0b83 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StartDatafeedResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StartDatafeedResponse.java @@ -79,8 +79,7 @@ public boolean equals(Object other) { } StartDatafeedResponse that = (StartDatafeedResponse) other; - return started == started - && Objects.equals(node, that.node); + return started == that.started && Objects.equals(node, that.node); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/IndexLocation.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/IndexLocation.java index a06f5b4b0d483..4be63fd9aa676 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/IndexLocation.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/trainedmodel/IndexLocation.java @@ -19,14 +19,12 @@ public class IndexLocation implements TrainedModelLocation { public static final String INDEX = "index"; - private static final ParseField MODEL_ID = new ParseField("model_id"); private static final ParseField NAME = new ParseField("name"); private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(INDEX, true, a -> new IndexLocation((String) a[0], (String) a[1])); + new ConstructingObjectParser<>(INDEX, true, a -> new IndexLocation((String) a[0])); static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), MODEL_ID); PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); } @@ -34,18 +32,12 @@ public static IndexLocation fromXContent(XContentParser parser) throws IOExcepti return PARSER.parse(parser, null); } - private final String modelId; private final String index; - public IndexLocation(String modelId, String index) { - this.modelId = Objects.requireNonNull(modelId); + public IndexLocation(String index) { this.index = Objects.requireNonNull(index); } - public String getModelId() { - return modelId; - } - public String getIndex() { return index; } @@ -59,7 +51,6 @@ public String getName() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(NAME.getPreferredName(), index); - builder.field(MODEL_ID.getPreferredName(), modelId); builder.endObject(); return builder; } @@ -73,12 +64,11 @@ public boolean equals(Object o) { return false; } IndexLocation that = (IndexLocation) o; - return Objects.equals(modelId, that.modelId) - && Objects.equals(index, that.index); + return Objects.equals(index, that.index); } @Override public int hashCode() { - return Objects.hash(modelId, index); + return Objects.hash(index); } } diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java index 20cc1259bafa6..4d57094341f55 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java @@ -51,6 +51,7 @@ public class AnalysisConfig implements ToXContentObject { public static final ParseField DETECTORS = new ParseField("detectors"); public static final ParseField INFLUENCERS = new ParseField("influencers"); public static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariate_by_fields"); + public static final ParseField MODEL_PRUNE_WINDOW = new ParseField("model_prune_window"); @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), @@ -75,6 +76,8 @@ public class AnalysisConfig implements ToXContentObject { PARSER.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME); PARSER.declareStringArray(Builder::setInfluencers, INFLUENCERS); PARSER.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS); + PARSER.declareString((builder, val) -> + builder.setModelPruneWindow(TimeValue.parseTimeValue(val, MODEL_PRUNE_WINDOW.getPreferredName())), MODEL_PRUNE_WINDOW); } /** @@ -90,11 +93,13 @@ public class AnalysisConfig implements ToXContentObject { private final List detectors; private final List influencers; private final Boolean multivariateByFields; + private final TimeValue modelPruneWindow; private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, List categorizationFilters, CategorizationAnalyzerConfig categorizationAnalyzerConfig, PerPartitionCategorizationConfig perPartitionCategorizationConfig, TimeValue latency, - String summaryCountFieldName, List detectors, List influencers, Boolean multivariateByFields) { + String summaryCountFieldName, List detectors, List influencers, Boolean multivariateByFields, + TimeValue modelPruneWindow) { this.detectors = Collections.unmodifiableList(detectors); this.bucketSpan = bucketSpan; this.latency = latency; @@ -105,6 +110,7 @@ private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, Lis this.summaryCountFieldName = summaryCountFieldName; this.influencers = Collections.unmodifiableList(influencers); this.multivariateByFields = multivariateByFields; + this.modelPruneWindow = modelPruneWindow; } /** @@ -171,6 +177,10 @@ public Boolean getMultivariateByFields() { return multivariateByFields; } + public TimeValue getModelPruneWindow() { + return modelPruneWindow; + } + private static void addIfNotNull(Set fields, String field) { if (field != null) { fields.add(field); @@ -243,6 +253,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (multivariateByFields != null) { builder.field(MULTIVARIATE_BY_FIELDS.getPreferredName(), multivariateByFields); } + if (modelPruneWindow != null) { + builder.field(MODEL_PRUNE_WINDOW.getPreferredName(), modelPruneWindow.getStringRep()); + } builder.endObject(); return builder; } @@ -267,14 +280,15 @@ public boolean equals(Object object) { Objects.equals(summaryCountFieldName, that.summaryCountFieldName) && Objects.equals(detectors, that.detectors) && Objects.equals(influencers, that.influencers) && - Objects.equals(multivariateByFields, 
that.multivariateByFields); + Objects.equals(multivariateByFields, that.multivariateByFields) && + Objects.equals(modelPruneWindow, that.modelPruneWindow); } @Override public int hashCode() { return Objects.hash( bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, perPartitionCategorizationConfig, - latency, summaryCountFieldName, detectors, influencers, multivariateByFields); + latency, summaryCountFieldName, detectors, influencers, multivariateByFields, modelPruneWindow); } public static Builder builder(List detectors) { @@ -293,6 +307,7 @@ public static class Builder { private String summaryCountFieldName; private List influencers = new ArrayList<>(); private Boolean multivariateByFields; + private TimeValue modelPruneWindow; public Builder(List detectors) { setDetectors(detectors); @@ -310,6 +325,7 @@ public Builder(AnalysisConfig analysisConfig) { this.summaryCountFieldName = analysisConfig.summaryCountFieldName; this.influencers = new ArrayList<>(analysisConfig.influencers); this.multivariateByFields = analysisConfig.multivariateByFields; + this.modelPruneWindow = analysisConfig.modelPruneWindow; } public Builder setDetectors(List detectors) { @@ -376,10 +392,16 @@ public Builder setMultivariateByFields(Boolean multivariateByFields) { return this; } + public Builder setModelPruneWindow(TimeValue modelPruneWindow) { + this.modelPruneWindow = modelPruneWindow; + return this; + } + public AnalysisConfig build() { return new AnalysisConfig(bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, - perPartitionCategorizationConfig, latency, summaryCountFieldName, detectors, influencers, multivariateByFields); + perPartitionCategorizationConfig, latency, summaryCountFieldName, detectors, influencers, multivariateByFields, + modelPruneWindow); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java index f5fb1b93b7d67..f8d7b98a46bad 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Job.java @@ -119,8 +119,8 @@ private Job(String jobId, String jobType, List groups, String descriptio AnalysisConfig analysisConfig, AnalysisLimits analysisLimits, DataDescription dataDescription, ModelPlotConfig modelPlotConfig, Long renormalizationWindowDays, TimeValue backgroundPersistInterval, Long modelSnapshotRetentionDays, Long dailyModelSnapshotRetentionAfterDays, Long resultsRetentionDays, - Map customSettings, String modelSnapshotId, String resultsIndexName, Boolean deleting, - Boolean allowLazyOpen) { + Map customSettings, String modelSnapshotId, String resultsIndexName, + Boolean deleting, Boolean allowLazyOpen) { this.jobId = jobId; this.jobType = jobType; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java index 60415bda14701..0d3152d72885f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java @@ -47,6 +47,8 @@ public class JobUpdate implements ToXContentObject { AnalysisConfig.PER_PARTITION_CATEGORIZATION); PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), 
Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); PARSER.declareBoolean(Builder::setAllowLazyOpen, Job.ALLOW_LAZY_OPEN); + PARSER.declareString((builder, val) -> builder.setModelPruneWindow( + TimeValue.parseTimeValue(val, AnalysisConfig.MODEL_PRUNE_WINDOW.getPreferredName())), AnalysisConfig.MODEL_PRUNE_WINDOW); } private final String jobId; @@ -64,15 +66,16 @@ public class JobUpdate implements ToXContentObject { private final PerPartitionCategorizationConfig perPartitionCategorizationConfig; private final Map customSettings; private final Boolean allowLazyOpen; + private final TimeValue modelPruneWindow; private JobUpdate(String jobId, @Nullable List groups, @Nullable String description, @Nullable List detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig, @Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval, @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays, - @Nullable Long modelSnapshotRetentionDays, @Nullable Long dailyModelSnapshotRetentionAfterDays, - @Nullable List categorizationFilters, + @Nullable Long modelSnapshotRetentionDays, + @Nullable Long dailyModelSnapshotRetentionAfterDays, @Nullable List categorizationFilters, @Nullable PerPartitionCategorizationConfig perPartitionCategorizationConfig, - @Nullable Map customSettings, @Nullable Boolean allowLazyOpen) { + @Nullable Map customSettings, @Nullable Boolean allowLazyOpen, @Nullable TimeValue modelPruneWindow) { this.jobId = jobId; this.groups = groups; this.description = description; @@ -88,6 +91,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String this.perPartitionCategorizationConfig = perPartitionCategorizationConfig; this.customSettings = customSettings; this.allowLazyOpen = allowLazyOpen; + this.modelPruneWindow = modelPruneWindow; } public String getJobId() { @@ -146,6 +150,10 @@ public Boolean getAllowLazyOpen() { return allowLazyOpen; } + public TimeValue getModelPruneWindow() { + return modelPruneWindow; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -192,6 +200,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (allowLazyOpen != null) { builder.field(Job.ALLOW_LAZY_OPEN.getPreferredName(), allowLazyOpen); } + if (modelPruneWindow != null) { + builder.field(AnalysisConfig.MODEL_PRUNE_WINDOW.getPreferredName(), modelPruneWindow); + } builder.endObject(); return builder; } @@ -222,14 +233,16 @@ public boolean equals(Object other) { && Objects.equals(this.categorizationFilters, that.categorizationFilters) && Objects.equals(this.perPartitionCategorizationConfig, that.perPartitionCategorizationConfig) && Objects.equals(this.customSettings, that.customSettings) - && Objects.equals(this.allowLazyOpen, that.allowLazyOpen); + && Objects.equals(this.allowLazyOpen, that.allowLazyOpen) + && Objects.equals(this.modelPruneWindow, that.modelPruneWindow); } @Override public int hashCode() { return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays, backgroundPersistInterval, modelSnapshotRetentionDays, dailyModelSnapshotRetentionAfterDays, resultsRetentionDays, - categorizationFilters, perPartitionCategorizationConfig, customSettings, allowLazyOpen); + categorizationFilters, perPartitionCategorizationConfig, customSettings, allowLazyOpen, + modelPruneWindow); } public static class DetectorUpdate implements ToXContentObject { @@ -328,6 +341,7 @@ 
public static class Builder { private PerPartitionCategorizationConfig perPartitionCategorizationConfig; private Map customSettings; private Boolean allowLazyOpen; + private TimeValue modelPruneWindow; /** * New {@link JobUpdate.Builder} object for the existing job @@ -501,10 +515,16 @@ public Builder setAllowLazyOpen(boolean allowLazyOpen) { return this; } + public Builder setModelPruneWindow(TimeValue modelPruneWindow) { + this.modelPruneWindow = modelPruneWindow; + return this; + } + public JobUpdate build() { return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval, - renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, dailyModelSnapshotRetentionAfterDays, - categorizationFilters, perPartitionCategorizationConfig, customSettings, allowLazyOpen); + renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, + dailyModelSnapshotRetentionAfterDays, categorizationFilters, perPartitionCategorizationConfig, customSettings, + allowLazyOpen, modelPruneWindow); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponse.java index 4bfdea223d4a6..ea9daba79380b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponse.java @@ -9,18 +9,17 @@ package org.elasticsearch.client.security; import org.elasticsearch.client.security.support.ServiceTokenInfo; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** * Response when requesting credentials of a service account. 
@@ -28,65 +27,65 @@ public final class GetServiceAccountCredentialsResponse { private final String principal; - private final String nodeName; - private final List serviceTokenInfos; + private final List indexTokenInfos; + private final ServiceAccountCredentialsNodesResponse nodesResponse; - public GetServiceAccountCredentialsResponse( - String principal, String nodeName, List serviceTokenInfos) { + public GetServiceAccountCredentialsResponse(String principal, + List indexTokenInfos, + ServiceAccountCredentialsNodesResponse nodesResponse) { this.principal = Objects.requireNonNull(principal, "principal is required"); - this.nodeName = Objects.requireNonNull(nodeName, "nodeName is required"); - this.serviceTokenInfos = List.copyOf(Objects.requireNonNull(serviceTokenInfos, "service token infos are required)")); + this.indexTokenInfos = List.copyOf(Objects.requireNonNull(indexTokenInfos, "service token infos are required")); + this.nodesResponse = Objects.requireNonNull(nodesResponse, "nodes response is required"); } public String getPrincipal() { return principal; } - public String getNodeName() { - return nodeName; - } - - public List getServiceTokenInfos() { - return serviceTokenInfos; + public List getIndexTokenInfos() { + return indexTokenInfos; } - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - GetServiceAccountCredentialsResponse that = (GetServiceAccountCredentialsResponse) o; - return principal.equals(that.principal) && nodeName.equals(that.nodeName) && serviceTokenInfos.equals(that.serviceTokenInfos); - } - - @Override - public int hashCode() { - return Objects.hash(principal, nodeName, serviceTokenInfos); + public ServiceAccountCredentialsNodesResponse getNodesResponse() { + return nodesResponse; } + @SuppressWarnings("unchecked") static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("get_service_account_credentials_response", args -> { - @SuppressWarnings("unchecked") - final List tokenInfos = Stream.concat( - ((Map) args[3]).keySet().stream().map(name -> new ServiceTokenInfo(name, "index")), - ((Map) args[4]).keySet().stream().map(name -> new ServiceTokenInfo(name, "file"))) - .collect(Collectors.toList()); - assert tokenInfos.size() == (int) args[2] : "number of tokens do not match"; - return new GetServiceAccountCredentialsResponse((String) args[0], (String) args[1], tokenInfos); + final int count = (int) args[1]; + final List indexTokenInfos = (List) args[2]; + final ServiceAccountCredentialsNodesResponse fileTokensResponse = (ServiceAccountCredentialsNodesResponse) args[3]; + if (count != indexTokenInfos.size() + fileTokensResponse.getFileTokenInfos().size()) { + throw new IllegalArgumentException("number of tokens do not match"); + } + return new GetServiceAccountCredentialsResponse((String) args[0], indexTokenInfos, fileTokensResponse); }); static { PARSER.declareString(constructorArg(), new ParseField("service_account")); - PARSER.declareString(constructorArg(), new ParseField("node_name")); PARSER.declareInt(constructorArg(), new ParseField("count")); - PARSER.declareObject(constructorArg(), (p, c) -> p.map(), new ParseField("tokens")); - PARSER.declareObject(constructorArg(), (p, c) -> p.map(), new ParseField("file_tokens")); + PARSER.declareObject(constructorArg(), + (p, c) -> GetServiceAccountCredentialsResponse.parseIndexTokenInfos(p), new ParseField("tokens")); + PARSER.declareObject(constructorArg(), + (p, c) -> 
ServiceAccountCredentialsNodesResponse.fromXContent(p), new ParseField("nodes_credentials")); } public static GetServiceAccountCredentialsResponse fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } + static List parseIndexTokenInfos(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + final List indexTokenInfos = new ArrayList<>(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + indexTokenInfos.add(new ServiceTokenInfo(parser.currentName(), "index")); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + } + return indexTokenInfos; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/KibanaEnrollmentRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/KibanaEnrollmentRequest.java new file mode 100644 index 0000000000000..a667e2b759db7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/KibanaEnrollmentRequest.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.client.security; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Validatable; + +public final class KibanaEnrollmentRequest implements Validatable { + + public static final KibanaEnrollmentRequest INSTANCE = new KibanaEnrollmentRequest(); + private KibanaEnrollmentRequest() { + } + + public Request getRequest() { + return new Request(HttpGet.METHOD_NAME, "/_security/enroll/kibana"); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/KibanaEnrollmentResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/KibanaEnrollmentResponse.java new file mode 100644 index 0000000000000..b9e71f2a08ed6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/KibanaEnrollmentResponse.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public final class KibanaEnrollmentResponse { + + private SecureString password; + private String httpCa; + + public KibanaEnrollmentResponse(SecureString password, String httpCa) { + this.password = password; + this.httpCa = httpCa; + } + + public SecureString getPassword() { return password; } + + public String getHttpCa() { + return httpCa; + } + + private static final ParseField PASSWORD = new ParseField("password"); + private static final ParseField HTTP_CA = new ParseField("http_ca"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + KibanaEnrollmentResponse.class.getName(), true, + a -> new KibanaEnrollmentResponse(new SecureString(((String) a[0]).toCharArray()), (String) a[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), PASSWORD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), HTTP_CA); + } + + public static KibanaEnrollmentResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + @Override public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + KibanaEnrollmentResponse that = (KibanaEnrollmentResponse) o; + return password.equals(that.password) && httpCa.equals(that.httpCa); + } + + @Override public int hashCode() { + return Objects.hash(password, httpCa); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentRequest.java index 5889badb255ca..f618f702af25b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentRequest.java @@ -24,6 +24,6 @@ private NodeEnrollmentRequest(){ } public Request getRequest() { - return new Request(HttpGet.METHOD_NAME, "/_security/enroll_node"); + return new Request(HttpGet.METHOD_NAME, "/_security/enroll/node"); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentResponse.java index d2f7d7a295b0e..adacaf0537bbe 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/NodeEnrollmentResponse.java @@ -23,16 +23,14 @@ public class NodeEnrollmentResponse { private final String httpCaCert; private final String transportKey; private final String transportCert; - private final String clusterName; private final List nodesAddresses; - public NodeEnrollmentResponse(String httpCaKey, String httpCaCert, String transportKey, String transportCert, String clusterName, + public NodeEnrollmentResponse(String httpCaKey, String httpCaCert, String transportKey, String transportCert, List nodesAddresses){ this.httpCaKey = httpCaKey; this.httpCaCert = httpCaCert; this.transportKey = transportKey; 
this.transportCert = transportCert; - this.clusterName = clusterName; this.nodesAddresses = Collections.unmodifiableList(nodesAddresses); } @@ -52,10 +50,6 @@ public String getTransportCert() { return transportCert; } - public String getClusterName() { - return clusterName; - } - public List getNodesAddresses() { return nodesAddresses; } @@ -64,7 +58,6 @@ public List getNodesAddresses() { private static final ParseField HTTP_CA_CERT = new ParseField("http_ca_cert"); private static final ParseField TRANSPORT_KEY = new ParseField("transport_key"); private static final ParseField TRANSPORT_CERT = new ParseField("transport_cert"); - private static final ParseField CLUSTER_NAME = new ParseField("cluster_name"); private static final ParseField NODES_ADDRESSES = new ParseField("nodes_addresses"); @SuppressWarnings("unchecked") @@ -75,9 +68,8 @@ public List getNodesAddresses() { final String httpCaCert = (String) a[1]; final String transportKey = (String) a[2]; final String transportCert = (String) a[3]; - final String clusterName = (String) a[4]; - final List nodesAddresses = (List) a[5]; - return new NodeEnrollmentResponse(httpCaKey, httpCaCert, transportKey, transportCert, clusterName, nodesAddresses); + final List nodesAddresses = (List) a[4]; + return new NodeEnrollmentResponse(httpCaKey, httpCaCert, transportKey, transportCert, nodesAddresses); }); static { @@ -85,7 +77,6 @@ public List getNodesAddresses() { PARSER.declareString(ConstructingObjectParser.constructorArg(), HTTP_CA_CERT); PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_KEY); PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_CERT); - PARSER.declareString(ConstructingObjectParser.constructorArg(), CLUSTER_NAME); PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), NODES_ADDRESSES); } @@ -98,11 +89,11 @@ public static NodeEnrollmentResponse fromXContent(XContentParser parser) throws if (o == null || getClass() != o.getClass()) return false; NodeEnrollmentResponse that = (NodeEnrollmentResponse) o; return httpCaKey.equals(that.httpCaKey) && httpCaCert.equals(that.httpCaCert) && transportKey.equals(that.transportKey) - && transportCert.equals(that.transportCert) && clusterName.equals(that.clusterName) + && transportCert.equals(that.transportCert) && nodesAddresses.equals(that.nodesAddresses); } @Override public int hashCode() { - return Objects.hash(httpCaKey, httpCaCert, transportKey, transportCert, clusterName, nodesAddresses); + return Objects.hash(httpCaKey, httpCaCert, transportKey, transportCert, nodesAddresses); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ServiceAccountCredentialsNodesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ServiceAccountCredentialsNodesResponse.java new file mode 100644 index 0000000000000..8fb268d96e5ff --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ServiceAccountCredentialsNodesResponse.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.NodesResponseHeader; +import org.elasticsearch.client.security.support.ServiceTokenInfo; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; + +public class ServiceAccountCredentialsNodesResponse { + + private final NodesResponseHeader header; + private final List fileTokenInfos; + + public ServiceAccountCredentialsNodesResponse( + NodesResponseHeader header, List fileTokenInfos) { + this.header = header; + this.fileTokenInfos = fileTokenInfos; + } + + public NodesResponseHeader getHeader() { + return header; + } + + public List getFileTokenInfos() { + return fileTokenInfos; + } + + public static ServiceAccountCredentialsNodesResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + NodesResponseHeader header = null; + List fileTokenInfos = List.of(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + if ("_nodes".equals(parser.currentName())) { + if (header == null) { + header = NodesResponseHeader.fromXContent(parser, null); + } else { + throw new IllegalArgumentException("expecting only a single [_nodes] field, multiple found"); + } + } else if ("file_tokens".equals(parser.currentName())) { + fileTokenInfos = parseFileToken(parser); + } else { + throw new IllegalArgumentException("expecting field of either [_nodes] or [file_tokens], found [" + + parser.currentName() + "]"); + } + } + return new ServiceAccountCredentialsNodesResponse(header, fileTokenInfos); + } + + static List parseFileToken(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + XContentParser.Token token; + final ArrayList fileTokenInfos = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + final String tokenName = parser.currentName(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + ensureFieldName(parser, parser.nextToken(), "nodes"); + parser.nextToken(); + final List nodeNames = XContentParserUtils.parseList(parser, XContentParser::text); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + fileTokenInfos.add(new ServiceTokenInfo(tokenName, "file", nodeNames)); + } + return fileTokenInfos; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/ServiceTokenInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/ServiceTokenInfo.java index 7a60ad573f661..c00aea1de6475 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/ServiceTokenInfo.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/ServiceTokenInfo.java @@ -8,15 +8,25 @@ package org.elasticsearch.client.security.support; +import org.elasticsearch.core.Nullable; + +import java.util.Collection; import 
java.util.Objects; public class ServiceTokenInfo { private final String name; private final String source; + @Nullable + private final Collection nodeNames; public ServiceTokenInfo(String name, String source) { + this(name, source, null); + } + + public ServiceTokenInfo(String name, String source, Collection nodeNames) { this.name = Objects.requireNonNull(name, "token name is required"); this.source = Objects.requireNonNull(source, "token source is required"); + this.nodeNames = nodeNames; } public String getName() { @@ -27,6 +37,10 @@ public String getSource() { return source; } + public Collection getNodeNames() { + return nodeNames; + } + @Override public boolean equals(Object o) { if (this == o) @@ -34,16 +48,16 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; ServiceTokenInfo that = (ServiceTokenInfo) o; - return name.equals(that.name) && source.equals(that.source); + return Objects.equals(name, that.name) && Objects.equals(source, that.source) && Objects.equals(nodeNames, that.nodeNames); } @Override public int hashCode() { - return Objects.hash(name, source); + return Objects.hash(name, source, nodeNames); } @Override public String toString() { - return "ServiceTokenInfo{" + "name='" + name + '\'' + ", source='" + source + '\'' + '}'; + return "ServiceTokenInfo{" + "name='" + name + '\'' + ", source='" + source + '\'' + ", nodeNames=" + nodeNames + '}'; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SettingsConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SettingsConfig.java index 0378d27669f4b..9b0054ac78f9c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SettingsConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SettingsConfig.java @@ -25,20 +25,25 @@ public class SettingsConfig implements ToXContentObject { private static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size"); private static final ParseField DOCS_PER_SECOND = new ParseField("docs_per_second"); private static final ParseField DATES_AS_EPOCH_MILLIS = new ParseField("dates_as_epoch_millis"); + private static final ParseField INTERIM_RESULTS = new ParseField("interim_results"); private static final int DEFAULT_MAX_PAGE_SEARCH_SIZE = -1; private static final float DEFAULT_DOCS_PER_SECOND = -1F; // use an integer as we need to code 4 states: true, false, null (unchanged), default (defined server side) private static final int DEFAULT_DATES_AS_EPOCH_MILLIS = -1; + // use an integer as we need to code 4 states: true, false, null (unchanged), default (defined server side) + private static final int DEFAULT_INTERIM_RESULTS = -1; + private final Integer maxPageSearchSize; private final Float docsPerSecond; private final Integer datesAsEpochMillis; + private final Integer interimResults; private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "settings_config", true, - args -> new SettingsConfig((Integer) args[0], (Float) args[1], (Integer) args[2]) + args -> new SettingsConfig((Integer) args[0], (Float) args[1], (Integer) args[2], (Integer) args[3]) ); static { @@ -51,16 +56,24 @@ public class SettingsConfig implements ToXContentObject { DATES_AS_EPOCH_MILLIS, ValueType.BOOLEAN_OR_NULL ); + // this boolean requires 4 possible values: true, false, not_specified, default, therefore using a custom parser + PARSER.declareField( + 
optionalConstructorArg(), + p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? DEFAULT_INTERIM_RESULTS : p.booleanValue() ? 1 : 0, + INTERIM_RESULTS, + ValueType.BOOLEAN_OR_NULL + ); } public static SettingsConfig fromXContent(final XContentParser parser) { return PARSER.apply(parser, null); } - SettingsConfig(Integer maxPageSearchSize, Float docsPerSecond, Integer datesAsEpochMillis) { + SettingsConfig(Integer maxPageSearchSize, Float docsPerSecond, Integer datesAsEpochMillis, Integer interimResults) { this.maxPageSearchSize = maxPageSearchSize; this.docsPerSecond = docsPerSecond; this.datesAsEpochMillis = datesAsEpochMillis; + this.interimResults = interimResults; } @Override @@ -87,6 +100,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DATES_AS_EPOCH_MILLIS.getPreferredName(), datesAsEpochMillis > 0 ? true : false); } } + if (interimResults != null) { + if (interimResults.equals(DEFAULT_INTERIM_RESULTS)) { + builder.field(INTERIM_RESULTS.getPreferredName(), (Boolean) null); + } else { + builder.field(INTERIM_RESULTS.getPreferredName(), interimResults > 0 ? true : false); + } + } builder.endObject(); return builder; } @@ -103,6 +123,10 @@ public Boolean getDatesAsEpochMillis() { return datesAsEpochMillis != null ? datesAsEpochMillis > 0 : null; } + public Boolean getInterimResults() { + return interimResults != null ? interimResults > 0 : null; + } + @Override public boolean equals(Object other) { if (other == this) { @@ -115,12 +139,13 @@ public boolean equals(Object other) { SettingsConfig that = (SettingsConfig) other; return Objects.equals(maxPageSearchSize, that.maxPageSearchSize) && Objects.equals(docsPerSecond, that.docsPerSecond) - && Objects.equals(datesAsEpochMillis, that.datesAsEpochMillis); + && Objects.equals(datesAsEpochMillis, that.datesAsEpochMillis) + && Objects.equals(interimResults, that.interimResults); } @Override public int hashCode() { - return Objects.hash(maxPageSearchSize, docsPerSecond, datesAsEpochMillis); + return Objects.hash(maxPageSearchSize, docsPerSecond, datesAsEpochMillis, interimResults); } public static Builder builder() { @@ -131,6 +156,7 @@ public static class Builder { private Integer maxPageSearchSize; private Float docsPerSecond; private Integer datesAsEpochMillis; + private Integer interimResults; /** * Sets the paging maximum paging maxPageSearchSize that transform can use when @@ -176,8 +202,21 @@ public Builder setDatesAsEpochMillis(Boolean datesAsEpochMillis) { return this; } + /** + * Whether to write interim results in transform checkpoints. + * + * An explicit `null` resets to default. + * + * @param interimResults true if interim results should be written. + * @return the {@link Builder} with interimResults set. + */ + public Builder setInterimResults(Boolean interimResults) { + this.interimResults = interimResults == null ? DEFAULT_INTERIM_RESULTS : interimResults ? 
1 : 0; + return this; + } + public SettingsConfig build() { - return new SettingsConfig(maxPageSearchSize, docsPerSecond, datesAsEpochMillis); + return new SettingsConfig(maxPageSearchSize, docsPerSecond, datesAsEpochMillis, interimResults); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 91c2c8f6d4016..cb570348274e8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -386,5 +386,4 @@ public void testComponentTemplates() throws Exception { assertFalse(exist); } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 1d9b4a03b5831..700570934ecaa 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.tasks.RawTaskStatus; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; @@ -324,7 +325,10 @@ protected static TaskId findTaskToRethrottle(String actionName, String descripti } TaskGroup taskGroup = taskGroups.get(0); assertThat(taskGroup.getChildTasks(), empty()); - return taskGroup.getTaskInfo().getTaskId(); + // check that the task initialized enough that it can rethrottle too. + if (((RawTaskStatus) taskGroup.getTaskInfo().getStatus()).toMap().containsKey("batches")) { + return taskGroup.getTaskInfo().getTaskId(); + } } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); throw new AssertionError("Couldn't find tasks to rethrottle. 
Here are the running tasks " + highLevelClient().tasks().list(request, RequestOptions.DEFAULT)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java index ff56a519e6572..6f172384f82c4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -189,7 +189,6 @@ public void testReindexConflict() throws IOException { assertTrue(response.getTook().getMillis() > 0); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/60811#issuecomment-830040692") public void testDeleteByQuery() throws Exception { final String sourceIndex = "source1"; { @@ -264,6 +263,8 @@ public void onFailure(Exception e) { float requestsPerSecond = 1000f; ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); + assertThat(response.getTaskFailures(), empty()); + assertThat(response.getNodeFailures(), empty()); assertThat(response.getTasks(), hasSize(1)); assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 08f43a79fb645..040004e822758 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -569,7 +569,7 @@ public void testSearchWithParentJoin() throws IOException { assertEquals(Float.NaN, searchResponse.getHits().getMaxScore(), 0f); assertEquals(1, searchResponse.getAggregations().asList().size()); Terms terms = searchResponse.getAggregations().get("top-tags"); - assertEquals(0, terms.getDocCountError()); + assertEquals(0, terms.getDocCountError().longValue()); assertEquals(0, terms.getSumOfOtherDocCounts()); assertEquals(3, terms.getBuckets().size()); for (Terms.Bucket bucket : terms.getBuckets()) { @@ -581,7 +581,7 @@ public void testSearchWithParentJoin() throws IOException { assertEquals(2, children.getDocCount()); assertEquals(1, children.getAggregations().asList().size()); Terms leafTerms = children.getAggregations().get("top-names"); - assertEquals(0, leafTerms.getDocCountError()); + assertEquals(0, leafTerms.getDocCountError().longValue()); assertEquals(0, leafTerms.getSumOfOtherDocCounts()); assertEquals(2, leafTerms.getBuckets().size()); assertEquals(2, leafTerms.getBuckets().size()); @@ -1367,13 +1367,25 @@ public void testCountAllIndicesMatchQuery() throws IOException { public void testSearchWithBasicLicensedQuery() throws IOException { SearchRequest searchRequest = new SearchRequest("index"); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - PinnedQueryBuilder pinnedQuery = new PinnedQueryBuilder(new MatchAllQueryBuilder(), "2", "1"); - searchSourceBuilder.query(pinnedQuery); - searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); - assertSearchHeader(searchResponse); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("1")); + { + PinnedQueryBuilder pinnedQuery = new 
PinnedQueryBuilder(new MatchAllQueryBuilder(), "2", "1"); + searchSourceBuilder.query(pinnedQuery); + searchRequest.source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + assertSearchHeader(searchResponse); + assertFirstHit(searchResponse, hasId("2")); + assertSecondHit(searchResponse, hasId("1")); + } + { + PinnedQueryBuilder pinnedQuery = new PinnedQueryBuilder(new MatchAllQueryBuilder(), + new PinnedQueryBuilder.Item("index", "2"), new PinnedQueryBuilder.Item("index", "1")); + searchSourceBuilder.query(pinnedQuery); + searchRequest.source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + assertSearchHeader(searchResponse); + assertFirstHit(searchResponse, hasId("2")); + assertSecondHit(searchResponse, hasId("1")); + } } public void testPointInTime() throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java index 41581cba0e97c..507308ca24160 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.client; +import org.apache.http.client.methods.HttpGet; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -35,7 +36,10 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -125,6 +129,35 @@ public void testCacheStats() throws Exception { assertThat(response.getHits().getHits()[0].getSourceAsMap(), aMapWithSize(2)); } + { + assertBusy(() -> { + final Response response = client().performRequest(new Request(HttpGet.METHOD_NAME, "/_nodes/stats/thread_pool")); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + @SuppressWarnings("unchecked") + final Map nodes = (Map) extractValue(responseAsMap(response), "nodes"); + assertThat(nodes, notNullValue()); + + for (String node : nodes.keySet()) { + @SuppressWarnings("unchecked") + final Map threadPools = + (Map) extractValue((Map) nodes.get(node), "thread_pool"); + assertNotNull("No thread pools on node " + node, threadPools); + + @SuppressWarnings("unchecked") + final Map threadPoolStats = + (Map) threadPools.get("searchable_snapshots_cache_fetch_async"); + assertNotNull("No thread pools stats on node " + node, threadPoolStats); + + final Number active = (Number) extractValue(threadPoolStats, "active"); + assertThat(node + " has still active tasks", active, equalTo(0)); + + final Number queue = (Number) extractValue(threadPoolStats, "queue"); + assertThat(node + " has still enqueued tasks", queue, equalTo(0)); + } + }, 30L, TimeUnit.SECONDS); + } + { final CachesStatsRequest request = new CachesStatsRequest(); final CachesStatsResponse response = execute(request, client::cacheStats, 
client::cacheStatsAsync); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java index 3fb43731bfbb5..8cdb2fafe204b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.client.security.GetRolesResponse; import org.elasticsearch.client.security.GetUsersRequest; import org.elasticsearch.client.security.GetUsersResponse; -import org.elasticsearch.client.security.NodeEnrollmentResponse; import org.elasticsearch.client.security.PutRoleRequest; import org.elasticsearch.client.security.PutRoleResponse; import org.elasticsearch.client.security.PutUserRequest; @@ -46,12 +45,10 @@ import java.util.Map; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class SecurityIT extends ESRestHighLevelClientTestCase { @@ -195,19 +192,6 @@ public void testPutRole() throws Exception { assertThat(deleteRoleResponse.isFound(), is(true)); } - @AwaitsFix(bugUrl = "Determine behavior for keystore with multiple keys") - public void testEnrollNode() throws Exception { - final NodeEnrollmentResponse nodeEnrollmentResponse = - execute(highLevelClient().security()::enrollNode, highLevelClient().security()::enrollNodeAsync, RequestOptions.DEFAULT); - assertThat(nodeEnrollmentResponse, notNullValue()); - assertThat(nodeEnrollmentResponse.getHttpCaKey(), endsWith("ECAwGGoA==")); - assertThat(nodeEnrollmentResponse.getHttpCaCert(), endsWith("ECAwGGoA==")); - assertThat(nodeEnrollmentResponse.getTransportKey(), endsWith("fSI09on8AgMBhqA=")); - assertThat(nodeEnrollmentResponse.getTransportCert(), endsWith("fSI09on8AgMBhqA=")); - List nodesAddresses = nodeEnrollmentResponse.getNodesAddresses(); - assertThat(nodesAddresses.size(), equalTo(1)); - } - private void deleteUser(User user) throws IOException { final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, "/_security/user/" + user.getUsername()); highLevelClient().getLowLevelClient().performRequest(deleteUserRequest); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index f2c3486e185f7..2caf20ffe8b3a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -33,14 +33,16 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.SnapshotInfo; import org.mockito.internal.util.collections.Sets; import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.elasticsearch.snapshots.SnapshotsService.NO_FEATURE_STATES_VALUE; import static 
org.elasticsearch.tasks.TaskResultsService.TASKS_FEATURE_NAME; @@ -149,7 +151,7 @@ public void testCreateSnapshot() throws Exception { boolean waitForCompletion = randomBoolean(); request.waitForCompletion(waitForCompletion); if (randomBoolean()) { - request.userMetadata(randomUserMetadata()); + request.userMetadata(AbstractSnapshotIntegTestCase.randomUserMetadata()); } request.partial(randomBoolean()); request.includeGlobalState(randomBoolean()); @@ -193,7 +195,7 @@ public void testGetSnapshots() throws IOException { CreateSnapshotResponse putSnapshotResponse1 = createTestSnapshot(createSnapshotRequest1); CreateSnapshotRequest createSnapshotRequest2 = new CreateSnapshotRequest(repository2, snapshot2); createSnapshotRequest2.waitForCompletion(true); - Map originalMetadata = randomUserMetadata(); + Map originalMetadata = AbstractSnapshotIntegTestCase.randomUserMetadata(); createSnapshotRequest2.userMetadata(originalMetadata); CreateSnapshotResponse putSnapshotResponse2 = createTestSnapshot(createSnapshotRequest2); // check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead. @@ -209,14 +211,17 @@ public void testGetSnapshots() throws IOException { GetSnapshotsResponse response = execute(request, highLevelClient().snapshot()::get, highLevelClient().snapshot()::getAsync); assertThat(response.isFailed(), is(false)); - assertThat(response.getRepositories(), equalTo(Sets.newSet(repository1, repository2))); - - assertThat(response.getSnapshots(repository1), hasSize(1)); - assertThat(response.getSnapshots(repository1).get(0).snapshotId().getName(), equalTo(snapshot1)); + assertEquals( + Sets.newSet(repository1, repository2), + response.getSnapshots().stream().map(SnapshotInfo::repository).collect(Collectors.toSet()) + ); - assertThat(response.getSnapshots(repository2), hasSize(1)); - assertThat(response.getSnapshots(repository2).get(0).snapshotId().getName(), equalTo(snapshot2)); - assertThat(response.getSnapshots(repository2).get(0).userMetadata(), equalTo(originalMetadata)); + assertThat(response.getSnapshots(), hasSize(2)); + assertThat(response.getSnapshots().get(0).snapshotId().getName(), equalTo(snapshot1)); + assertThat(response.getSnapshots().get(0).repository(), equalTo(repository1)); + assertThat(response.getSnapshots().get(1).snapshotId().getName(), equalTo(snapshot2)); + assertThat(response.getSnapshots().get(1).userMetadata(), equalTo(originalMetadata)); + assertThat(response.getSnapshots().get(1).repository(), equalTo(repository2)); } @@ -264,7 +269,7 @@ public void testRestoreSnapshot() throws IOException { createSnapshotRequest.indices(testIndex); createSnapshotRequest.waitForCompletion(true); if (randomBoolean()) { - createSnapshotRequest.userMetadata(randomUserMetadata()); + createSnapshotRequest.userMetadata(AbstractSnapshotIntegTestCase.randomUserMetadata()); } CreateSnapshotResponse createSnapshotResponse = createTestSnapshot(createSnapshotRequest); assertEquals(RestStatus.OK, createSnapshotResponse.status()); @@ -311,7 +316,7 @@ public void testSnapshotHidden() throws IOException { createSnapshotRequest.indices("*"); createSnapshotRequest.waitForCompletion(true); if (randomBoolean()) { - createSnapshotRequest.userMetadata(randomUserMetadata()); + createSnapshotRequest.userMetadata(AbstractSnapshotIntegTestCase.randomUserMetadata()); } CreateSnapshotResponse createSnapshotResponse = createTestSnapshot(createSnapshotRequest); assertEquals(RestStatus.OK, createSnapshotResponse.status()); @@ -344,7 +349,7 @@ public 
void testDeleteSnapshot() throws IOException { CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); createSnapshotRequest.waitForCompletion(true); if (randomBoolean()) { - createSnapshotRequest.userMetadata(randomUserMetadata()); + createSnapshotRequest.userMetadata(AbstractSnapshotIntegTestCase.randomUserMetadata()); } CreateSnapshotResponse createSnapshotResponse = createTestSnapshot(createSnapshotRequest); // check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead. @@ -380,27 +385,4 @@ public void testCloneSnapshot() throws IOException { assertTrue(response.isAcknowledged()); } - private static Map randomUserMetadata() { - if (randomBoolean()) { - return null; - } - - Map metadata = new HashMap<>(); - long fields = randomLongBetween(0, 4); - for (int i = 0; i < fields; i++) { - if (randomBoolean()) { - metadata.put(randomValueOtherThanMany(metadata::containsKey, () -> randomAlphaOfLengthBetween(2,10)), - randomAlphaOfLengthBetween(5, 5)); - } else { - Map nested = new HashMap<>(); - long nestedFields = randomLongBetween(0, 4); - for (int j = 0; j < nestedFields; j++) { - nested.put(randomValueOtherThanMany(nested::containsKey, () -> randomAlphaOfLengthBetween(2,10)), - randomAlphaOfLengthBetween(5, 5)); - } - metadata.put(randomValueOtherThanMany(metadata::containsKey, () -> randomAlphaOfLengthBetween(2,10)), nested); - } - } - return metadata; - } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index 277db4dac1b5d..9f8e09cd1951d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -621,9 +621,6 @@ public void testRetryPolicy() throws Exception { .put("index.lifecycle.name", "my_policy") .build()); client.indices().create(createIndexRequest, RequestOptions.DEFAULT); - assertBusy(() -> assertNotNull(client.indexLifecycle() .explainLifecycle(new ExplainLifecycleRequest("my_index"), RequestOptions.DEFAULT) .getIndexResponses().get("my_index").getFailedStep()), 30, TimeUnit.SECONDS); } // tag::ilm-retry-lifecycle-policy-request @@ -644,8 +641,8 @@ public void testRetryPolicy() throws Exception { assertTrue(acknowledged); } catch (ElasticsearchException e) { - // the retry API might fail as the shrink action steps are retryable (so if the retry API reaches ES when ILM is retrying the - // failed `shrink` step, the retry API will fail) + // the retry API might fail as the shrink action steps are retryable (ILM will get stuck in the `check-target-shards-count` step + // with no failure, so the retry API will fail) // assert that's the exception we encountered (we want to test to fail if there is an actual error with the retry api) assertThat(e.getMessage(), containsStringIgnoringCase("reason=cannot retry an action for an index [my_index] that has not " + "encountered an error when running a Lifecycle Policy")); @@ -1044,7 +1041,7 @@ private void assertSnapshotExists(final RestHighLevelClient client, final String GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(new String[]{repo}, new String[]{snapshotName}); try { final GetSnapshotsResponse snaps = client.snapshot().get(getSnapshotsRequest, RequestOptions.DEFAULT); - Optional info =
snaps.getSnapshots(repo).stream().findFirst(); + Optional info = snaps.getSnapshots().stream().findFirst(); if (info.isPresent()) { info.ifPresent(si -> { assertThat(si.snapshotId().getName(), equalTo(snapshotName)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 7c57d3076c076..3ae7a4d74398e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.NodesResponseHeader; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.security.AuthenticateResponse; @@ -100,6 +101,7 @@ import org.elasticsearch.client.security.user.privileges.Role.ClusterPrivilegeName; import org.elasticsearch.client.security.user.privileges.Role.IndexPrivilegeName; import org.elasticsearch.client.security.user.privileges.UserIndicesPrivileges; +import org.elasticsearch.client.security.KibanaEnrollmentResponse; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -121,6 +123,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -130,6 +133,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -701,8 +705,8 @@ public void testGetRoles() throws Exception { List roles = response.getRoles(); assertNotNull(response); - // 31 system roles plus the three we created - assertThat(roles.size(), equalTo(31 + 3)); + // 30 system roles plus the three we created + assertThat(roles.size(), equalTo(30 + 3)); } { @@ -2614,13 +2618,12 @@ public void onFailure(Exception e) { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/74278") public void testCreateServiceAccountToken() throws IOException { RestHighLevelClient client = highLevelClient(); { // tag::create-service-account-token-request CreateServiceAccountTokenRequest createServiceAccountTokenRequest = - new CreateServiceAccountTokenRequest("elastic", "fleet-server", "token1"); + new CreateServiceAccountTokenRequest("elastic", "fleet-server", "my_token_1"); // end::create-service-account-token-request // tag::create-service-account-token-execute @@ -2632,7 +2635,7 @@ public void testCreateServiceAccountToken() throws IOException { final String tokenName = createServiceAccountTokenResponse.getName(); // <1> final SecureString tokenValue = createServiceAccountTokenResponse.getValue(); // <2> // end::create-service-account-token-response - assertThat(createServiceAccountTokenResponse.getName(), equalTo("token1")); + assertThat(createServiceAccountTokenResponse.getName(), equalTo("my_token_1")); assertNotNull(tokenValue); } @@ -2723,14 +2726,13 @@ public void onFailure(Exception e) { } } 
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/74278") public void testGetServiceAccountCredentials() throws IOException { RestHighLevelClient client = highLevelClient(); final CreateServiceAccountTokenRequest createServiceAccountTokenRequest = - new CreateServiceAccountTokenRequest("elastic", "fleet-server", "token1"); + new CreateServiceAccountTokenRequest("elastic", "fleet-server", "token2"); final CreateServiceAccountTokenResponse createServiceAccountTokenResponse = client.security().createServiceAccountToken(createServiceAccountTokenRequest, RequestOptions.DEFAULT); - assertThat(createServiceAccountTokenResponse.getName(), equalTo("token1")); + assertThat(createServiceAccountTokenResponse.getName(), equalTo("token2")); { // tag::get-service-account-credentials-request @@ -2745,15 +2747,23 @@ public void testGetServiceAccountCredentials() throws IOException { // tag::get-service-account-credentials-response final String principal = getServiceAccountCredentialsResponse.getPrincipal(); // <1> - final String nodeName = getServiceAccountCredentialsResponse.getNodeName(); // <2> - final List serviceTokenInfos = getServiceAccountCredentialsResponse.getServiceTokenInfos(); // <3> - final String tokenName = serviceTokenInfos.get(0).getName(); // <4> - final String tokenSource = serviceTokenInfos.get(0).getSource(); // <5> + final List indexTokenInfos = getServiceAccountCredentialsResponse.getIndexTokenInfos(); // <2> + final String tokenName = indexTokenInfos.get(0).getName(); // <3> + final String tokenSource = indexTokenInfos.get(0).getSource(); // <4> + final Collection nodeNames = indexTokenInfos.get(0).getNodeNames(); // <5> + final List fileTokenInfos + = getServiceAccountCredentialsResponse.getNodesResponse().getFileTokenInfos(); // <6> + final NodesResponseHeader fileTokensResponseHeader + = getServiceAccountCredentialsResponse.getNodesResponse().getHeader(); // <7> + final int nSuccessful = fileTokensResponseHeader.getSuccessful(); // <8> + final int nFailed = fileTokensResponseHeader.getFailed(); // <9> // end::get-service-account-credentials-response assertThat(principal, equalTo("elastic/fleet-server")); - assertThat(serviceTokenInfos.size(), equalTo(1)); - assertThat(tokenName, equalTo("token1")); - assertThat(tokenSource, equalTo("index")); + // Cannot assert exactly one token because there are rare occasions where tests overlap and it will see + // token created from other tests + assertThat(indexTokenInfos.size(), greaterThanOrEqualTo(1)); + assertThat(indexTokenInfos.stream().map(ServiceTokenInfo::getName).collect(Collectors.toSet()), hasItem("token2")); + assertThat(indexTokenInfos.stream().map(ServiceTokenInfo::getSource).collect(Collectors.toSet()), hasItem("index")); } { @@ -2785,8 +2795,9 @@ public void onFailure(Exception e) { assertNotNull(future.actionGet()); assertThat(future.actionGet().getPrincipal(), equalTo("elastic/fleet-server")); - assertThat(future.actionGet().getServiceTokenInfos().size(), equalTo(1)); - assertThat(future.actionGet().getServiceTokenInfos().get(0), equalTo(new ServiceTokenInfo("token1", "index"))); + assertThat(future.actionGet().getIndexTokenInfos().size(), greaterThanOrEqualTo(1)); + assertThat(future.actionGet().getIndexTokenInfos().stream().map(ServiceTokenInfo::getName).collect(Collectors.toSet()), + hasItem("token2")); } } @@ -2878,8 +2889,7 @@ public void testNodeEnrollment() throws Exception { String httpCaCert = response.getHttpCaCert(); // <2> String transportKey = response.getTransportKey(); // <3> String 
transportCert = response.getTransportCert(); // <4> - String clusterName = response.getClusterName(); // <5> - List nodesAddresses = response.getNodesAddresses(); // <6> + List nodesAddresses = response.getNodesAddresses(); // <5> // end::node-enrollment-response } @@ -2892,7 +2902,6 @@ public void onResponse(NodeEnrollmentResponse response) { // <1> } - @Override public void onFailure(Exception e) { // <2> @@ -2909,6 +2918,47 @@ public void onFailure(Exception e) { } } + @AwaitsFix(bugUrl = "Determine behavior for keystores with multiple keys") + public void testKibanaEnrollment() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + // tag::kibana-enrollment-execute + KibanaEnrollmentResponse response = client.security().enrollKibana(RequestOptions.DEFAULT); + // end::kibana-enrollment-execute + + // tag::kibana-enrollment-response + SecureString password = response.getPassword(); // <1> + String httpCa = response.getHttpCa(); // <2> + // end::kibana-enrollment-response + assertThat(password.length(), equalTo(14)); + } + + { + // tag::kibana-enrollment-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(KibanaEnrollmentResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + }}; + // end::kibana-enrollment-execute-listener + + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::kibana-enrollment-execute-async + client.security().enrollKibanaAsync(RequestOptions.DEFAULT, listener); + // end::kibana-enrollment-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + private X509Certificate readCertForPkiDelegation(String certificateName) throws Exception { Path path = getDataPath("/org/elasticsearch/client/security/delegate_pki/" + certificateName); try (InputStream in = Files.newInputStream(path)) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index d09db5fda0b4e..22fd3a7c0095a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -617,7 +617,7 @@ public void testSnapshotGetSnapshots() throws IOException { // end::get-snapshots-execute // tag::get-snapshots-response - List snapshotsInfos = response.getSnapshots(repositoryName); + List snapshotsInfos = response.getSnapshots(); SnapshotInfo snapshotInfo = snapshotsInfos.get(0); RestStatus restStatus = snapshotInfo.status(); // <1> SnapshotId snapshotId = snapshotInfo.snapshotId(); // <2> diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java index ea9972b307683..40c8d0552c884 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/DeprecationInfoResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.client.migration; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.Tuple; import
org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -69,9 +70,13 @@ private void toXContent(DeprecationInfoResponse.DeprecationIssue issue, XContent .field("level", issue.getLevel()) .field("message", issue.getMessage()) .field("url", issue.getUrl()); - if (issue.getDetails()!= null) { + if (issue.getDetails() != null) { builder.field("details", issue.getDetails()); } + builder.field("resolve_during_rolling_upgrade", issue.isResolveDuringRollingUpgrade()); + if (issue.getMeta() != null) { + builder.field("_meta", issue.getMeta()); + } builder.endObject(); } @@ -94,7 +99,9 @@ private List createRandomIssues(boolea list.add(new DeprecationInfoResponse.DeprecationIssue(randomFrom(WARNING, CRITICAL), randomAlphaOfLength(5), randomAlphaOfLength(5), - randomBoolean() ? randomAlphaOfLength(5) : null)); + randomBoolean() ? randomAlphaOfLength(5) : null, + randomBoolean(), + randomBoolean() ? randomMap(1, 5, () -> new Tuple<>(randomAlphaOfLength(4), randomAlphaOfLength(4))) : null)); } return list; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/IndexLocationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/IndexLocationTests.java index ab19977d7ee2f..f64c70f2acc7c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/IndexLocationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/IndexLocationTests.java @@ -17,7 +17,7 @@ public class IndexLocationTests extends AbstractXContentTestCase { static IndexLocation randomInstance() { - return new IndexLocation(randomAlphaOfLength(7), randomAlphaOfLength(7)); + return new IndexLocation(randomAlphaOfLength(7)); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java index fbb22d3363ea1..7f45425e7fea8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java @@ -86,6 +86,9 @@ public static AnalysisConfig.Builder createRandomized() { if (randomBoolean()) { builder.setMultivariateByFields(randomBoolean()); } + if (randomBoolean()) { + builder.setModelPruneWindow(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); + } builder.setInfluencers(Arrays.asList(generateRandomStringArray(10, 10, false))); return builder; @@ -195,6 +198,19 @@ public void testEquals_GivenDifferentLatency() { assertFalse(config2.equals(config1)); } + public void testEquals_GivenDifferentModelPruneWindow() { + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setModelPruneWindow(TimeValue.timeValueDays(30)); + AnalysisConfig config1 = builder.build(); + + builder = createConfigBuilder(); + builder.setModelPruneWindow(TimeValue.timeValueDays(60)); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + public void testEquals_GivenSummaryCountField() { AnalysisConfig.Builder builder = createConfigBuilder(); builder.setSummaryCountFieldName("foo"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java index 
c1c1e63cb749b..8731e83d7800c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/JobUpdateTests.java @@ -26,7 +26,7 @@ protected JobUpdate createTestInstance() { /** * Creates a completely random update when the job is null - * or a random update that is is valid for the given job + * or a random update that is valid for the given job */ public static JobUpdate createRandom(String jobId) { JobUpdate.Builder update = new JobUpdate.Builder(jobId); @@ -74,6 +74,9 @@ public static JobUpdate createRandom(String jobId) { if (randomBoolean()) { update.setAllowLazyOpen(randomBoolean()); } + if (randomBoolean()) { + update.setModelPruneWindow(TimeValue.timeValueDays(randomIntBetween(1, 100))); + } return update.build(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponseTests.java index 82a4ecc6796de..5eafe7c04eed2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetServiceAccountCredentialsResponseTests.java @@ -8,15 +8,23 @@ package org.elasticsearch.client.security; +import org.elasticsearch.Version; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.Tuple; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.action.service.GetServiceAccountCredentialsNodesResponse; import org.elasticsearch.xpack.core.security.action.service.TokenInfo; import java.io.IOException; +import java.util.List; import java.util.Locale; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; @@ -27,15 +35,17 @@ public class GetServiceAccountCredentialsResponseTests @Override protected org.elasticsearch.xpack.core.security.action.service.GetServiceAccountCredentialsResponse createServerTestInstance( XContentType xContentType) { + final String[] fileTokenNames = randomArray(3, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)); + final GetServiceAccountCredentialsNodesResponse nodesResponse = new GetServiceAccountCredentialsNodesResponse( + new ClusterName(randomAlphaOfLength(12)), + List.of(new GetServiceAccountCredentialsNodesResponse.Node(new DiscoveryNode(randomAlphaOfLength(10), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), + Version.CURRENT), fileTokenNames)), + List.of(new FailedNodeException(randomAlphaOfLength(11), "error", new NoSuchFieldError("service_tokens")))); return new org.elasticsearch.xpack.core.security.action.service.GetServiceAccountCredentialsResponse( randomAlphaOfLengthBetween(3, 8) + "/" + randomAlphaOfLengthBetween(3, 8), - randomAlphaOfLengthBetween(3, 8), randomList( - 1, - 5, - () -> randomBoolean() ? 
- TokenInfo.fileToken(randomAlphaOfLengthBetween(3, 8)) : - TokenInfo.indexToken(randomAlphaOfLengthBetween(3, 8))) - ); + randomList(0, 5, () -> TokenInfo.indexToken(randomAlphaOfLengthBetween(3, 8))), + nodesResponse); } @Override @@ -48,14 +58,19 @@ protected void assertInstances( org.elasticsearch.xpack.core.security.action.service.GetServiceAccountCredentialsResponse serverTestInstance, GetServiceAccountCredentialsResponse clientInstance) { assertThat(serverTestInstance.getPrincipal(), equalTo(clientInstance.getPrincipal())); - assertThat(serverTestInstance.getNodeName(), equalTo(clientInstance.getNodeName())); assertThat( - serverTestInstance.getTokenInfos().stream() + Stream.concat(serverTestInstance.getIndexTokenInfos().stream(), + serverTestInstance.getNodesResponse().getFileTokenInfos().stream()) .map(tokenInfo -> new Tuple<>(tokenInfo.getName(), tokenInfo.getSource().name().toLowerCase(Locale.ROOT))) .collect(Collectors.toSet()), - equalTo(clientInstance.getServiceTokenInfos().stream() + equalTo(Stream.concat(clientInstance.getIndexTokenInfos().stream(), + clientInstance.getNodesResponse().getFileTokenInfos().stream()) .map(info -> new Tuple<>(info.getName(), info.getSource())) .collect(Collectors.toSet()))); + + assertThat( + serverTestInstance.getNodesResponse().failures().size(), + equalTo(clientInstance.getNodesResponse().getHeader().getFailures().size())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/KibanaErnollmentResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/KibanaErnollmentResponseTests.java new file mode 100644 index 0000000000000..937e750ccb72c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/KibanaErnollmentResponseTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class KibanaErnollmentResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + final String password = randomAlphaOfLength(14); + final String httpCa = randomAlphaOfLength(50); + final List nodesAddresses = randomList(2, 10, () -> buildNewFakeTransportAddress().toString()); + + final XContentType xContentType = randomFrom(XContentType.values()); + final XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + builder.startObject().field("password", password).field("http_ca", httpCa).field("nodes_addresses", nodesAddresses).endObject(); + BytesReference xContent = BytesReference.bytes(builder); + + final KibanaEnrollmentResponse response = KibanaEnrollmentResponse.fromXContent(createParser(xContentType.xContent(), xContent)); + assertThat(response.getPassword(), equalTo(password)); + assertThat(response.getHttpCa(), equalTo(httpCa)); + } + + public void testEqualsHashCode() { + final SecureString password = new SecureString(randomAlphaOfLength(14).toCharArray()); + final String httpCa = randomAlphaOfLength(50); + KibanaEnrollmentResponse kibanaEnrollmentResponse = new KibanaEnrollmentResponse(password, httpCa); + + EqualsHashCodeTestUtils.checkEqualsAndHashCode(kibanaEnrollmentResponse, + (original) -> new KibanaEnrollmentResponse(original.getPassword(), original.getHttpCa())); + + EqualsHashCodeTestUtils.checkEqualsAndHashCode(kibanaEnrollmentResponse, + (original) -> new KibanaEnrollmentResponse(original.getPassword(), original.getHttpCa()), + KibanaErnollmentResponseTests::mutateTestItem); + } + + private static KibanaEnrollmentResponse mutateTestItem(KibanaEnrollmentResponse original) { + switch (randomIntBetween(0, 1)) { + case 0: + return new KibanaEnrollmentResponse(new SecureString(randomAlphaOfLength(14).toCharArray()), + original.getHttpCa()); + case 1: + return new KibanaEnrollmentResponse(original.getPassword(), randomAlphaOfLength(51)); + default: + return new KibanaEnrollmentResponse(original.getPassword(), + original.getHttpCa()); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SettingsConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SettingsConfigTests.java index d4ab886b625ca..9d2776415b432 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SettingsConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SettingsConfigTests.java @@ -30,6 +30,7 @@ public static SettingsConfig randomSettingsConfig() { return new SettingsConfig( randomBoolean() ? null : randomIntBetween(10, 10_000), randomBoolean() ? null : randomFloat(), + randomBoolean() ? null : randomIntBetween(-1, 1), randomBoolean() ? 
null : randomIntBetween(-1, 1) ); } @@ -72,6 +73,7 @@ public void testExplicitNullOnWriteParser() throws IOException { assertThat(settingsAsMap.getOrDefault("max_page_search_size", "not_set"), equalTo("not_set")); assertNull(settingsAsMap.getOrDefault("docs_per_second", "not_set")); assertThat(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("interim_results", "not_set"), equalTo("not_set")); config = fromString("{\"dates_as_epoch_millis\" : null}"); assertFalse(config.getDatesAsEpochMillis()); @@ -80,6 +82,16 @@ public void testExplicitNullOnWriteParser() throws IOException { assertThat(settingsAsMap.getOrDefault("max_page_search_size", "not_set"), equalTo("not_set")); assertThat(settingsAsMap.getOrDefault("docs_per_second", "not_set"), equalTo("not_set")); assertNull(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set")); + assertThat(settingsAsMap.getOrDefault("interim_results", "not_set"), equalTo("not_set")); + + config = fromString("{\"interim_results\" : null}"); + assertFalse(config.getInterimResults()); + + settingsAsMap = xContentToMap(config); + assertThat(settingsAsMap.getOrDefault("max_page_search_size", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("docs_per_second", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set"), equalTo("not_set")); + assertNull(settingsAsMap.getOrDefault("interim_results", "not_set")); } public void testExplicitNullOnWriteBuilder() throws IOException { @@ -91,10 +103,12 @@ public void testExplicitNullOnWriteBuilder() throws IOException { assertNull(settingsAsMap.getOrDefault("max_page_search_size", "not_set")); assertThat(settingsAsMap.getOrDefault("docs_per_second", "not_set"), equalTo("not_set")); assertThat(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("interim_results", "not_set"), equalTo("not_set")); SettingsConfig emptyConfig = new SettingsConfig.Builder().build(); assertNull(emptyConfig.getMaxPageSearchSize()); assertNull(emptyConfig.getDatesAsEpochMillis()); + assertNull(emptyConfig.getInterimResults()); settingsAsMap = xContentToMap(emptyConfig); assertTrue(settingsAsMap.isEmpty()); @@ -106,6 +120,7 @@ public void testExplicitNullOnWriteBuilder() throws IOException { assertThat(settingsAsMap.getOrDefault("max_page_search_size", "not_set"), equalTo("not_set")); assertNull(settingsAsMap.getOrDefault("docs_per_second", "not_set")); assertThat(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("interim_results", "not_set"), equalTo("not_set")); config = new SettingsConfig.Builder().setDatesAsEpochMillis(null).build(); // returns false, however it's `null` as in "use default", checked next @@ -115,6 +130,17 @@ public void testExplicitNullOnWriteBuilder() throws IOException { assertThat(settingsAsMap.getOrDefault("max_page_search_size", "not_set"), equalTo("not_set")); assertThat(settingsAsMap.getOrDefault("docs_per_second", "not_set"), equalTo("not_set")); assertNull(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set")); + assertThat(settingsAsMap.getOrDefault("interim_results", "not_set"), equalTo("not_set")); + + config = new SettingsConfig.Builder().setInterimResults(null).build(); + // returns false, however it's `null` as in "use default", checked next + assertFalse(config.getInterimResults()); + + settingsAsMap = 
xContentToMap(config); + assertThat(settingsAsMap.getOrDefault("max_page_search_size", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("docs_per_second", "not_set"), equalTo("not_set")); + assertThat(settingsAsMap.getOrDefault("dates_as_epoch_millis", "not_set"), equalTo("not_set")); + assertNull(settingsAsMap.getOrDefault("interim_results", "not_set")); } private Map xContentToMap(ToXContent xcontent) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/SettingsConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/SettingsConfigTests.java index b808538cc7566..6ac3ee81238aa 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/SettingsConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/SettingsConfigTests.java @@ -23,6 +23,7 @@ public static org.elasticsearch.xpack.core.transform.transforms.SettingsConfig r return new org.elasticsearch.xpack.core.transform.transforms.SettingsConfig( randomBoolean() ? null : randomIntBetween(10, 10_000), randomBoolean() ? null : randomFloat(), + randomBoolean() ? null : randomIntBetween(0, 1), randomBoolean() ? null : randomIntBetween(0, 1) ); } @@ -34,6 +35,7 @@ public static void assertHlrcEquals( assertEquals(serverTestInstance.getMaxPageSearchSize(), clientInstance.getMaxPageSearchSize()); assertEquals(serverTestInstance.getDocsPerSecond(), clientInstance.getDocsPerSecond()); assertEquals(serverTestInstance.getDatesAsEpochMillis(), clientInstance.getDatesAsEpochMillis()); + assertEquals(serverTestInstance.getInterimResults(), clientInstance.getInterimResults()); } @Override diff --git a/client/test/build.gradle b/client/test/build.gradle index f619b74166639..ee52a9b522269 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -48,7 +48,6 @@ tasks.named("dependenciesInfo").configure { enabled = false } tasks.named("dependenciesGraph").configure { it.enabled = false } //we aren't releasing this jar -tasks.named("thirdPartyAudit").configure { enabled = false } tasks.named("test").configure { enabled = false } tasks.withType(LicenseHeadersTask.class).configureEach { diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 0eae955e19588..41815f24d65b8 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -5,7 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -import org.elasticsearch.gradle.internal.MavenFilteringHack + +import org.apache.tools.ant.filters.ReplaceTokens apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -17,7 +18,7 @@ group = "org.elasticsearch.distribution.integ-test-zip" tasks.named("processTestResources").configure { inputs.properties(project(':distribution').restTestExpansions) - MavenFilteringHack.filter(it, project(':distribution').restTestExpansions) + filter("tokens" : project(':distribution').restTestExpansions.collectEntries {k, v -> [k, v.toString()]} /* must be a map of strings */, ReplaceTokens.class) } // make the pom file name use elasticsearch instead of the project name diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java index 28ed28926d6b5..bdb0a76cf9709 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java @@ -83,7 +83,7 @@ private void closeWhileListenerEngaged(ActionFuture future) throws Excep Map refresh = (Map) total.get("refresh"); int listeners = (Integer) refresh.get("listeners"); assertEquals(1, listeners); - }); + }, 30L, TimeUnit.SECONDS); // Close the index. That should flush the listener. client().performRequest(new Request("POST", "/test/_close")); diff --git a/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy b/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy index d0d865c4ede16..f0cb0d58d3c1a 100644 --- a/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy +++ b/distribution/archives/integ-test-zip/src/test/resources/plugin-security.policy @@ -1,4 +1,4 @@ grant { // Needed to read the log file - permission java.io.FilePermission "${tests.logfile}", "read"; + permission java.io.FilePermission "@tests.logfile@", "read"; }; diff --git a/distribution/build.gradle b/distribution/build.gradle index 4a1c07444ee1e..942a718acea96 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -8,11 +8,11 @@ import org.apache.tools.ant.filters.FixCrLfFilter +import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.ConcatFilesTask import org.elasticsearch.gradle.internal.DependenciesInfoTask -import org.elasticsearch.gradle.internal.MavenFilteringHack import org.elasticsearch.gradle.internal.NoticeTask -import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import java.nio.file.Files @@ -35,12 +35,15 @@ tasks.register("generateDependenciesReport", ConcatFilesTask) { dependsOn rootProject.allprojects.collect { it.tasks.withType(DependenciesInfoTask) } files = fileTree(dir: project.rootDir, include: '**/dependencies.csv') headerLine = "name,version,url,license,sourceURL" - target = new File(System.getProperty('csv') ?: "${project.buildDir}/reports/dependencies/es-dependencies.csv") - + target = new File(providers.systemProperty('csv') + .orElse("${project.buildDir}/reports/dependencies/es-dependencies.csv") + .forUseAtConfigurationTime() + .get() + ) // explicitly add our dependency on the JDK String jdkVersion = VersionProperties.versions.get('bundled_jdk').split('@')[0] 
String jdkMajorVersion = jdkVersion.split('[+.]')[0] - String sourceUrl = "https://hg.openjdk.java.net/jdk-updates/jdk${jdkMajorVersion}u/archive/jdk-${jdkVersion}.tar.gz" + String sourceUrl = "https://github.com/openjdk/jdk${jdkMajorVersion}u/archive/refs/tags/jdk-${jdkVersion}.tar.gz" additionalLines << "OpenJDK,${jdkVersion},https://openjdk.java.net/,GPL-2.0-with-classpath-exception,${sourceUrl}".toString() // Explicitly add the dependency on the RHEL UBI Docker base image @@ -49,7 +52,7 @@ tasks.register("generateDependenciesReport", ConcatFilesTask) { '8', 'https://catalog.redhat.com/software/containers/ubi8/ubi-minimal/5c359a62bed8bd75a2c3fba8', 'Custom;https://www.redhat.com/licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf', - 'https://oss-dependencies.elastic.co/redhat/ubi/ubi-minimal-8-source.tar.gz' + 'https://oss-dependencies.elastic.co/red-hat-universal-base-image-minimal/8/ubi-minimal-8-source.tar.gz' ] additionalLines << rhelUbiFields.join(',') } @@ -165,57 +168,36 @@ void copyModule(TaskProvider copyTask, Project module) { } } -// log4j config could be contained in modules, so we must join it together using these tasks -def buildOssLog4jConfigTaskProvider = tasks.register("buildOssLog4jConfig") { - ext.contents = [] - ext.log4jFile = file("${ossOutputs}/log4j2.properties") - outputs.file log4jFile -} def buildDefaultLog4jConfigTaskProvider = tasks.register("buildDefaultLog4jConfig") { - dependsOn processDefaultOutputsTaskProvider - ext.contents = [] - ext.log4jFile = file("${defaultOutputs}/log4j2.properties") - outputs.file log4jFile -} + mustRunAfter('processDefaultOutputs') -Closure writeLog4jProperties = { - def file = file('src/config/log4j2.properties') - String mainLog4jProperties = file.getText('UTF-8') - it.log4jFile.setText(mainLog4jProperties, 'UTF-8') - for (String moduleLog4jProperties : it.contents.reverse()) { - it.log4jFile.append(moduleLog4jProperties, 'UTF-8') + def outputFile = file("${defaultOutputs}/log4j2.properties") + def inputFiles = fileTree('src/config').matching { include 'log4j2.properties' } + project(':modules').subprojects.each { + inputFiles = inputFiles + it.fileTree('src/main/config').matching { include 'log4j2.properties' } + } + project(':x-pack:plugin').subprojects.each { + inputFiles = inputFiles + it.fileTree('src/main/config').matching { include 'log4j2.properties' } } -} -buildOssLog4jConfigTaskProvider.configure { - doLast(writeLog4jProperties) -} - -buildDefaultLog4jConfigTaskProvider.configure { - doLast(writeLog4jProperties) -} -// copy log4j2.properties from modules that have it -void copyLog4jProperties(TaskProvider buildTask, Project module) { - buildTask.configure { - Configuration moduleConfig = moduleZip(module) + inputs.files(inputFiles) + outputs.file outputFile - dependsOn moduleConfig - doFirst { - FileTree tree = zipTree(moduleConfig.singleFile) - FileTree filtered = tree.matching { - include 'config/log4j2.properties' - include '*/config/log4j2.properties' // could be in a bundled plugin - } - if (filtered.isEmpty() == false) { - contents.add('\n\n' + filtered.singleFile.getText('UTF-8')) + doLast { + outputFile.setText('', 'UTF-8') + inputFiles.files.eachWithIndex( + { f, i -> + if (i != 0) { + outputFile.append('\n\n', 'UTF-8') + } + outputFile.append(f.text, 'UTF-8') } - } + ) } - } ext.restTestExpansions = [ - 'expected.modules.count': 0, + 'expected.modules.count': 0 ] // we create the buildOssModules task above but fill it here so we can do a single // loop over modules to also setup cross task 
dependencies and increment our modules counter @@ -237,9 +219,6 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { copyModule(processTransportOutputsTaskProvider, module) } - copyLog4jProperties(buildOssLog4jConfigTaskProvider, module) - copyLog4jProperties(buildDefaultLog4jConfigTaskProvider, module) - restTestExpansions['expected.modules.count'] += 1 } @@ -254,7 +233,6 @@ xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> } } copyModule(processDefaultOutputsTaskProvider, xpackModule) - copyLog4jProperties(buildDefaultLog4jConfigTaskProvider, xpackModule) } copyModule(processSystemdOutputsTaskProvider, project(':modules:systemd')) @@ -382,14 +360,10 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { // main config files, processed with distribution specific substitutions from '../src/config' exclude 'log4j2.properties' // this is handled separately below - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, testDistro, jdk)) - } - if (testDistro) { - from buildOssLog4jConfigTaskProvider - } else { - from buildDefaultLog4jConfigTaskProvider - from defaultConfigFiles + filter("tokens" : expansionsForDistribution(distributionType, testDistro, jdk), ReplaceTokens.class) } + from buildDefaultLog4jConfigTaskProvider + from defaultConfigFiles } } @@ -401,7 +375,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { exclude '*.exe' exclude '*.bat' eachFile { it.setMode(0755) } - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, testDistro, jdk)) + filter("tokens" : expansionsForDistribution(distributionType, testDistro, jdk), ReplaceTokens.class) } // windows files, only for zip if (distributionType == 'zip') { @@ -409,7 +383,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from '../src/bin' include '*.bat' filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, testDistro, jdk)) + filter("tokens" : expansionsForDistribution(distributionType, testDistro, jdk), ReplaceTokens.class) } with copySpec { from '../src/bin' @@ -489,10 +463,6 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { *
<dt>scripts.footer</dt> * <dd>Footer appended to control scripts embedded in the distribution that is * (almost) entirely there for cosmetic reasons.</dd> - * <dt>stopping.timeout</dt> - * <dd>RPM's init script needs to wait for elasticsearch to stop before - * returning from stop and it needs a maximum time to wait. This is it. One - * day. DEB retries forever.</dd>
* */ subprojects { @@ -516,6 +486,7 @@ subprojects { Map expansions = [ 'project.name': project.name, 'project.version': version, + 'project.minor.version': "${VersionProperties.elasticsearchVersion.major}.${VersionProperties.elasticsearchVersion.minor}", 'path.conf': [ 'deb': '/etc/elasticsearch', @@ -562,10 +533,6 @@ subprojects { 'def': "-XX:ErrorFile=logs/hs_err_pid%p.log" ], - 'stopping.timeout': [ - 'rpm': 86400, - ], - 'scripts.footer': [ /* Debian needs exit 0 on these scripts so we add it here and preserve the pretty footer. */ @@ -597,7 +564,7 @@ subprojects { ], ] Map result = [:] - expansions = expansions.each { key, value -> + expansions.each { key, value -> if (value instanceof Map) { // 'def' is for default but its three characters like 'rpm' and 'deb' value = value[distributionType] ?: value['def'] @@ -605,7 +572,8 @@ subprojects { return } } - result[key] = value + // expansions is String->Object but result is String->String, so we have to coerce the values + result[key] = value.toString() } return result } diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index c8403d8ebd1e8..6b5db06e55d82 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -53,9 +53,9 @@ ext.expansions = { Architecture architecture, DockerBase base -> String buildArgs = '' if (base == DockerBase.IRON_BANK) { buildArgs = """ -ARG BASE_REGISTRY=nexus-docker-secure.levelup-nexus.svc.cluster.local:18082 -ARG BASE_IMAGE=redhat/ubi/ubi8 -ARG BASE_TAG=8.3 +ARG BASE_REGISTRY=registry1.dso.mil +ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8 +ARG BASE_TAG=8.4 """ } @@ -68,7 +68,7 @@ ARG BASE_TAG=8.3 'build_date' : BuildParams.buildDate, 'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config', 'git_revision' : BuildParams.gitRevision, - 'license' : 'Elastic-License-2.0', + 'license' : base == DockerBase.IRON_BANK ? 'Elastic License 1.0' : 'Elastic-License-2.0', 'package_manager' : base == DockerBase.UBI ? 'microdnf' : 'yum', 'docker_base' : base.name().toLowerCase(), 'version' : VersionProperties.elasticsearch, @@ -202,20 +202,27 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) { String configDirectory = base == DockerBase.IRON_BANK ? 'scripts' : 'config' String arch = architecture == Architecture.AARCH64 ? 
'-aarch64' : '' - tasks.register(taskName('build', architecture, base, 'DockerContext'), Tar) { - archiveExtension = 'tar.gz' - compression = Compression.GZIP - archiveClassifier = "docker-build-context${arch}" - archiveBaseName = "elasticsearch${base.suffix}" - with dockerBuildContext(architecture, base) - - into(configDirectory) { - from(configurations.log4jConfig) { - filter TransformLog4jConfigFilter + final TaskProvider buildDockerContextTask = + tasks.register(taskName('build', architecture, base, 'DockerContext'), Tar) { + archiveExtension = 'tar.gz' + compression = Compression.GZIP + archiveClassifier = "docker-build-context${arch}" + archiveBaseName = "elasticsearch${base.suffix}" + with dockerBuildContext(architecture, base) + + into(configDirectory) { + from(configurations.log4jConfig) { + filter TransformLog4jConfigFilter + } } + + onlyIf { Architecture.current() == architecture } } - onlyIf { Architecture.current() == architecture } + if (base == DockerBase.IRON_BANK) { + tasks.named("assemble").configure { + dependsOn(buildDockerContextTask) + } } } @@ -303,8 +310,10 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) { onlyIf { Architecture.current() == architecture } } - tasks.named("assemble").configure { - dependsOn(buildDockerImageTask) + if (base != DockerBase.IRON_BANK) { + tasks.named("assemble").configure { + dependsOn(buildDockerImageTask) + } } } diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 2d627c5a519cf..617c867413f11 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -47,7 +47,7 @@ RUN set -eux ; \\ sha256sum -c \${tini_bin}.sha256sum ; \\ rm \${tini_bin}.sha256sum ; \\ mv \${tini_bin} /bin/tini ; \\ - chmod +x /bin/tini + chmod 0555 /bin/tini <% } else if (docker_base == 'iron_bank') { %> ################################################################################ @@ -62,7 +62,7 @@ FROM ${base_image} AS builder # `tini` is a tiny but valid init for containers. This is used to cleanly # control how ES and any child processes are shut down. COPY tini /bin/tini -RUN chmod 0755 /bin/tini +RUN chmod 0555 /bin/tini <% } else { %> @@ -171,7 +171,7 @@ RUN set -e ; \\ sha256sum -c "\${TINI_BIN}.sha256sum" ; \\ rm "\${TINI_BIN}.sha256sum" ; \\ mv "\${TINI_BIN}" /rootfs/bin/tini ; \\ - chmod +x /rootfs/bin/tini ; \\ + chmod 0555 /rootfs/bin/tini ; \\ curl --retry 10 -L -O \\ # Here we're fetching the same binaries used for the official busybox docker image from their GtiHub repository "https://github.com/docker-library/busybox/raw/\${BUSYBOX_COMMIT}/stable/musl/busybox.tar.xz" ; \\ @@ -221,12 +221,17 @@ FROM ${base_image} AS builder RUN mkdir /usr/share/elasticsearch WORKDIR /usr/share/elasticsearch -<% /* - Fetch the appropriate Elasticsearch distribution for this architecture. - Keep this command on one line - it is replaced with a `COPY` during local builds. - It uses the `arch` command to fetch the correct distro for the build machine. -*/ %> +<% if (docker_base == "iron_bank") { + // Iron Bank always copies the local artifact +%> +COPY elasticsearch-${version}-linux-x86_64.tar.gz /opt/elasticsearch.tar.gz +<% } else { + // Fetch the appropriate Elasticsearch distribution for this architecture. + // Keep this command on one line - it is replaced with a `COPY` during local builds. + // It uses the `arch` command to fetch the correct distro for the build machine. 
+%> RUN curl --retry 10 -S -L --output /opt/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-${version}-linux-\$(arch).tar.gz +<% } %> RUN tar -zxf /opt/elasticsearch.tar.gz --strip-components=1 @@ -235,27 +240,24 @@ COPY ${config_dir}/elasticsearch.yml config/ COPY ${config_dir}/log4j2.properties config/log4j2.docker.properties # 1. Configure the distribution for Docker -# 2. Ensure directories are created. Most already are, but make sure -# 3. Apply correct permissions -# 4. Move the distribution's default logging config aside -# 5. Generate a docker logging config, to be used by default -# 6. Apply more correct permissions -# 7. The JDK's directories' permissions don't allow `java` to be executed under a different -# group to the default. Fix this. -# 8. Remove write permissions from all files under `lib`, `bin`, `jdk` and `modules` -# 9. Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks. -# 10. Ensure all files are world-readable by default. It should be possible to -# examine the contents of the image under any UID:GID +# 2. Create required directory +# 3. Move the distribution's default logging config aside +# 4. Move the generated docker logging config so that it is the default +# 5. Reset permissions on all directories +# 6. Reset permissions on all files +# 7. Make CLI tools executable +# 8. Make some directories writable. `bin` must be writable because +# plugins can install their own CLI utilities. +# 9. Make some files writable RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \\ - mkdir -p config/jvm.options.d data logs plugins && \\ - chmod 0775 config config/jvm.options.d data logs plugins && \\ + mkdir data && \\ mv config/log4j2.properties config/log4j2.file.properties && \\ mv config/log4j2.docker.properties config/log4j2.properties && \\ - chmod 0660 config/elasticsearch.yml config/log4j2*.properties && \\ - find ./jdk -type d -exec chmod 0755 {} + && \\ - chmod -R a-w lib bin jdk modules && \\ - find . -xdev -perm -4000 -exec chmod ug-s {} + && \\ - find . -type f -exec chmod o+r {} + + find . -type d -exec chmod 0555 {} + && \\ + find . 
-type f -exec chmod 0444 {} + && \\ + chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \\ + chmod 0775 bin config config/jvm.options.d data logs plugins && \\ + find config -type f -exec chmod 0664 {} + <% if (docker_base == "ubi" || docker_base == "iron_bank") { %> @@ -293,8 +295,7 @@ RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\ RUN groupadd -g 1000 elasticsearch && \\ adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \\ - chmod 0775 /usr/share/elasticsearch && \\ - chown -R 1000:0 /usr/share/elasticsearch + chown -R 0:0 /usr/share/elasticsearch <% } else { %> @@ -310,15 +311,14 @@ COPY --from=rootfs /rootfs / RUN addgroup -g 1000 elasticsearch && \\ adduser -D -u 1000 -G elasticsearch -g elasticsearch -h /usr/share/elasticsearch elasticsearch && \\ addgroup elasticsearch root && \\ - chmod 0775 /usr/share/elasticsearch && \\ - chgrp 0 /usr/share/elasticsearch + chown -R 0:0 /usr/share/elasticsearch <% } %> ENV ELASTIC_CONTAINER true WORKDIR /usr/share/elasticsearch -COPY --from=builder --chown=1000:0 /usr/share/elasticsearch /usr/share/elasticsearch +COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch <% if (docker_base == "ubi" || docker_base == "iron_bank") { %> COPY --from=builder --chown=0:0 /bin/tini /bin/tini @@ -335,10 +335,16 @@ COPY ${bin_dir}/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh # 4. Replace OpenJDK's built-in CA certificate keystore with the one from the OS # vendor. The latter is superior in several ways. # REF: https://github.com/elastic/elasticsearch-docker/issues/171 +# 5. Tighten up permissions on the ES home dir (the permissions of the contents are handled earlier) +# 6. You can't install plugins that include configuration when running as `elasticsearch` and the `config` +# dir is owned by `root`, because the installer tries to manipulate the permissions on the plugin's +# config directory.
RUN chmod g=u /etc/passwd && \\ - chmod 0775 /usr/local/bin/docker-entrypoint.sh && \\ + chmod 0555 /usr/local/bin/docker-entrypoint.sh && \\ find / -xdev -perm -4000 -exec chmod ug-s {} + && \\ - ln -sf /etc/pki/ca-trust/extracted/java/cacerts /usr/share/elasticsearch/jdk/lib/security/cacerts + ln -sf /etc/pki/ca-trust/extracted/java/cacerts /usr/share/elasticsearch/jdk/lib/security/cacerts && \\ + chmod 0775 /usr/share/elasticsearch && \\ + chown elasticsearch bin config config/jvm.options.d data logs plugins EXPOSE 9200 9300 diff --git a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml index ccdc923e17824..63da9424a0c45 100644 --- a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml +++ b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml @@ -14,7 +14,7 @@ tags: # Build args passed to Dockerfile ARGs args: BASE_IMAGE: "redhat/ubi/ubi8" - BASE_TAG: "8.3" + BASE_TAG: "8.4" # Docker image labels labels: @@ -39,6 +39,9 @@ labels: resources: - filename: "elasticsearch-${version}-linux-x86_64.tar.gz" url: "/elasticsearch-${version}-linux-x86_64.tar.gz" + validation: + type: "sha512" + value: "" - filename: "tini" url: "https://github.com/krallin/tini/releases/download/v0.19.0/tini-amd64" validation: @@ -47,7 +50,14 @@ resources: # List of project maintainers maintainers: - - name: "Nassim Kammah" - email: "nassim.kammah@elastic.co" - name: "Rory Hunter" email: "rory.hunter@elastic.co" + username: "rory" + - email: "klepal_alexander@bah.com" + name: "Alexander Klepal" + username: "alexander.klepal" + cht_member: true + - email: "yalabe.dukuly@anchore.com" + name: "Yalabe Dukuly" + username: "yalabe.dukuly" + cht_member: true diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 09fbb04e2d665..aeeeafc2cae41 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -6,8 +6,8 @@ * Side Public License, v 1. */ +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.LoggedExec -import org.elasticsearch.gradle.internal.MavenFilteringHack import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.internal.info.BuildParams import org.redline_rpm.header.Flags @@ -33,7 +33,7 @@ import java.util.regex.Pattern * empty directory requires more wits than I have. * 3. ospackage really wants to suck up some of the debian control scripts * directly from the filesystem. It doesn't want to process them through - * MavenFilteringHack or any other copy-style action. + * any copy-style action. 
* * The following commands are useful when it comes to check the user/group * and files permissions set within the RPM and DEB packages: @@ -47,6 +47,16 @@ buildscript { mavenCentral() maven { url 'https://jitpack.io' } } + + // We rely on a specific version of the redline library used to build rpm packages + // to support sha256header in our elasticsearch RPMs + // TODO: Remove once https://github.com/nebula-plugins/gradle-ospackage-plugin/pull/402 got merged and released + configurations.all { + resolutionStrategy { + force 'org.redline-rpm:redline:1.2.10' + } + } + dependencies { classpath "com.github.breskeby:gradle-ospackage-plugin:98455c1" } @@ -64,13 +74,14 @@ void addProcessFilesTask(String type, boolean oss, boolean jdk) { with copySpec { from 'src/common' from "src/${type}" - MavenFilteringHack.filter(it, expansionsForDistribution(type, oss, jdk)) + filter("tokens" : expansionsForDistribution(type, oss, jdk), ReplaceTokens.class) } into('etc/elasticsearch') { with configFiles(type, oss, jdk) } - MavenFilteringHack.filter(it, expansionsForDistribution(type, oss, jdk)) + + filter("tokens" : expansionsForDistribution(type, oss, jdk), ReplaceTokens.class) doLast { // create empty dirs, we set the permissions when configuring the packages @@ -293,7 +304,7 @@ ospackage { signingKeyPassphrase = project.property('signing.password') signingKeyRingFile = project.hasProperty('signing.secretKeyRingFile') ? project.file(project.property('signing.secretKeyRingFile')) : - new File(new File(System.getProperty('user.home'), '.gnupg'), 'secring.gpg') + new File(new File(project.providers.systemProperty('user.home').orElse('.gnupg').forUseAtConfigurationTime().get()), 'secring.gpg') } // version found on oldest supported distro, centos-6 @@ -534,14 +545,17 @@ subprojects { assertLinesInFile(noticePath, noticeLines) } } - tasks.named("check").configure { dependsOn "checkNotice" } - tasks.register('checkLicenseMetadata', LoggedExec) { + def checkLicenseMetadataTaskProvider = tasks.register('checkLicenseMetadata', LoggedExec) { dependsOn buildDist, "checkExtraction" } - check.dependsOn checkLicenseMetadata + tasks.named("check").configure { + dependsOn "checkNotice" + dependsOn(checkLicenseMetadataTaskProvider) + } + if (project.name.contains('deb')) { - checkLicenseMetadata { LoggedExec exec -> + checkLicenseMetadataTaskProvider.configure { LoggedExec exec -> onlyIf dpkgExists final ByteArrayOutputStream output = new ByteArrayOutputStream() exec.commandLine 'dpkg-deb', '--info', "${-> buildDist.get().outputs.files.filter(debFilter).singleFile}" @@ -577,7 +591,7 @@ subprojects { } } else { assert project.name.contains('rpm') - checkLicenseMetadata { LoggedExec exec -> + checkLicenseMetadataTaskProvider.configure { LoggedExec exec -> onlyIf rpmExists final ByteArrayOutputStream output = new ByteArrayOutputStream() exec.commandLine 'rpm', '-qp', '--queryformat', '%{License}', "${-> buildDist.get().outputs.files.singleFile}" diff --git a/distribution/packages/src/common/env/elasticsearch b/distribution/packages/src/common/env/elasticsearch index 38ecfb63da019..1b19349083ae6 100644 --- a/distribution/packages/src/common/env/elasticsearch +++ b/distribution/packages/src/common/env/elasticsearch @@ -10,7 +10,7 @@ # Elasticsearch configuration directory # Note: this setting will be shared with command-line tools -ES_PATH_CONF=${path.conf} +ES_PATH_CONF=@path.conf@ # Elasticsearch PID directory #PID_DIR=/var/run/elasticsearch diff --git a/distribution/packages/src/common/scripts/postinst 
b/distribution/packages/src/common/scripts/postinst index 1d0201445949a..3414e19d592ab 100644 --- a/distribution/packages/src/common/scripts/postinst +++ b/distribution/packages/src/common/scripts/postinst @@ -9,11 +9,11 @@ # $1=1 : indicates an upgrade # source the default env file -if [ -f "${path.env}" ]; then - . "${path.env}" +if [ -f "@path.env@" ]; then + . "@path.env@" fi -export ES_PATH_CONF=${ES_PATH_CONF:-${path.conf}} +export ES_PATH_CONF=${ES_PATH_CONF:-@path.conf@} IS_UPGRADE=false @@ -23,7 +23,7 @@ case "$1" in configure) # If $1=configure and $2 is set, this is an upgrade - if [ -n $2 ]; then + if [ -n "$2" ]; then IS_UPGRADE=true fi PACKAGE=deb @@ -90,4 +90,4 @@ if [ "$PACKAGE" = "deb" ]; then fi fi -${scripts.footer} +@scripts.footer@ diff --git a/distribution/packages/src/common/scripts/postrm b/distribution/packages/src/common/scripts/postrm index 9bbdcc78eda08..77f092ba1e59c 100644 --- a/distribution/packages/src/common/scripts/postrm +++ b/distribution/packages/src/common/scripts/postrm @@ -10,11 +10,11 @@ # $1=1 : indicates an upgrade # source the default env file -if [ -f "${path.env}" ]; then - . "${path.env}" +if [ -f "@path.env@" ]; then + . "@path.env@" fi -export ES_PATH_CONF=${ES_PATH_CONF:-${path.conf}} +export ES_PATH_CONF=${ES_PATH_CONF:-@path.conf@} REMOVE_DIRS=false REMOVE_JVM_OPTIONS_DIRECTORY=false @@ -114,4 +114,4 @@ if [ "$REMOVE_USER_AND_GROUP" = "true" ]; then fi fi -${scripts.footer} +@scripts.footer@ diff --git a/distribution/packages/src/common/scripts/posttrans b/distribution/packages/src/common/scripts/posttrans index d62c24ac34e85..6904948c32d2a 100644 --- a/distribution/packages/src/common/scripts/posttrans +++ b/distribution/packages/src/common/scripts/posttrans @@ -1,9 +1,9 @@ # source the default env file -if [ -f "${path.env}" ]; then - . "${path.env}" +if [ -f "@path.env@" ]; then + . "@path.env@" fi -export ES_PATH_CONF=${ES_PATH_CONF:-${path.conf}} +export ES_PATH_CONF=${ES_PATH_CONF:-@path.conf@} if [ ! -f "${ES_PATH_CONF}"/elasticsearch.keystore ]; then /usr/share/elasticsearch/bin/elasticsearch-keystore create @@ -19,4 +19,4 @@ else fi fi -${scripts.footer} +@scripts.footer@ diff --git a/distribution/packages/src/common/scripts/preinst b/distribution/packages/src/common/scripts/preinst index 82eb8458be005..76c617933d1c0 100644 --- a/distribution/packages/src/common/scripts/preinst +++ b/distribution/packages/src/common/scripts/preinst @@ -16,11 +16,11 @@ err_exit() { } # source the default env file -if [ -f "${path.env}" ]; then - . "${path.env}" +if [ -f "@path.env@" ]; then + . "@path.env@" fi -export ES_PATH_CONF=${ES_PATH_CONF:-${path.conf}} +export ES_PATH_CONF=${ES_PATH_CONF:-@path.conf@} case "$1" in @@ -80,4 +80,4 @@ case "$1" in ;; esac -${scripts.footer} +@scripts.footer@ diff --git a/distribution/packages/src/common/scripts/prerm b/distribution/packages/src/common/scripts/prerm index 59c66e1cf0574..c998a748a521f 100644 --- a/distribution/packages/src/common/scripts/prerm +++ b/distribution/packages/src/common/scripts/prerm @@ -10,11 +10,11 @@ # $1=1 : indicates an upgrade # source the default env file -if [ -f "${path.env}" ]; then - . "${path.env}" +if [ -f "@path.env@" ]; then + . 
"@path.env@" fi -export ES_PATH_CONF=${ES_PATH_CONF:-${path.conf}} +export ES_PATH_CONF=${ES_PATH_CONF:-@path.conf@} STOP_REQUIRED=false REMOVE_SERVICE=false @@ -70,4 +70,4 @@ if [ "$REMOVE_SERVICE" = "true" ]; then fi fi -${scripts.footer} +@scripts.footer@ diff --git a/distribution/packages/src/common/systemd/elasticsearch.service b/distribution/packages/src/common/systemd/elasticsearch.service index 797a3a4c740f3..9af491822c81c 100644 --- a/distribution/packages/src/common/systemd/elasticsearch.service +++ b/distribution/packages/src/common/systemd/elasticsearch.service @@ -9,10 +9,10 @@ Type=notify RuntimeDirectory=elasticsearch PrivateTmp=true Environment=ES_HOME=/usr/share/elasticsearch -Environment=ES_PATH_CONF=${path.conf} +Environment=ES_PATH_CONF=@path.conf@ Environment=PID_DIR=/var/run/elasticsearch Environment=ES_SD_NOTIFY=true -EnvironmentFile=-${path.env} +EnvironmentFile=-@path.env@ WorkingDirectory=/usr/share/elasticsearch @@ -63,4 +63,4 @@ TimeoutStartSec=75 [Install] WantedBy=multi-user.target -# Built for ${project.name}-${project.version} (${project.name}) +# Built for @project.name@-@project.version@ (@project.name@) diff --git a/distribution/packages/src/deb/copyright b/distribution/packages/src/deb/copyright index 44c7582666f21..03a1635637a43 100644 --- a/distribution/packages/src/deb/copyright +++ b/distribution/packages/src/deb/copyright @@ -1,4 +1,4 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Copyright: Elasticsearch B.V. -License: ${license.name} -${license.text} +License: @license.name@ +@license.text@ diff --git a/distribution/src/bin/elasticsearch-env b/distribution/src/bin/elasticsearch-env index 6cb41da4ba40d..df501cce7b12b 100644 --- a/distribution/src/bin/elasticsearch-env +++ b/distribution/src/bin/elasticsearch-env @@ -84,7 +84,7 @@ fi export HOSTNAME=$HOSTNAME -${source.path.env} +@source.path.env@ if [ -z "$ES_PATH_CONF" ]; then echo "ES_PATH_CONF must be set to the configuration path" @@ -94,9 +94,9 @@ fi # now make ES_PATH_CONF absolute ES_PATH_CONF=`cd "$ES_PATH_CONF"; pwd` -ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} -ES_DISTRIBUTION_TYPE=${es.distribution.type} -ES_BUNDLED_JDK=${es.bundled_jdk} +ES_DISTRIBUTION_FLAVOR=@es.distribution.flavor@ +ES_DISTRIBUTION_TYPE=@es.distribution.type@ +ES_BUNDLED_JDK=@es.bundled_jdk@ if [[ "$ES_BUNDLED_JDK" == "false" ]]; then echo "warning: no-jdk distributions that do not bundle a JDK are deprecated and will be removed in a future release" >&2 @@ -111,7 +111,7 @@ if [[ "$ES_DISTRIBUTION_TYPE" == "docker" ]]; then # Parse Docker env vars to customize Elasticsearch # - # e.g. Setting the env var cluster.name=testcluster + # e.g. Setting the env var cluster.name=testcluster or ES_CLUSTER_NAME=testcluster # # will cause Elasticsearch to be invoked with -Ecluster.name=testcluster # @@ -122,11 +122,18 @@ if [[ "$ES_DISTRIBUTION_TYPE" == "docker" ]]; then while IFS='=' read -r envvar_key envvar_value do # Elasticsearch settings need to have at least two dot separated lowercase - # words, e.g. `cluster.name` - if [[ "$envvar_key" =~ ^[a-z0-9_]+\.[a-z0-9_]+ ]]; then - if [[ ! -z $envvar_value ]]; then + # words, e.g. `cluster.name`, or uppercased with underscore separators and + # prefixed with `ES_SETTING_`, e.g. `ES_SETTING_CLUSTER_NAME`. Underscores in setting names + # are escaped by writing them as a double-underscore e.g. "__" + if [[ ! 
-z "$envvar_value" ]]; then + if [[ "$envvar_key" =~ ^[a-z0-9_]+\.[a-z0-9_]+ ]]; then es_opt="-E${envvar_key}=${envvar_value}" es_arg_array+=("${es_opt}") + elif [[ "$envvar_key" =~ ^ES_SETTING(_{1,2}[A-Z]+)+$ ]]; then + # The long-hand sed `y` command works in any sed variant. + envvar_key="$(echo "$envvar_key" | sed -e 's/^ES_SETTING_//; s/_/./g ; s/\.\./_/g; y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/' )" + es_opt="-E${envvar_key}=${envvar_value}" + es_arg_array+=("${es_opt}") fi fi done <<< "$(env)" diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index a07ee40e98878..eeea39a6ac399 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -25,9 +25,9 @@ if not defined ES_PATH_CONF ( rem now make ES_PATH_CONF absolute for %%I in ("%ES_PATH_CONF%..") do set ES_PATH_CONF=%%~dpfI -set ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} -set ES_DISTRIBUTION_TYPE=${es.distribution.type} -set ES_BUNDLED_JDK=${es.bundled_jdk} +set ES_DISTRIBUTION_FLAVOR=@es.distribution.flavor@ +set ES_DISTRIBUTION_TYPE=@es.distribution.type@ +set ES_BUNDLED_JDK=@es.bundled_jdk@ if "%ES_BUNDLED_JDK%" == "false" ( echo "warning: no-jdk distributions that do not bundle a JDK are deprecated and will be removed in a future release" >&2 diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index 4bceeeb02c383..2089be6444a61 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -17,7 +17,7 @@ echo elasticsearch-service-x64.exe was not found... exit /B 1 :okExe -set ES_VERSION=${project.version} +set ES_VERSION=@project.version@ if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%ES_HOME%\logs diff --git a/distribution/src/config/elasticsearch.yml b/distribution/src/config/elasticsearch.yml index 8e7808ccf64c6..ebed26b4f45eb 100644 --- a/distribution/src/config/elasticsearch.yml +++ b/distribution/src/config/elasticsearch.yml @@ -30,11 +30,11 @@ # # Path to directory where to store the data (separate multiple locations by comma): # -${path.data} +@path.data@ # # Path to log files: # -${path.logs} +@path.logs@ # # ----------------------------------- Memory ----------------------------------- # diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 81f476f566c2c..7a5422219e832 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -9,7 +9,7 @@ ## should create one or more files in the jvm.options.d ## directory containing your adjustments. ## -## See https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html +## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/jvm-options.html ## for more information. ## ################################################################ @@ -24,14 +24,15 @@ ## based on the available memory in your system and the roles ## each node is configured to fulfill. If specifying heap is ## required, it should be done through a file in jvm.options.d, -## and the min and max should be set to the same value. For -## example, to set the heap to 4 GB, create a new file in the -## jvm.options.d directory containing these lines: +## which should be named with .options suffix, and the min and +## max should be set to the same value. 
For example, to set the +## heap to 4 GB, create a new file in the jvm.options.d +## directory containing these lines: ## ## -Xms4g ## -Xmx4g ## -## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/heap-size.html ## for more information ## ################################################################ @@ -72,10 +73,10 @@ # specify an alternative path for heap dumps; ensure the directory exists and # has sufficient space -${heap.dump.path} +@heap.dump.path@ # specify an alternative path for JVM fatal error logs -${error.file} +@error.file@ ## GC logging --Xlog:gc*,gc+age=trace,safepoint:file=${loggc}:utctime,pid,tags:filecount=32,filesize=64m +-Xlog:gc*,gc+age=trace,safepoint:file=@loggc@:utctime,pid,tags:filecount=32,filesize=64m diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index 2f6606aca64f9..02533be7fdbcc 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -9,9 +9,9 @@ package org.elasticsearch.common.settings; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.common.Randomness; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; @@ -198,8 +198,10 @@ public void testUpgradeNoop() throws Exception { public void testFailWhenCannotConsumeSecretStream() throws Exception { assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm()); Path configDir = env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); - try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + try ( + Directory directory = newFSDirectory(configDir); + IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT) + ) { CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); indexOutput.writeByte((byte) 0); // No password SecureRandom random = Randomness.createSecure(); @@ -227,8 +229,10 @@ public void testFailWhenCannotConsumeSecretStream() throws Exception { public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception { assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm()); Path configDir = env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); - try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + try ( + Directory directory = newFSDirectory(configDir); + IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT) + ) { CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); indexOutput.writeByte((byte) 0); // No password SecureRandom random = Randomness.createSecure(); @@ -257,8 +261,10 @@ public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception { public void testFailWhenSecretStreamNotConsumed() throws Exception { assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm()); Path configDir = 
env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); - try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + try ( + Directory directory = newFSDirectory(configDir); + IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT) + ) { CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); indexOutput.writeByte((byte) 0); // No password SecureRandom random = Randomness.createSecure(); @@ -285,8 +291,10 @@ public void testFailWhenSecretStreamNotConsumed() throws Exception { public void testFailWhenEncryptedBytesStreamIsNotConsumed() throws Exception { assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm()); Path configDir = env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); - try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + try ( + Directory directory = newFSDirectory(configDir); + IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT) + ) { CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); indexOutput.writeByte((byte) 0); // No password SecureRandom random = Randomness.createSecure(); @@ -372,8 +380,10 @@ public void testIllegalSettingName() throws Exception { public void testBackcompatV1() throws Exception { assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm()); Path configDir = env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); - try (IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + try ( + Directory directory = newFSDirectory(configDir); + IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT) + ) { CodecUtil.writeHeader(output, "elasticsearch.keystore", 1); output.writeByte((byte) 0); // hasPassword = false output.writeString("PKCS12"); @@ -403,10 +413,12 @@ public void testBackcompatV1() throws Exception { public void testBackcompatV2() throws Exception { assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm()); Path configDir = env.configFile(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); byte[] fileBytes = new byte[20]; random().nextBytes(fileBytes); - try (IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + try ( + Directory directory = newFSDirectory(configDir); + IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT) + ) { CodecUtil.writeHeader(output, "elasticsearch.keystore", 2); output.writeByte((byte) 0); // hasPassword = false diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 21c95c2434fef..1e26ed3ccff0f 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -13,6 +13,10 @@ archivesBaseName = 'elasticsearch-plugin-cli' dependencies { compileOnly project(":server") compileOnly project(":libs:elasticsearch-cli") + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" api "org.bouncycastle:bcpg-fips:1.0.4" api "org.bouncycastle:bc-fips:1.0.2" testImplementation project(":test:framework") @@ -60,9 +64,12 @@ 
tasks.named("thirdPartyAudit").configure { tasks.named('splitPackagesAudit').configure { // o.e.plugins is owned by server, these shouldb be renamed to plugincli - ignoreClasses 'org.elasticsearch.plugins.InstallPluginCommand', + ignoreClasses 'org.elasticsearch.plugins.InstallPluginAction', + 'org.elasticsearch.plugins.InstallPluginCommand', 'org.elasticsearch.plugins.ListPluginsCommand', 'org.elasticsearch.plugins.PluginCli', 'org.elasticsearch.plugins.ProgressInputStream', - 'org.elasticsearch.plugins.RemovePluginCommand' + 'org.elasticsearch.plugins.RemovePluginAction', + 'org.elasticsearch.plugins.RemovePluginCommand', + 'org.elasticsearch.plugins.PluginDescriptor' } diff --git a/distribution/tools/plugin-cli/licenses/jackson-annotations-2.10.4.jar.sha1 b/distribution/tools/plugin-cli/licenses/jackson-annotations-2.10.4.jar.sha1 new file mode 100644 index 0000000000000..0c548bb0e7711 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/jackson-annotations-2.10.4.jar.sha1 @@ -0,0 +1 @@ +6ae6028aff033f194c9710ad87c224ccaadeed6c \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/jackson-annotations-LICENSE b/distribution/tools/plugin-cli/licenses/jackson-annotations-LICENSE new file mode 100644 index 0000000000000..ff94ef8c456a6 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/jackson-annotations-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor annotations is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/distribution/tools/plugin-cli/licenses/jackson-annotations-NOTICE.txt b/distribution/tools/plugin-cli/licenses/jackson-annotations-NOTICE.txt new file mode 100644 index 0000000000000..5ab1e5636037e --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/jackson-annotations-NOTICE.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
diff --git a/distribution/tools/plugin-cli/licenses/jackson-databind-2.10.4.jar.sha1 b/distribution/tools/plugin-cli/licenses/jackson-databind-2.10.4.jar.sha1 new file mode 100644 index 0000000000000..27d5a72cd27af --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/jackson-databind-2.10.4.jar.sha1 @@ -0,0 +1 @@ +76e9152e93d4cf052f93a64596f633ba5b1c8ed9 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/jackson-databind-LICENSE b/distribution/tools/plugin-cli/licenses/jackson-databind-LICENSE new file mode 100644 index 0000000000000..6acf75483f9b0 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/jackson-databind-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor databind module is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/distribution/tools/plugin-cli/licenses/jackson-databind-NOTICE.txt b/distribution/tools/plugin-cli/licenses/jackson-databind-NOTICE.txt new file mode 100644 index 0000000000000..5ab1e5636037e --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/jackson-databind-NOTICE.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may be licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginAction.java new file mode 100644 index 0000000000000..90d5621410d78 --- /dev/null +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginAction.java @@ -0,0 +1,1014 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.search.spell.LevenshteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.apache.lucene.util.Constants; +import org.bouncycastle.bcpg.ArmoredInputStream; +import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; +import org.bouncycastle.openpgp.PGPException; +import org.bouncycastle.openpgp.PGPPublicKey; +import org.bouncycastle.openpgp.PGPPublicKeyRingCollection; +import org.bouncycastle.openpgp.PGPSignature; +import org.bouncycastle.openpgp.PGPSignatureList; +import org.bouncycastle.openpgp.PGPUtil; +import org.bouncycastle.openpgp.jcajce.JcaPGPObjectFactory; +import org.bouncycastle.openpgp.operator.jcajce.JcaKeyFingerprintCalculator; +import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider; +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.bootstrap.PluginPolicyInfo; +import org.elasticsearch.bootstrap.PolicyUtil; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.jdk.JarHell; + +import java.io.BufferedReader; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLConnection; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFileAttributes; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; + +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; + +/** + * A command for the plugin cli to install a plugin into elasticsearch. + *

+ * The install command takes a plugin id, which may be any of the following:
+ * <ul>
+ * <li>An official elasticsearch plugin name</li>
+ * <li>Maven coordinates to a plugin zip</li>
+ * <li>A URL to a plugin zip</li>
+ * </ul>
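+ * For example, any of the following invocations would be accepted (illustrative plugin ids and URL, not taken from this change):
+ * <pre>{@code
+ * bin/elasticsearch-plugin install analysis-icu
+ * bin/elasticsearch-plugin install org.example:example-plugin:1.0.0
+ * bin/elasticsearch-plugin install https://example.org/example-plugin-1.0.0.zip
+ * }</pre>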

+ * <p>
+ * Plugins are packaged as zip files. Each packaged plugin must contain a plugin properties file.
+ * See {@link PluginInfo}.
+ * <p>
+ * The installation process first extracts the plugin files into a temporary
+ * directory in order to verify the plugin satisfies the following requirements:
+ * <ul>
+ * <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
+ * <li>The plugin is not a module already provided with elasticsearch</li>
+ * <li>If the plugin contains extra security permissions, the policy file is validated</li>
+ * </ul>
+ * <p>
+ * A plugin may also contain an optional {@code bin} directory which contains scripts. The
+ * scripts will be installed into a subdirectory of the elasticsearch bin directory, using
+ * the name of the plugin, and the scripts will be marked executable.
+ * <p>
+ * A plugin may also contain an optional {@code config} directory which contains configuration + * files specific to the plugin. The config files be installed into a subdirectory of the + * elasticsearch config directory, using the name of the plugin. If any files to be installed + * already exist, they will be skipped. + */ +class InstallPluginAction implements Closeable { + + private static final String PROPERTY_STAGING_ID = "es.plugins.staging"; + + // exit codes for install + /** + * A plugin with the same name is already installed. + */ + static final int PLUGIN_EXISTS = 1; + /** + * The plugin zip is not properly structured. + */ + static final int PLUGIN_MALFORMED = 2; + + /** + * The builtin modules, which are plugins, but cannot be installed or removed. + */ + private static final Set MODULES; + + static { + try (var stream = InstallPluginAction.class.getResourceAsStream("/modules.txt")) { + MODULES = Streams.readAllLines(stream).stream().map(String::trim).collect(Collectors.toUnmodifiableSet()); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + + /** The official plugins that can be installed simply by name. */ + static final Set OFFICIAL_PLUGINS; + static { + try (var stream = InstallPluginAction.class.getResourceAsStream("/plugins.txt")) { + OFFICIAL_PLUGINS = Streams.readAllLines(stream).stream().map(String::trim).collect(Sets.toUnmodifiableSortedSet()); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + + static final Set BIN_DIR_PERMS; + static final Set BIN_FILES_PERMS; + static final Set CONFIG_DIR_PERMS; + static final Set CONFIG_FILES_PERMS; + static final Set PLUGIN_DIR_PERMS; + static final Set PLUGIN_FILES_PERMS; + + static { + // Bin directory get chmod 755 + BIN_DIR_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rwxr-xr-x")); + + // Bin files also get chmod 755 + BIN_FILES_PERMS = BIN_DIR_PERMS; + + // Config directory get chmod 750 + CONFIG_DIR_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rwxr-x---")); + + // Config files get chmod 660 + CONFIG_FILES_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rw-rw----")); + + // Plugin directory get chmod 755 + PLUGIN_DIR_PERMS = BIN_DIR_PERMS; + + // Plugins files get chmod 644 + PLUGIN_FILES_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rw-r--r--")); + } + + private final Terminal terminal; + private Environment env; + private boolean batch; + + InstallPluginAction(Terminal terminal, Environment env, boolean batch) { + this.terminal = terminal; + this.env = env; + this.batch = batch; + } + + // pkg private for testing + void execute(List plugins) throws Exception { + if (plugins.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "at least one plugin id is required"); + } + + final Set uniquePluginIds = new HashSet<>(); + for (final PluginDescriptor plugin : plugins) { + if (uniquePluginIds.add(plugin.getId()) == false) { + throw new UserException(ExitCodes.USAGE, "duplicate plugin id [" + plugin.getId() + "]"); + } + } + + final Map> deleteOnFailures = new LinkedHashMap<>(); + for (final PluginDescriptor plugin : plugins) { + final String pluginId = plugin.getId(); + terminal.println("-> Installing " + pluginId); + try { + if ("x-pack".equals(pluginId)) { + handleInstallXPack(buildFlavor()); + } + + final List deleteOnFailure = new ArrayList<>(); + deleteOnFailures.put(pluginId, deleteOnFailure); + + final Path pluginZip = download(plugin, env.tmpFile()); + final Path 
extractedZip = unzip(pluginZip, env.pluginsFile()); + deleteOnFailure.add(extractedZip); + final PluginInfo pluginInfo = installPlugin(extractedZip, deleteOnFailure); + terminal.println("-> Installed " + pluginInfo.getName()); + // swap the entry by plugin id for one with the installed plugin name, it gives a cleaner error message for URL installs + deleteOnFailures.remove(pluginId); + deleteOnFailures.put(pluginInfo.getName(), deleteOnFailure); + } catch (final Exception installProblem) { + terminal.println("-> Failed installing " + pluginId); + for (final Map.Entry> deleteOnFailureEntry : deleteOnFailures.entrySet()) { + terminal.println("-> Rolling back " + deleteOnFailureEntry.getKey()); + boolean success = false; + try { + IOUtils.rm(deleteOnFailureEntry.getValue().toArray(new Path[0])); + success = true; + } catch (final IOException exceptionWhileRemovingFiles) { + final Exception exception = new Exception( + "failed rolling back installation of [" + deleteOnFailureEntry.getKey() + "]", + exceptionWhileRemovingFiles + ); + installProblem.addSuppressed(exception); + terminal.println("-> Failed rolling back " + deleteOnFailureEntry.getKey()); + } + if (success) { + terminal.println("-> Rolled back " + deleteOnFailureEntry.getKey()); + } + } + throw installProblem; + } + } + terminal.println("-> Please restart Elasticsearch to activate any plugins installed"); + } + + Build.Flavor buildFlavor() { + return Build.CURRENT.flavor(); + } + + private static void handleInstallXPack(final Build.Flavor flavor) throws UserException { + switch (flavor) { + case DEFAULT: + throw new UserException(ExitCodes.CONFIG, "this distribution of Elasticsearch contains X-Pack by default"); + case OSS: + throw new UserException( + ExitCodes.CONFIG, + "X-Pack is not available with the oss distribution; to use X-Pack features use the default distribution" + ); + case UNKNOWN: + throw new IllegalStateException("your distribution is broken"); + } + } + + /** + * Downloads the plugin and returns the file it was downloaded to. + */ + private Path download(PluginDescriptor plugin, Path tmpDir) throws Exception { + final String pluginId = plugin.getId(); + + if (OFFICIAL_PLUGINS.contains(pluginId)) { + final String url = getElasticUrl(getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, Platforms.PLATFORM_NAME); + terminal.println("-> Downloading " + pluginId + " from elastic"); + return downloadAndValidate(url, tmpDir, true); + } + + final String pluginUrl = plugin.getUrl(); + + // now try as maven coordinates, a valid URL would only have a colon and slash + String[] coordinates = pluginUrl.split(":"); + if (coordinates.length == 3 && pluginUrl.contains("/") == false && pluginUrl.startsWith("file:") == false) { + String mavenUrl = getMavenUrl(coordinates, Platforms.PLATFORM_NAME); + terminal.println("-> Downloading " + pluginId + " from maven central"); + return downloadAndValidate(mavenUrl, tmpDir, false); + } + + // fall back to plain old URL + if (pluginUrl.contains(":") == false) { + // definitely not a valid url, so assume it is a plugin name + List pluginSuggestions = checkMisspelledPlugin(pluginId); + String msg = "Unknown plugin " + pluginId; + if (pluginSuggestions.isEmpty() == false) { + msg += ", did you mean " + (pluginSuggestions.size() > 1 ? 
"any of " : "") + pluginSuggestions + "?"; + } + throw new UserException(ExitCodes.USAGE, msg); + } + terminal.println("-> Downloading " + URLDecoder.decode(pluginUrl, StandardCharsets.UTF_8)); + return downloadZip(pluginUrl, tmpDir); + } + + // pkg private so tests can override + String getStagingHash() { + return System.getProperty(PROPERTY_STAGING_ID); + } + + boolean isSnapshot() { + return Build.CURRENT.isSnapshot(); + } + + /** + * Returns the url for an official elasticsearch plugin. + */ + private String getElasticUrl( + final String stagingHash, + final Version version, + final boolean isSnapshot, + final String pluginId, + final String platform + ) throws IOException, UserException { + final String baseUrl; + if (isSnapshot && stagingHash == null) { + throw new UserException( + ExitCodes.CONFIG, + "attempted to install release build of official plugin on snapshot build of Elasticsearch" + ); + } + if (stagingHash != null) { + if (isSnapshot) { + baseUrl = nonReleaseUrl("snapshots", version, stagingHash, pluginId); + } else { + baseUrl = nonReleaseUrl("staging", version, stagingHash, pluginId); + } + } else { + baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId); + } + final String platformUrl = String.format( + Locale.ROOT, + "%s/%s-%s-%s.zip", + baseUrl, + pluginId, + platform, + Build.CURRENT.getQualifiedVersion() + ); + if (urlExists(platformUrl)) { + return platformUrl; + } + return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.CURRENT.getQualifiedVersion()); + } + + private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { + return String.format( + Locale.ROOT, + "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", + hostname, + version, + stagingHash, + pluginId + ); + } + + /** + * Returns the url for an elasticsearch plugin in maven. + */ + private String getMavenUrl(String[] coordinates, String platform) throws IOException { + final String groupId = coordinates[0].replace(".", "/"); + final String artifactId = coordinates[1]; + final String version = coordinates[2]; + final String baseUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%s/%s/%s", groupId, artifactId, version); + final String platformUrl = String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, artifactId, platform, version); + if (urlExists(platformUrl)) { + return platformUrl; + } + return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, artifactId, version); + } + + /** + * Returns {@code true} if the given url exists, and {@code false} otherwise. + *

+ * The given url must be {@code https} and existing means a {@code HEAD} request returns 200. + */ + // pkg private for tests to manipulate + @SuppressForbidden(reason = "Make HEAD request using URLConnection.connect()") + boolean urlExists(String urlString) throws IOException { + terminal.println(VERBOSE, "Checking if url exists: " + urlString); + URL url = new URL(urlString); + assert "https".equals(url.getProtocol()) : "Only http urls can be checked"; + HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection(); + urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer"); + urlConnection.setRequestMethod("HEAD"); + urlConnection.connect(); + return urlConnection.getResponseCode() == 200; + } + + /** + * Returns all the official plugin names that look similar to pluginId. + **/ + private List checkMisspelledPlugin(String pluginId) { + LevenshteinDistance ld = new LevenshteinDistance(); + List> scoredKeys = new ArrayList<>(); + for (String officialPlugin : OFFICIAL_PLUGINS) { + float distance = ld.getDistance(pluginId, officialPlugin); + if (distance > 0.7f) { + scoredKeys.add(new Tuple<>(distance, officialPlugin)); + } + } + CollectionUtil.timSort(scoredKeys, (a, b) -> b.v1().compareTo(a.v1())); + return scoredKeys.stream().map((a) -> a.v2()).collect(Collectors.toList()); + } + + /** Downloads a zip from the url, into a temp file under the given temp dir. */ + // pkg private for tests + @SuppressForbidden(reason = "We use getInputStream to download plugins") + Path downloadZip(String urlString, Path tmpDir) throws IOException { + terminal.println(VERBOSE, "Retrieving zip from " + urlString); + URL url = new URL(urlString); + Path zip = Files.createTempFile(tmpDir, null, ".zip"); + URLConnection urlConnection = url.openConnection(); + urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer"); + try ( + InputStream in = batch + ? 
urlConnection.getInputStream() + : new TerminalProgressInputStream(urlConnection.getInputStream(), urlConnection.getContentLength(), terminal) + ) { + // must overwrite since creating the temp file above actually created the file + Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING); + } + return zip; + } + + // for testing only + void setEnvironment(Environment env) { + this.env = env; + } + + // for testing only + void setBatch(boolean batch) { + this.batch = batch; + } + + /** + * content length might be -1 for unknown and progress only makes sense if the content length is greater than 0 + */ + private class TerminalProgressInputStream extends ProgressInputStream { + + private final Terminal terminal; + private int width = 50; + private final boolean enabled; + + TerminalProgressInputStream(InputStream is, int expectedTotalSize, Terminal terminal) { + super(is, expectedTotalSize); + this.terminal = terminal; + this.enabled = expectedTotalSize > 0; + } + + @Override + public void onProgress(int percent) { + if (enabled) { + int currentPosition = percent * width / 100; + StringBuilder sb = new StringBuilder("\r["); + sb.append(String.join("=", Collections.nCopies(currentPosition, ""))); + if (currentPosition > 0 && percent < 100) { + sb.append(">"); + } + sb.append(String.join(" ", Collections.nCopies(width - currentPosition, ""))); + sb.append("] %s   "); + if (percent == 100) { + sb.append("\n"); + } + terminal.print(Terminal.Verbosity.NORMAL, String.format(Locale.ROOT, sb.toString(), percent + "%")); + } + } + } + + @SuppressForbidden(reason = "URL#openStream") + private InputStream urlOpenStream(final URL url) throws IOException { + return url.openStream(); + } + + /** + * Downloads a ZIP from the URL. This method also validates the downloaded plugin ZIP via the following means: + *

+ * <ul>
+ * <li>
+ * For an official plugin we download the SHA-512 checksum and validate the integrity of the downloaded ZIP. We also download the
+ * armored signature and validate the authenticity of the downloaded ZIP.
+ * </li>
+ * <li>
+ * For a non-official plugin we download the SHA-512 checksum and fall back to the SHA-1 checksum and validate the integrity of the
+ * downloaded ZIP.
+ * </li>
+ * </ul>
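+ * For instance (illustrative file name, not taken from this change), for a download URL ending in {@code analysis-icu-8.0.0.zip}
+ * the installer fetches {@code analysis-icu-8.0.0.zip.sha512} and, for an official plugin, the detached signature
+ * {@code analysis-icu-8.0.0.zip.asc}.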
+ * + * @param urlString the URL of the plugin ZIP + * @param tmpDir a temporary directory to write downloaded files to + * @param officialPlugin true if the plugin is an official plugin + * @return the path to the downloaded plugin ZIP + * @throws IOException if an I/O exception occurs download or reading files and resources + * @throws PGPException if an exception occurs verifying the downloaded ZIP signature + * @throws UserException if checksum validation fails + */ + private Path downloadAndValidate(final String urlString, final Path tmpDir, final boolean officialPlugin) throws IOException, + PGPException, UserException { + Path zip = downloadZip(urlString, tmpDir); + pathsToDeleteOnShutdown.add(zip); + String checksumUrlString = urlString + ".sha512"; + URL checksumUrl = openUrl(checksumUrlString); + String digestAlgo = "SHA-512"; + if (checksumUrl == null && officialPlugin == false) { + // fallback to sha1, until 7.0, but with warning + terminal.println( + "Warning: sha512 not found, falling back to sha1. This behavior is deprecated and will be removed in a " + + "future release. Please update the plugin to use a sha512 checksum." + ); + checksumUrlString = urlString + ".sha1"; + checksumUrl = openUrl(checksumUrlString); + digestAlgo = "SHA-1"; + } + if (checksumUrl == null) { + throw new UserException(ExitCodes.IO_ERROR, "Plugin checksum missing: " + checksumUrlString); + } + final String expectedChecksum; + try (InputStream in = urlOpenStream(checksumUrl)) { + /* + * The supported format of the SHA-1 files is a single-line file containing the SHA-1. The supported format of the SHA-512 files + * is a single-line file containing the SHA-512 and the filename, separated by two spaces. For SHA-1, we verify that the hash + * matches, and that the file contains a single line. For SHA-512, we verify that the hash and the filename match, and that the + * file contains a single line. 
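+ * As an illustration (hypothetical digest and file name, not taken from this change), a SHA-512 checksum file contains a single
+ * line such as "ab12...<128 hex characters in total>  analysis-icu-8.0.0.zip", while a SHA-1 checksum file contains only the
+ * 40-character hex digest.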
+ */ + if (digestAlgo.equals("SHA-1")) { + final BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); + expectedChecksum = checksumReader.readLine(); + if (checksumReader.readLine() != null) { + throw new UserException(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); + } + } else { + final BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); + final String checksumLine = checksumReader.readLine(); + final String[] fields = checksumLine.split(" {2}"); + if (officialPlugin && fields.length != 2 || officialPlugin == false && fields.length > 2) { + throw new UserException(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); + } + expectedChecksum = fields[0]; + if (fields.length == 2) { + // checksum line contains filename as well + final String[] segments = URI.create(urlString).getPath().split("/"); + final String expectedFile = segments[segments.length - 1]; + if (fields[1].equals(expectedFile) == false) { + final String message = String.format( + Locale.ROOT, + "checksum file at [%s] is not for this plugin, expected [%s] but was [%s]", + checksumUrl, + expectedFile, + fields[1] + ); + throw new UserException(ExitCodes.IO_ERROR, message); + } + } + if (checksumReader.readLine() != null) { + throw new UserException(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); + } + } + } + + // read the bytes of the plugin zip in chunks to avoid out of memory errors + try (InputStream zis = Files.newInputStream(zip)) { + try { + final MessageDigest digest = MessageDigest.getInstance(digestAlgo); + final byte[] bytes = new byte[8192]; + int read; + while ((read = zis.read(bytes)) != -1) { + assert read > 0 : read; + digest.update(bytes, 0, read); + } + final String actualChecksum = MessageDigests.toHexString(digest.digest()); + if (expectedChecksum.equals(actualChecksum) == false) { + throw new UserException( + ExitCodes.IO_ERROR, + digestAlgo + " mismatch, expected " + expectedChecksum + " but got " + actualChecksum + ); + } + } catch (final NoSuchAlgorithmException e) { + // this should never happen as we are using SHA-1 and SHA-512 here + throw new AssertionError(e); + } + } + + if (officialPlugin) { + verifySignature(zip, urlString); + } + + return zip; + } + + /** + * Verify the signature of the downloaded plugin ZIP. The signature is obtained from the source of the downloaded plugin by appending + * ".asc" to the URL. It is expected that the plugin is signed with the Elastic signing key with ID D27D666CD88E42B4. 
+ * + * @param zip the path to the downloaded plugin ZIP + * @param urlString the URL source of the downloade plugin ZIP + * @throws IOException if an I/O exception occurs reading from various input streams + * @throws PGPException if the PGP implementation throws an internal exception during verification + */ + void verifySignature(final Path zip, final String urlString) throws IOException, PGPException { + final String ascUrlString = urlString + ".asc"; + final URL ascUrl = openUrl(ascUrlString); + try ( + // fin is a file stream over the downloaded plugin zip whose signature to verify + InputStream fin = pluginZipInputStream(zip); + // sin is a URL stream to the signature corresponding to the downloaded plugin zip + InputStream sin = urlOpenStream(ascUrl); + // ain is a input stream to the public key in ASCII-Armor format (RFC4880) + InputStream ain = new ArmoredInputStream(getPublicKey()) + ) { + final JcaPGPObjectFactory factory = new JcaPGPObjectFactory(PGPUtil.getDecoderStream(sin)); + final PGPSignature signature = ((PGPSignatureList) factory.nextObject()).get(0); + + // validate the signature has key ID matching our public key ID + final String keyId = Long.toHexString(signature.getKeyID()).toUpperCase(Locale.ROOT); + if (getPublicKeyId().equals(keyId) == false) { + throw new IllegalStateException("key id [" + keyId + "] does not match expected key id [" + getPublicKeyId() + "]"); + } + + // compute the signature of the downloaded plugin zip + final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator()); + final PGPPublicKey key = collection.getPublicKey(signature.getKeyID()); + signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleFipsProvider()), key); + final byte[] buffer = new byte[1024]; + int read; + while ((read = fin.read(buffer)) != -1) { + signature.update(buffer, 0, read); + } + + // finally we verify the signature of the downloaded plugin zip matches the expected signature + if (signature.verify() == false) { + throw new IllegalStateException("signature verification for [" + urlString + "] failed"); + } + } + } + + /** + * An input stream to the raw bytes of the plugin ZIP. + * + * @param zip the path to the downloaded plugin ZIP + * @return an input stream to the raw bytes of the plugin ZIP. + * @throws IOException if an I/O exception occurs preparing the input stream + */ + InputStream pluginZipInputStream(final Path zip) throws IOException { + return Files.newInputStream(zip); + } + + /** + * Return the public key ID of the signing key that is expected to have signed the official plugin. + * + * @return the public key ID + */ + String getPublicKeyId() { + return "D27D666CD88E42B4"; + } + + /** + * An input stream to the public key of the signing key. + * + * @return an input stream to the public key + */ + InputStream getPublicKey() { + return InstallPluginAction.class.getResourceAsStream("/public_key.asc"); + } + + /** + * Creates a URL and opens a connection. + *

+ * If the URL returns a 404, {@code null} is returned, otherwise the open URL opject is returned. + */ + // pkg private for tests + URL openUrl(String urlString) throws IOException { + URL checksumUrl = new URL(urlString); + HttpURLConnection connection = (HttpURLConnection) checksumUrl.openConnection(); + if (connection.getResponseCode() == 404) { + return null; + } + return checksumUrl; + } + + private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException { + // unzip plugin to a staging temp dir + + final Path target = stagingDirectory(pluginsDir); + pathsToDeleteOnShutdown.add(target); + + try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) { + ZipEntry entry; + byte[] buffer = new byte[8192]; + while ((entry = zipInput.getNextEntry()) != null) { + if (entry.getName().startsWith("elasticsearch/")) { + throw new UserException( + PLUGIN_MALFORMED, + "This plugin was built with an older plugin structure." + + " Contact the plugin author to remove the intermediate \"elasticsearch\" directory within the plugin zip." + ); + } + Path targetFile = target.resolve(entry.getName()); + + // Using the entry name as a path can result in an entry outside of the plugin dir, + // either if the name starts with the root of the filesystem, or it is a relative + // entry like ../whatever. This check attempts to identify both cases by first + // normalizing the path (which removes foo/..) and ensuring the normalized entry + // is still rooted with the target plugin directory. + if (targetFile.normalize().startsWith(target) == false) { + throw new UserException( + PLUGIN_MALFORMED, + "Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory" + ); + } + + // be on the safe side: do not rely on that directories are always extracted + // before their children (although this makes sense, but is it guaranteed?) + if (Files.isSymbolicLink(targetFile.getParent()) == false) { + Files.createDirectories(targetFile.getParent()); + } + if (entry.isDirectory() == false) { + try (OutputStream out = Files.newOutputStream(targetFile)) { + int len; + while ((len = zipInput.read(buffer)) >= 0) { + out.write(buffer, 0, len); + } + } + } + zipInput.closeEntry(); + } + } catch (UserException e) { + IOUtils.rm(target); + throw e; + } + Files.delete(zip); + return target; + } + + private Path stagingDirectory(Path pluginsDir) throws IOException { + try { + return Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(PLUGIN_DIR_PERMS)); + } catch (UnsupportedOperationException e) { + return stagingDirectoryWithoutPosixPermissions(pluginsDir); + } + } + + private Path stagingDirectoryWithoutPosixPermissions(Path pluginsDir) throws IOException { + return Files.createTempDirectory(pluginsDir, ".installing-"); + } + + // checking for existing version of the plugin + private void verifyPluginName(Path pluginPath, String pluginName) throws UserException, IOException { + // don't let user install plugin conflicting with module... 
+ // they might be unavoidably in maven central and are packaged up the same way) + if (MODULES.contains(pluginName)) { + throw new UserException(ExitCodes.USAGE, "plugin '" + pluginName + "' cannot be installed as a plugin, it is a system module"); + } + + final Path destination = pluginPath.resolve(pluginName); + if (Files.exists(destination)) { + final String message = String.format( + Locale.ROOT, + "plugin directory [%s] already exists; if you need to update the plugin, " + "uninstall it first using command 'remove %s'", + destination, + pluginName + ); + throw new UserException(PLUGIN_EXISTS, message); + } + } + + /** + * Load information about the plugin, and verify it can be installed with no errors. + */ + private PluginInfo loadPluginInfo(Path pluginRoot) throws Exception { + final PluginInfo info = PluginInfo.readFromProperties(pluginRoot); + if (info.hasNativeController()) { + throw new IllegalStateException("plugins can not have native controllers"); + } + PluginsService.verifyCompatibility(info); + + // checking for existing version of the plugin + verifyPluginName(env.pluginsFile(), info.getName()); + + PluginsService.checkForFailedPluginRemovals(env.pluginsFile()); + + terminal.println(VERBOSE, info.toString()); + + // check for jar hell before any copying + jarHellCheck(info, pluginRoot, env.pluginsFile(), env.modulesFile()); + + return info; + } + + private static final String LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR; + + static { + LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR = String.format(Locale.ROOT, ".+%1$slib%1$stools%1$splugin-cli%1$s[^%1$s]+\\.jar", "(/|\\\\)"); + } + + /** + * check a candidate plugin for jar hell before installing it + */ + void jarHellCheck(PluginInfo candidateInfo, Path candidateDir, Path pluginsDir, Path modulesDir) throws Exception { + // create list of current jars in classpath + final Set classpath = JarHell.parseClassPath().stream().filter(url -> { + try { + return url.toURI().getPath().matches(LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR) == false; + } catch (final URISyntaxException e) { + throw new AssertionError(e); + } + }).collect(Collectors.toSet()); + + // read existing bundles. this does some checks on the installation too. + Set bundles = new HashSet<>(PluginsService.getPluginBundles(pluginsDir)); + bundles.addAll(PluginsService.getModuleBundles(modulesDir)); + bundles.add(new PluginsService.Bundle(candidateInfo, candidateDir)); + List sortedBundles = PluginsService.sortBundles(bundles); + + // check jarhell of all plugins so we know this plugin and anything depending on it are ok together + // TODO: optimize to skip any bundles not connected to the candidate plugin? + Map> transitiveUrls = new HashMap<>(); + for (PluginsService.Bundle bundle : sortedBundles) { + PluginsService.checkBundleJarHell(classpath, bundle, transitiveUrls); + } + + // TODO: no jars should be an error + // TODO: verify the classname exists in one of the jars! + } + + /** + * Installs the plugin from {@code tmpRoot} into the plugins dir. + * If the plugin has a bin dir and/or a config dir, those are moved. 
+ */ + private PluginInfo installPlugin(Path tmpRoot, List deleteOnFailure) throws Exception { + final PluginInfo info = loadPluginInfo(tmpRoot); + checkCanInstallationProceed(terminal, Build.CURRENT.flavor(), info); + PluginPolicyInfo pluginPolicy = PolicyUtil.getPluginPolicyInfo(tmpRoot, env.tmpFile()); + if (pluginPolicy != null) { + Set permissions = PluginSecurity.getPermissionDescriptions(pluginPolicy, env.tmpFile()); + PluginSecurity.confirmPolicyExceptions(terminal, permissions, batch); + } + + final Path destination = env.pluginsFile().resolve(info.getName()); + deleteOnFailure.add(destination); + + installPluginSupportFiles( + info, + tmpRoot, + env.binFile().resolve(info.getName()), + env.configFile().resolve(info.getName()), + deleteOnFailure + ); + movePlugin(tmpRoot, destination); + return info; + } + + /** + * Moves bin and config directories from the plugin if they exist + */ + private void installPluginSupportFiles(PluginInfo info, Path tmpRoot, Path destBinDir, Path destConfigDir, List deleteOnFailure) + throws Exception { + Path tmpBinDir = tmpRoot.resolve("bin"); + if (Files.exists(tmpBinDir)) { + deleteOnFailure.add(destBinDir); + installBin(info, tmpBinDir, destBinDir); + } + + Path tmpConfigDir = tmpRoot.resolve("config"); + if (Files.exists(tmpConfigDir)) { + // some files may already exist, and we don't remove plugin config files on plugin removal, + // so any installed config files are left on failure too + installConfig(info, tmpConfigDir, destConfigDir); + } + } + + /** + * Moves the plugin directory into its final destination. + **/ + private void movePlugin(Path tmpRoot, Path destination) throws IOException { + Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE); + Files.walkFileTree(destination, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException { + final String parentDirName = file.getParent().getFileName().toString(); + if ("bin".equals(parentDirName) + // "MacOS" is an alternative to "bin" on macOS + || (Constants.MAC_OS_X && "MacOS".equals(parentDirName))) { + setFileAttributes(file, BIN_FILES_PERMS); + } else { + setFileAttributes(file, PLUGIN_FILES_PERMS); + } + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException { + setFileAttributes(dir, PLUGIN_DIR_PERMS); + return FileVisitResult.CONTINUE; + } + }); + } + + /** + * Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. 
+ */ + private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { + if (Files.isDirectory(tmpBinDir) == false) { + throw new UserException(PLUGIN_MALFORMED, "bin in plugin " + info.getName() + " is not a directory"); + } + Files.createDirectories(destBinDir); + setFileAttributes(destBinDir, BIN_DIR_PERMS); + + try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { + for (Path srcFile : stream) { + if (Files.isDirectory(srcFile)) { + throw new UserException( + PLUGIN_MALFORMED, + "Directories not allowed in bin dir " + "for plugin " + info.getName() + ", found " + srcFile.getFileName() + ); + } + + Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); + Files.copy(srcFile, destFile); + setFileAttributes(destFile, BIN_FILES_PERMS); + } + } + IOUtils.rm(tmpBinDir); // clean up what we just copied + } + + /** + * Copies the files from {@code tmpConfigDir} into {@code destConfigDir}. + * Any files existing in both the source and destination will be skipped. + */ + private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { + if (Files.isDirectory(tmpConfigDir) == false) { + throw new UserException(PLUGIN_MALFORMED, "config in plugin " + info.getName() + " is not a directory"); + } + + Files.createDirectories(destConfigDir); + setFileAttributes(destConfigDir, CONFIG_DIR_PERMS); + final PosixFileAttributeView destConfigDirAttributesView = Files.getFileAttributeView( + destConfigDir.getParent(), + PosixFileAttributeView.class + ); + final PosixFileAttributes destConfigDirAttributes = destConfigDirAttributesView != null + ? destConfigDirAttributesView.readAttributes() + : null; + if (destConfigDirAttributes != null) { + setOwnerGroup(destConfigDir, destConfigDirAttributes); + } + + try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { + for (Path srcFile : stream) { + if (Files.isDirectory(srcFile)) { + throw new UserException(PLUGIN_MALFORMED, "Directories not allowed in config dir for plugin " + info.getName()); + } + + Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); + if (Files.exists(destFile) == false) { + Files.copy(srcFile, destFile); + setFileAttributes(destFile, CONFIG_FILES_PERMS); + if (destConfigDirAttributes != null) { + setOwnerGroup(destFile, destConfigDirAttributes); + } + } + } + } + IOUtils.rm(tmpConfigDir); // clean up what we just copied + } + + private static void setOwnerGroup(final Path path, final PosixFileAttributes attributes) throws IOException { + Objects.requireNonNull(attributes); + PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); + assert fileAttributeView != null; + fileAttributeView.setOwner(attributes.owner()); + fileAttributeView.setGroup(attributes.group()); + } + + /** + * Sets the attributes for a path iff posix attributes are supported + */ + private static void setFileAttributes(final Path path, final Set permissions) throws IOException { + PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); + if (fileAttributeView != null) { + Files.setPosixFilePermissions(path, permissions); + } + } + + private final List pathsToDeleteOnShutdown = new ArrayList<>(); + + @Override + public void close() throws IOException { + IOUtils.rm(pathsToDeleteOnShutdown.toArray(new Path[pathsToDeleteOnShutdown.size()])); + } + + static void checkCanInstallationProceed(Terminal terminal, Build.Flavor flavor, PluginInfo info) throws 
Exception { + if (info.isLicensed() == false) { + return; + } + + if (flavor == Build.Flavor.DEFAULT) { + return; + } + + List.of( + "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@", + "@ ERROR: This is a licensed plugin @", + "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@", + "", + "This plugin is covered by the Elastic license, but this", + "installation of Elasticsearch is: [" + flavor + "]." + ).forEach(terminal::errorPrintln); + + throw new UserException(ExitCodes.NOPERM, "Plugin license is incompatible with [" + flavor + "] installation"); + } +} diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index aa7417c2e4808..7e9dd100204ed 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -11,90 +11,24 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.apache.lucene.search.spell.LevenshteinDistance; -import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.Constants; -import org.bouncycastle.bcpg.ArmoredInputStream; -import org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider; -import org.bouncycastle.openpgp.PGPException; -import org.bouncycastle.openpgp.PGPPublicKey; -import org.bouncycastle.openpgp.PGPPublicKeyRingCollection; -import org.bouncycastle.openpgp.PGPSignature; -import org.bouncycastle.openpgp.PGPSignatureList; -import org.bouncycastle.openpgp.PGPUtil; -import org.bouncycastle.openpgp.jcajce.JcaPGPObjectFactory; -import org.bouncycastle.openpgp.operator.jcajce.JcaKeyFingerprintCalculator; -import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider; -import org.elasticsearch.Build; -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.PluginPolicyInfo; -import org.elasticsearch.bootstrap.PolicyUtil; import org.elasticsearch.cli.EnvironmentAwareCommand; -import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; -import org.elasticsearch.jdk.JarHell; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.UncheckedIOException; -import java.net.HttpURLConnection; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.net.URLConnection; -import java.net.URLDecoder; -import java.nio.charset.StandardCharsets; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.StandardCopyOption; -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.PosixFilePermissions; -import 
java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Set; import java.util.stream.Collectors; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; - -import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** * A command for the plugin cli to install a plugin into elasticsearch. - * + *

 * The install command takes a plugin id, which may be any of the following:
 * <ul>
 * <li>An official elasticsearch plugin name</li>
 * <li>Maven coordinates to a plugin zip</li>
 * <li>A URL to a plugin zip</li>
 * </ul>
- *
+ * <p>
 * Plugins are packaged as zip files. Each packaged plugin must contain a plugin properties file.
 * See {@link PluginInfo}.
 *
@@ -117,64 +51,9 @@ */ class InstallPluginCommand extends EnvironmentAwareCommand { - private static final String PROPERTY_STAGING_ID = "es.plugins.staging"; - - // exit codes for install - /** A plugin with the same name is already installed. */ - static final int PLUGIN_EXISTS = 1; - /** The plugin zip is not properly structured. */ - static final int PLUGIN_MALFORMED = 2; - - /** The builtin modules, which are plugins, but cannot be installed or removed. */ - private static final Set MODULES; - static { - try (var stream = InstallPluginCommand.class.getResourceAsStream("/modules.txt")) { - MODULES = Streams.readAllLines(stream).stream().map(String::trim).collect(Collectors.toUnmodifiableSet()); - } catch (final IOException e) { - throw new UncheckedIOException(e); - } - } - - /** The official plugins that can be installed simply by name. */ - static final Set OFFICIAL_PLUGINS; - static { - try (var stream = InstallPluginCommand.class.getResourceAsStream("/plugins.txt")) { - OFFICIAL_PLUGINS = Streams.readAllLines(stream).stream().map(String::trim).collect(Sets.toUnmodifiableSortedSet()); - } catch (final IOException e) { - throw new UncheckedIOException(e); - } - } - private final OptionSpec batchOption; private final OptionSpec arguments; - static final Set BIN_DIR_PERMS; - static final Set BIN_FILES_PERMS; - static final Set CONFIG_DIR_PERMS; - static final Set CONFIG_FILES_PERMS; - static final Set PLUGIN_DIR_PERMS; - static final Set PLUGIN_FILES_PERMS; - - static { - // Bin directory get chmod 755 - BIN_DIR_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rwxr-xr-x")); - - // Bin files also get chmod 755 - BIN_FILES_PERMS = BIN_DIR_PERMS; - - // Config directory get chmod 750 - CONFIG_DIR_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rwxr-x---")); - - // Config files get chmod 660 - CONFIG_FILES_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rw-rw----")); - - // Plugin directory get chmod 755 - PLUGIN_DIR_PERMS = BIN_DIR_PERMS; - - // Plugins files get chmod 644 - PLUGIN_FILES_PERMS = Collections.unmodifiableSet(PosixFilePermissions.fromString("rw-r--r--")); - } - InstallPluginCommand() { super("Install a plugin"); this.batchOption = parser.acceptsAll( @@ -187,7 +66,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand { @Override protected void printAdditionalHelp(Terminal terminal) { terminal.println("The following official plugins may be installed by name:"); - for (String plugin : OFFICIAL_PLUGINS) { + for (String plugin : InstallPluginAction.OFFICIAL_PLUGINS) { terminal.println(" " + plugin); } terminal.println(""); @@ -195,811 +74,13 @@ protected void printAdditionalHelp(Terminal terminal) { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - List pluginId = arguments.values(options); + List plugins = arguments.values(options) + .stream() + .map(id -> new PluginDescriptor(id, id)) + .collect(Collectors.toList()); final boolean isBatch = options.has(batchOption); - execute(terminal, pluginId, isBatch, env); - } - - // pkg private for testing - void execute(Terminal terminal, List pluginIds, boolean isBatch, Environment env) throws Exception { - if (pluginIds.isEmpty()) { - throw new UserException(ExitCodes.USAGE, "at least one plugin id is required"); - } - - final Set uniquePluginIds = new HashSet<>(); - for (final String pluginId : pluginIds) { - if (uniquePluginIds.add(pluginId) == false) { - throw new UserException(ExitCodes.USAGE, "duplicate 
plugin id [" + pluginId + "]"); - } - } - - final Map> deleteOnFailures = new LinkedHashMap<>(); - for (final String pluginId : pluginIds) { - terminal.println("-> Installing " + pluginId); - try { - if ("x-pack".equals(pluginId)) { - handleInstallXPack(buildFlavor()); - } - - final List deleteOnFailure = new ArrayList<>(); - deleteOnFailures.put(pluginId, deleteOnFailure); - - final Path pluginZip = download(terminal, pluginId, env.tmpFile(), isBatch); - final Path extractedZip = unzip(pluginZip, env.pluginsFile()); - deleteOnFailure.add(extractedZip); - final PluginInfo pluginInfo = installPlugin(terminal, isBatch, extractedZip, env, deleteOnFailure); - terminal.println("-> Installed " + pluginInfo.getName()); - // swap the entry by plugin id for one with the installed plugin name, it gives a cleaner error message for URL installs - deleteOnFailures.remove(pluginId); - deleteOnFailures.put(pluginInfo.getName(), deleteOnFailure); - } catch (final Exception installProblem) { - terminal.println("-> Failed installing " + pluginId); - for (final Map.Entry> deleteOnFailureEntry : deleteOnFailures.entrySet()) { - terminal.println("-> Rolling back " + deleteOnFailureEntry.getKey()); - boolean success = false; - try { - IOUtils.rm(deleteOnFailureEntry.getValue().toArray(new Path[0])); - success = true; - } catch (final IOException exceptionWhileRemovingFiles) { - final Exception exception = new Exception( - "failed rolling back installation of [" + deleteOnFailureEntry.getKey() + "]", - exceptionWhileRemovingFiles - ); - installProblem.addSuppressed(exception); - terminal.println("-> Failed rolling back " + deleteOnFailureEntry.getKey()); - } - if (success) { - terminal.println("-> Rolled back " + deleteOnFailureEntry.getKey()); - } - } - throw installProblem; - } - } - terminal.println("-> Please restart Elasticsearch to activate any plugins installed"); - } - - Build.Flavor buildFlavor() { - return Build.CURRENT.flavor(); - } - - private static void handleInstallXPack(final Build.Flavor flavor) throws UserException { - switch (flavor) { - case DEFAULT: - throw new UserException(ExitCodes.CONFIG, "this distribution of Elasticsearch contains X-Pack by default"); - case OSS: - throw new UserException( - ExitCodes.CONFIG, - "X-Pack is not available with the oss distribution; to use X-Pack features use the default distribution" - ); - case UNKNOWN: - throw new IllegalStateException("your distribution is broken"); - } - } - - /** Downloads the plugin and returns the file it was downloaded to. 
*/ - private Path download(Terminal terminal, String pluginId, Path tmpDir, boolean isBatch) throws Exception { - if (OFFICIAL_PLUGINS.contains(pluginId)) { - final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, Platforms.PLATFORM_NAME); - terminal.println("-> Downloading " + pluginId + " from elastic"); - return downloadAndValidate(terminal, url, tmpDir, true, isBatch); - } - - // now try as maven coordinates, a valid URL would only have a colon and slash - String[] coordinates = pluginId.split(":"); - if (coordinates.length == 3 && pluginId.contains("/") == false && pluginId.startsWith("file:") == false) { - String mavenUrl = getMavenUrl(terminal, coordinates, Platforms.PLATFORM_NAME); - terminal.println("-> Downloading " + pluginId + " from maven central"); - return downloadAndValidate(terminal, mavenUrl, tmpDir, false, isBatch); - } - - // fall back to plain old URL - if (pluginId.contains(":") == false) { - // definitely not a valid url, so assume it is a plugin name - List plugins = checkMisspelledPlugin(pluginId); - String msg = "Unknown plugin " + pluginId; - if (plugins.isEmpty() == false) { - msg += ", did you mean " + (plugins.size() == 1 ? "[" + plugins.get(0) + "]" : "any of " + plugins.toString()) + "?"; - } - throw new UserException(ExitCodes.USAGE, msg); - } - terminal.println("-> Downloading " + URLDecoder.decode(pluginId, "UTF-8")); - return downloadZip(terminal, pluginId, tmpDir, isBatch); - } - - // pkg private so tests can override - String getStagingHash() { - return System.getProperty(PROPERTY_STAGING_ID); - } - - boolean isSnapshot() { - return Build.CURRENT.isSnapshot(); - } - - /** Returns the url for an official elasticsearch plugin. */ - private String getElasticUrl( - final Terminal terminal, - final String stagingHash, - final Version version, - final boolean isSnapshot, - final String pluginId, - final String platform - ) throws IOException, UserException { - final String baseUrl; - if (isSnapshot && stagingHash == null) { - throw new UserException( - ExitCodes.CONFIG, - "attempted to install release build of official plugin on snapshot build of Elasticsearch" - ); - } - if (stagingHash != null) { - if (isSnapshot) { - baseUrl = nonReleaseUrl("snapshots", version, stagingHash, pluginId); - } else { - baseUrl = nonReleaseUrl("staging", version, stagingHash, pluginId); - } - } else { - baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId); - } - final String platformUrl = String.format( - Locale.ROOT, - "%s/%s-%s-%s.zip", - baseUrl, - pluginId, - platform, - Build.CURRENT.getQualifiedVersion() - ); - if (urlExists(terminal, platformUrl)) { - return platformUrl; - } - return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.CURRENT.getQualifiedVersion()); - } - - private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { - return String.format( - Locale.ROOT, - "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", - hostname, - version, - stagingHash, - pluginId - ); - } - - /** Returns the url for an elasticsearch plugin in maven. 
*/ - private String getMavenUrl(Terminal terminal, String[] coordinates, String platform) throws IOException { - final String groupId = coordinates[0].replace(".", "/"); - final String artifactId = coordinates[1]; - final String version = coordinates[2]; - final String baseUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%s/%s/%s", groupId, artifactId, version); - final String platformUrl = String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, artifactId, platform, version); - if (urlExists(terminal, platformUrl)) { - return platformUrl; - } - return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, artifactId, version); - } - - /** - * Returns {@code true} if the given url exists, and {@code false} otherwise. - * - * The given url must be {@code https} and existing means a {@code HEAD} request returns 200. - */ - // pkg private for tests to manipulate - @SuppressForbidden(reason = "Make HEAD request using URLConnection.connect()") - boolean urlExists(Terminal terminal, String urlString) throws IOException { - terminal.println(VERBOSE, "Checking if url exists: " + urlString); - URL url = new URL(urlString); - assert "https".equals(url.getProtocol()) : "Only http urls can be checked"; - HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection(); - urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer"); - urlConnection.setRequestMethod("HEAD"); - urlConnection.connect(); - return urlConnection.getResponseCode() == 200; - } - - /** Returns all the official plugin names that look similar to pluginId. **/ - private List checkMisspelledPlugin(String pluginId) { - LevenshteinDistance ld = new LevenshteinDistance(); - List> scoredKeys = new ArrayList<>(); - for (String officialPlugin : OFFICIAL_PLUGINS) { - float distance = ld.getDistance(pluginId, officialPlugin); - if (distance > 0.7f) { - scoredKeys.add(new Tuple<>(distance, officialPlugin)); - } - } - CollectionUtil.timSort(scoredKeys, (a, b) -> b.v1().compareTo(a.v1())); - return scoredKeys.stream().map((a) -> a.v2()).collect(Collectors.toList()); - } - - /** Downloads a zip from the url, into a temp file under the given temp dir. */ - // pkg private for tests - @SuppressForbidden(reason = "We use getInputStream to download plugins") - Path downloadZip(Terminal terminal, String urlString, Path tmpDir, boolean isBatch) throws IOException { - terminal.println(VERBOSE, "Retrieving zip from " + urlString); - URL url = new URL(urlString); - Path zip = Files.createTempFile(tmpDir, null, ".zip"); - URLConnection urlConnection = url.openConnection(); - urlConnection.addRequestProperty("User-Agent", "elasticsearch-plugin-installer"); - try ( - InputStream in = isBatch - ? 
urlConnection.getInputStream() - : new TerminalProgressInputStream(urlConnection.getInputStream(), urlConnection.getContentLength(), terminal) - ) { - // must overwrite since creating the temp file above actually created the file - Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING); - } - return zip; - } - - /** - * content length might be -1 for unknown and progress only makes sense if the content length is greater than 0 - */ - private class TerminalProgressInputStream extends ProgressInputStream { - - private final Terminal terminal; - private int width = 50; - private final boolean enabled; - - TerminalProgressInputStream(InputStream is, int expectedTotalSize, Terminal terminal) { - super(is, expectedTotalSize); - this.terminal = terminal; - this.enabled = expectedTotalSize > 0; - } - - @Override - public void onProgress(int percent) { - if (enabled) { - int currentPosition = percent * width / 100; - StringBuilder sb = new StringBuilder("\r["); - sb.append(String.join("=", Collections.nCopies(currentPosition, ""))); - if (currentPosition > 0 && percent < 100) { - sb.append(">"); - } - sb.append(String.join(" ", Collections.nCopies(width - currentPosition, ""))); - sb.append("] %s   "); - if (percent == 100) { - sb.append("\n"); - } - terminal.print(Terminal.Verbosity.NORMAL, String.format(Locale.ROOT, sb.toString(), percent + "%")); - } - } - } - - @SuppressForbidden(reason = "URL#openStream") - private InputStream urlOpenStream(final URL url) throws IOException { - return url.openStream(); - } - - /** - * Downloads a ZIP from the URL. This method also validates the downloaded plugin ZIP via the following means: - *

- * <ul>
- * <li>
- * For an official plugin we download the SHA-512 checksum and validate the integrity of the downloaded ZIP. We also download the
- * armored signature and validate the authenticity of the downloaded ZIP.
- * </li>
- * <li>
- * For a non-official plugin we download the SHA-512 checksum and fallback to the SHA-1 checksum and validate the integrity of the
- * downloaded ZIP.
- * </li>
- * </ul>
- * - * @param terminal a terminal to log messages to - * @param urlString the URL of the plugin ZIP - * @param tmpDir a temporary directory to write downloaded files to - * @param officialPlugin true if the plugin is an official plugin - * @param isBatch true if the install is running in batch mode - * @return the path to the downloaded plugin ZIP - * @throws IOException if an I/O exception occurs download or reading files and resources - * @throws PGPException if an exception occurs verifying the downloaded ZIP signature - * @throws UserException if checksum validation fails - */ - private Path downloadAndValidate( - final Terminal terminal, - final String urlString, - final Path tmpDir, - final boolean officialPlugin, - boolean isBatch - ) throws IOException, PGPException, UserException { - Path zip = downloadZip(terminal, urlString, tmpDir, isBatch); - pathsToDeleteOnShutdown.add(zip); - String checksumUrlString = urlString + ".sha512"; - URL checksumUrl = openUrl(checksumUrlString); - String digestAlgo = "SHA-512"; - if (checksumUrl == null && officialPlugin == false) { - // fallback to sha1, until 7.0, but with warning - terminal.println( - "Warning: sha512 not found, falling back to sha1. This behavior is deprecated and will be removed in a " - + "future release. Please update the plugin to use a sha512 checksum." - ); - checksumUrlString = urlString + ".sha1"; - checksumUrl = openUrl(checksumUrlString); - digestAlgo = "SHA-1"; - } - if (checksumUrl == null) { - throw new UserException(ExitCodes.IO_ERROR, "Plugin checksum missing: " + checksumUrlString); - } - final String expectedChecksum; - try (InputStream in = urlOpenStream(checksumUrl)) { - /* - * The supported format of the SHA-1 files is a single-line file containing the SHA-1. The supported format of the SHA-512 files - * is a single-line file containing the SHA-512 and the filename, separated by two spaces. For SHA-1, we verify that the hash - * matches, and that the file contains a single line. For SHA-512, we verify that the hash and the filename match, and that the - * file contains a single line. 
- */ - if (digestAlgo.equals("SHA-1")) { - final BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); - expectedChecksum = checksumReader.readLine(); - if (checksumReader.readLine() != null) { - throw new UserException(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); - } - } else { - final BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); - final String checksumLine = checksumReader.readLine(); - final String[] fields = checksumLine.split(" {2}"); - if (officialPlugin && fields.length != 2 || officialPlugin == false && fields.length > 2) { - throw new UserException(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); - } - expectedChecksum = fields[0]; - if (fields.length == 2) { - // checksum line contains filename as well - final String[] segments = URI.create(urlString).getPath().split("/"); - final String expectedFile = segments[segments.length - 1]; - if (fields[1].equals(expectedFile) == false) { - final String message = String.format( - Locale.ROOT, - "checksum file at [%s] is not for this plugin, expected [%s] but was [%s]", - checksumUrl, - expectedFile, - fields[1] - ); - throw new UserException(ExitCodes.IO_ERROR, message); - } - } - if (checksumReader.readLine() != null) { - throw new UserException(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); - } - } - } - - // read the bytes of the plugin zip in chunks to avoid out of memory errors - try (InputStream zis = Files.newInputStream(zip)) { - try { - final MessageDigest digest = MessageDigest.getInstance(digestAlgo); - final byte[] bytes = new byte[8192]; - int read; - while ((read = zis.read(bytes)) != -1) { - assert read > 0 : read; - digest.update(bytes, 0, read); - } - final String actualChecksum = MessageDigests.toHexString(digest.digest()); - if (expectedChecksum.equals(actualChecksum) == false) { - throw new UserException( - ExitCodes.IO_ERROR, - digestAlgo + " mismatch, expected " + expectedChecksum + " but got " + actualChecksum - ); - } - } catch (final NoSuchAlgorithmException e) { - // this should never happen as we are using SHA-1 and SHA-512 here - throw new AssertionError(e); - } - } - - if (officialPlugin) { - verifySignature(zip, urlString); - } - - return zip; - } - - /** - * Verify the signature of the downloaded plugin ZIP. The signature is obtained from the source of the downloaded plugin by appending - * ".asc" to the URL. It is expected that the plugin is signed with the Elastic signing key with ID D27D666CD88E42B4. 
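// Illustrative sketch (not part of this diff): hashing a downloaded zip in fixed-size chunks
// and comparing the hex digest against the expected checksum, as the validation code above
// does. The method and class names are made up for the example.
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

class ChecksumSketch {
    static boolean matches(Path zip, String expectedHex) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-512");
        try (InputStream in = Files.newInputStream(zip)) {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                digest.update(buffer, 0, read);   // hash in chunks to avoid reading the whole zip into memory
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : digest.digest()) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString().equals(expectedHex);
    }
}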
- * - * @param zip the path to the downloaded plugin ZIP - * @param urlString the URL source of the downloade plugin ZIP - * @throws IOException if an I/O exception occurs reading from various input streams - * @throws PGPException if the PGP implementation throws an internal exception during verification - */ - void verifySignature(final Path zip, final String urlString) throws IOException, PGPException { - final String ascUrlString = urlString + ".asc"; - final URL ascUrl = openUrl(ascUrlString); - try ( - // fin is a file stream over the downloaded plugin zip whose signature to verify - InputStream fin = pluginZipInputStream(zip); - // sin is a URL stream to the signature corresponding to the downloaded plugin zip - InputStream sin = urlOpenStream(ascUrl); - // ain is a input stream to the public key in ASCII-Armor format (RFC4880) - InputStream ain = new ArmoredInputStream(getPublicKey()) - ) { - final JcaPGPObjectFactory factory = new JcaPGPObjectFactory(PGPUtil.getDecoderStream(sin)); - final PGPSignature signature = ((PGPSignatureList) factory.nextObject()).get(0); - - // validate the signature has key ID matching our public key ID - final String keyId = Long.toHexString(signature.getKeyID()).toUpperCase(Locale.ROOT); - if (getPublicKeyId().equals(keyId) == false) { - throw new IllegalStateException("key id [" + keyId + "] does not match expected key id [" + getPublicKeyId() + "]"); - } - - // compute the signature of the downloaded plugin zip - final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator()); - final PGPPublicKey key = collection.getPublicKey(signature.getKeyID()); - signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleFipsProvider()), key); - final byte[] buffer = new byte[1024]; - int read; - while ((read = fin.read(buffer)) != -1) { - signature.update(buffer, 0, read); - } - - // finally we verify the signature of the downloaded plugin zip matches the expected signature - if (signature.verify() == false) { - throw new IllegalStateException("signature verification for [" + urlString + "] failed"); - } - } - } - - /** - * An input stream to the raw bytes of the plugin ZIP. - * - * @param zip the path to the downloaded plugin ZIP - * @return an input stream to the raw bytes of the plugin ZIP. - * @throws IOException if an I/O exception occurs preparing the input stream - */ - InputStream pluginZipInputStream(final Path zip) throws IOException { - return Files.newInputStream(zip); - } - - /** - * Return the public key ID of the signing key that is expected to have signed the official plugin. - * - * @return the public key ID - */ - String getPublicKeyId() { - return "D27D666CD88E42B4"; - } - - /** - * An input stream to the public key of the signing key. - * - * @return an input stream to the public key - */ - InputStream getPublicKey() { - return InstallPluginCommand.class.getResourceAsStream("/public_key.asc"); - } - - /** - * Creates a URL and opens a connection. - * - * If the URL returns a 404, {@code null} is returned, otherwise the open URL opject is returned. 
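// Illustrative sketch (not part of this diff): probing a URL with a HEAD request and treating
// only a 200 response as "exists", in the spirit of the urlExists/openUrl helpers around this
// code. The user agent string and class name are examples.
import java.net.HttpURLConnection;
import java.net.URL;

class UrlProbeSketch {
    static boolean exists(String urlString) throws Exception {
        HttpURLConnection connection = (HttpURLConnection) new URL(urlString).openConnection();
        connection.addRequestProperty("User-Agent", "example-plugin-installer");
        connection.setRequestMethod("HEAD");
        connection.connect();
        return connection.getResponseCode() == 200;
    }
}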
- */ - // pkg private for tests - URL openUrl(String urlString) throws IOException { - URL checksumUrl = new URL(urlString); - HttpURLConnection connection = (HttpURLConnection) checksumUrl.openConnection(); - if (connection.getResponseCode() == 404) { - return null; - } - return checksumUrl; - } - - private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException { - // unzip plugin to a staging temp dir - - final Path target = stagingDirectory(pluginsDir); - pathsToDeleteOnShutdown.add(target); - - try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) { - ZipEntry entry; - byte[] buffer = new byte[8192]; - while ((entry = zipInput.getNextEntry()) != null) { - if (entry.getName().startsWith("elasticsearch/")) { - throw new UserException( - PLUGIN_MALFORMED, - "This plugin was built with an older plugin structure." - + " Contact the plugin author to remove the intermediate \"elasticsearch\" directory within the plugin zip." - ); - } - Path targetFile = target.resolve(entry.getName()); - - // Using the entry name as a path can result in an entry outside of the plugin dir, - // either if the name starts with the root of the filesystem, or it is a relative - // entry like ../whatever. This check attempts to identify both cases by first - // normalizing the path (which removes foo/..) and ensuring the normalized entry - // is still rooted with the target plugin directory. - if (targetFile.normalize().startsWith(target) == false) { - throw new UserException( - PLUGIN_MALFORMED, - "Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory" - ); - } - - // be on the safe side: do not rely on that directories are always extracted - // before their children (although this makes sense, but is it guaranteed?) - if (Files.isSymbolicLink(targetFile.getParent()) == false) { - Files.createDirectories(targetFile.getParent()); - } - if (entry.isDirectory() == false) { - try (OutputStream out = Files.newOutputStream(targetFile)) { - int len; - while ((len = zipInput.read(buffer)) >= 0) { - out.write(buffer, 0, len); - } - } - } - zipInput.closeEntry(); - } - } catch (UserException e) { - IOUtils.rm(target); - throw e; - } - Files.delete(zip); - return target; - } - - private Path stagingDirectory(Path pluginsDir) throws IOException { - try { - return Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(PLUGIN_DIR_PERMS)); - } catch (UnsupportedOperationException e) { - return stagingDirectoryWithoutPosixPermissions(pluginsDir); - } - } - - private Path stagingDirectoryWithoutPosixPermissions(Path pluginsDir) throws IOException { - return Files.createTempDirectory(pluginsDir, ".installing-"); - } - - // checking for existing version of the plugin - private void verifyPluginName(Path pluginPath, String pluginName) throws UserException, IOException { - // don't let user install plugin conflicting with module... 
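// Illustrative sketch (not part of this diff): rejecting zip entries that would escape the
// extraction directory ("zip slip"), using the normalize-then-startsWith check described in
// the unzip code above. The class name is made up.
import java.nio.file.Path;

class ZipSlipSketch {
    static Path resolveSafely(Path targetDir, String entryName) {
        Path targetFile = targetDir.resolve(entryName);
        if (targetFile.normalize().startsWith(targetDir) == false) {
            throw new IllegalArgumentException("entry '" + entryName + "' resolves outside of " + targetDir);
        }
        return targetFile;
    }
}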
- // they might be unavoidably in maven central and are packaged up the same way) - if (MODULES.contains(pluginName)) { - throw new UserException(ExitCodes.USAGE, "plugin '" + pluginName + "' cannot be installed as a plugin, it is a system module"); - } - - final Path destination = pluginPath.resolve(pluginName); - if (Files.exists(destination)) { - final String message = String.format( - Locale.ROOT, - "plugin directory [%s] already exists; if you need to update the plugin, " + "uninstall it first using command 'remove %s'", - destination, - pluginName - ); - throw new UserException(PLUGIN_EXISTS, message); - } - } - - /** Load information about the plugin, and verify it can be installed with no errors. */ - private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, Environment env) throws Exception { - final PluginInfo info = PluginInfo.readFromProperties(pluginRoot); - if (info.hasNativeController()) { - throw new IllegalStateException("plugins can not have native controllers"); - } - PluginsService.verifyCompatibility(info); - - // checking for existing version of the plugin - verifyPluginName(env.pluginsFile(), info.getName()); - - PluginsService.checkForFailedPluginRemovals(env.pluginsFile()); - - terminal.println(VERBOSE, info.toString()); - - // check for jar hell before any copying - jarHellCheck(info, pluginRoot, env.pluginsFile(), env.modulesFile()); - - return info; - } - - private static final String LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR; - - static { - LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR = String.format(Locale.ROOT, ".+%1$slib%1$stools%1$splugin-cli%1$s[^%1$s]+\\.jar", "(/|\\\\)"); - } - - /** check a candidate plugin for jar hell before installing it */ - void jarHellCheck(PluginInfo candidateInfo, Path candidateDir, Path pluginsDir, Path modulesDir) throws Exception { - // create list of current jars in classpath - final Set classpath = JarHell.parseClassPath().stream().filter(url -> { - try { - return url.toURI().getPath().matches(LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR) == false; - } catch (final URISyntaxException e) { - throw new AssertionError(e); - } - }).collect(Collectors.toSet()); - - // read existing bundles. this does some checks on the installation too. - Set bundles = new HashSet<>(PluginsService.getPluginBundles(pluginsDir)); - bundles.addAll(PluginsService.getModuleBundles(modulesDir)); - bundles.add(new PluginsService.Bundle(candidateInfo, candidateDir)); - List sortedBundles = PluginsService.sortBundles(bundles); - - // check jarhell of all plugins so we know this plugin and anything depending on it are ok together - // TODO: optimize to skip any bundles not connected to the candidate plugin? - Map> transitiveUrls = new HashMap<>(); - for (PluginsService.Bundle bundle : sortedBundles) { - PluginsService.checkBundleJarHell(classpath, bundle, transitiveUrls); - } - - // TODO: no jars should be an error - // TODO: verify the classname exists in one of the jars! - } - - /** - * Installs the plugin from {@code tmpRoot} into the plugins dir. - * If the plugin has a bin dir and/or a config dir, those are moved. 
- */ - private PluginInfo installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) - throws Exception { - final PluginInfo info = loadPluginInfo(terminal, tmpRoot, env); - checkCanInstallationProceed(terminal, Build.CURRENT.flavor(), info); - PluginPolicyInfo pluginPolicy = PolicyUtil.getPluginPolicyInfo(tmpRoot, env.tmpFile()); - if (pluginPolicy != null) { - Set permissions = PluginSecurity.getPermissionDescriptions(pluginPolicy, env.tmpFile()); - PluginSecurity.confirmPolicyExceptions(terminal, permissions, isBatch); - } - - final Path destination = env.pluginsFile().resolve(info.getName()); - deleteOnFailure.add(destination); - - installPluginSupportFiles( - info, - tmpRoot, - env.binFile().resolve(info.getName()), - env.configFile().resolve(info.getName()), - deleteOnFailure - ); - movePlugin(tmpRoot, destination); - return info; - } - - /** Moves bin and config directories from the plugin if they exist */ - private void installPluginSupportFiles(PluginInfo info, Path tmpRoot, Path destBinDir, Path destConfigDir, List deleteOnFailure) - throws Exception { - Path tmpBinDir = tmpRoot.resolve("bin"); - if (Files.exists(tmpBinDir)) { - deleteOnFailure.add(destBinDir); - installBin(info, tmpBinDir, destBinDir); - } - - Path tmpConfigDir = tmpRoot.resolve("config"); - if (Files.exists(tmpConfigDir)) { - // some files may already exist, and we don't remove plugin config files on plugin removal, - // so any installed config files are left on failure too - installConfig(info, tmpConfigDir, destConfigDir); - } - } - - /** Moves the plugin directory into its final destination. **/ - private void movePlugin(Path tmpRoot, Path destination) throws IOException { - Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE); - Files.walkFileTree(destination, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException { - final String parentDirName = file.getParent().getFileName().toString(); - if ("bin".equals(parentDirName) - // "MacOS" is an alternative to "bin" on macOS - || (Constants.MAC_OS_X && "MacOS".equals(parentDirName))) { - setFileAttributes(file, BIN_FILES_PERMS); - } else { - setFileAttributes(file, PLUGIN_FILES_PERMS); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException { - setFileAttributes(dir, PLUGIN_DIR_PERMS); - return FileVisitResult.CONTINUE; - } - }); - } - - /** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. 
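// Illustrative sketch (not part of this diff): applying POSIX permissions across an installed
// plugin directory with a file tree walk, skipping filesystems without POSIX support, similar
// in spirit to movePlugin/setFileAttributes above. The permission sets are caller-supplied here.
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Set;

class PermissionsSketch {
    static void apply(Path root, Set<PosixFilePermission> dirPerms, Set<PosixFilePermission> filePerms) throws IOException {
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                set(file, filePerms);
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                set(dir, dirPerms);
                return FileVisitResult.CONTINUE;
            }
        });
    }

    private static void set(Path path, Set<PosixFilePermission> perms) throws IOException {
        // only set permissions when the filesystem exposes a POSIX attribute view
        if (Files.getFileAttributeView(path, PosixFileAttributeView.class) != null) {
            Files.setPosixFilePermissions(path, perms);
        }
    }
}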
*/ - private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { - if (Files.isDirectory(tmpBinDir) == false) { - throw new UserException(PLUGIN_MALFORMED, "bin in plugin " + info.getName() + " is not a directory"); - } - Files.createDirectories(destBinDir); - setFileAttributes(destBinDir, BIN_DIR_PERMS); - - try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { - for (Path srcFile : stream) { - if (Files.isDirectory(srcFile)) { - throw new UserException( - PLUGIN_MALFORMED, - "Directories not allowed in bin dir " + "for plugin " + info.getName() + ", found " + srcFile.getFileName() - ); - } - - Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); - Files.copy(srcFile, destFile); - setFileAttributes(destFile, BIN_FILES_PERMS); - } - } - IOUtils.rm(tmpBinDir); // clean up what we just copied - } - - /** - * Copies the files from {@code tmpConfigDir} into {@code destConfigDir}. - * Any files existing in both the source and destination will be skipped. - */ - private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { - if (Files.isDirectory(tmpConfigDir) == false) { - throw new UserException(PLUGIN_MALFORMED, "config in plugin " + info.getName() + " is not a directory"); - } - - Files.createDirectories(destConfigDir); - setFileAttributes(destConfigDir, CONFIG_DIR_PERMS); - final PosixFileAttributeView destConfigDirAttributesView = Files.getFileAttributeView( - destConfigDir.getParent(), - PosixFileAttributeView.class - ); - final PosixFileAttributes destConfigDirAttributes = destConfigDirAttributesView != null - ? destConfigDirAttributesView.readAttributes() - : null; - if (destConfigDirAttributes != null) { - setOwnerGroup(destConfigDir, destConfigDirAttributes); - } - - try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { - for (Path srcFile : stream) { - if (Files.isDirectory(srcFile)) { - throw new UserException(PLUGIN_MALFORMED, "Directories not allowed in config dir for plugin " + info.getName()); - } - - Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); - if (Files.exists(destFile) == false) { - Files.copy(srcFile, destFile); - setFileAttributes(destFile, CONFIG_FILES_PERMS); - if (destConfigDirAttributes != null) { - setOwnerGroup(destFile, destConfigDirAttributes); - } - } - } - } - IOUtils.rm(tmpConfigDir); // clean up what we just copied - } - - private static void setOwnerGroup(final Path path, final PosixFileAttributes attributes) throws IOException { - Objects.requireNonNull(attributes); - PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); - assert fileAttributeView != null; - fileAttributeView.setOwner(attributes.owner()); - fileAttributeView.setGroup(attributes.group()); - } - - /** - * Sets the attributes for a path iff posix attributes are supported - */ - private static void setFileAttributes(final Path path, final Set permissions) throws IOException { - PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); - if (fileAttributeView != null) { - Files.setPosixFilePermissions(path, permissions); - } - } - - private final List pathsToDeleteOnShutdown = new ArrayList<>(); - - @Override - public void close() throws IOException { - IOUtils.rm(pathsToDeleteOnShutdown.toArray(new Path[pathsToDeleteOnShutdown.size()])); - } - - static void checkCanInstallationProceed(Terminal terminal, Build.Flavor flavor, PluginInfo info) throws 
Exception { - if (info.isLicensed() == false) { - return; - } - - if (flavor == Build.Flavor.DEFAULT) { - return; - } - - List.of( - "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@", - "@ ERROR: This is a licensed plugin @", - "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@", - "", - "This plugin is covered by the Elastic license, but this", - "installation of Elasticsearch is: [" + flavor + "]." - ).forEach(terminal::errorPrintln); - throw new UserException(ExitCodes.NOPERM, "Plugin license is incompatible with [" + flavor + "] installation"); + InstallPluginAction action = new InstallPluginAction(terminal, env, isBatch); + action.execute(plugins); } } diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java new file mode 100644 index 0000000000000..fed84d1b53605 --- /dev/null +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugins; + +import java.util.Objects; + +public class PluginDescriptor { + private String id; + private String url; + private String proxy; + + public PluginDescriptor() {} + + public PluginDescriptor(String id, String url, String proxy) { + this.id = id; + this.url = url; + this.proxy = proxy; + } + + public PluginDescriptor(String id, String url) { + this(id, url, null); + } + + public PluginDescriptor(String id) { + this(id, null, null); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + + public String getProxy() { + return proxy; + } + + public void setProxy(String proxy) { + this.proxy = proxy; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PluginDescriptor that = (PluginDescriptor) o; + return id.equals(that.id) && Objects.equals(url, that.url) && Objects.equals(proxy, that.proxy); + } + + @Override + public int hashCode() { + return Objects.hash(id, url, proxy); + } +} diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginAction.java new file mode 100644 index 0000000000000..c1c40fb7eaf08 --- /dev/null +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginAction.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugins; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.StringJoiner; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; + +/** + * An action for the plugin CLI to remove plugins from Elasticsearch. + */ +class RemovePluginAction { + + // exit codes for remove + /** A plugin cannot be removed because it is extended by another plugin. */ + static final int PLUGIN_STILL_USED = 11; + + private final Terminal terminal; + private final Environment env; + private final boolean purge; + + /** + * Creates a new action. + * + * @param terminal the terminal to use for input/output + * @param env the environment for the local node + * @param purge if true, plugin configuration files will be removed but otherwise preserved + */ + RemovePluginAction(Terminal terminal, Environment env, boolean purge) { + this.terminal = terminal; + this.env = env; + this.purge = purge; + } + + /** + * Remove the plugin specified by {@code pluginName}. + * + * @param plugins the IDs of the plugins to remove + * @throws IOException if any I/O exception occurs while performing a file operation + * @throws UserException if plugins is null or empty + * @throws UserException if plugin directory does not exist + * @throws UserException if the plugin bin directory is not a directory + */ + void execute(List plugins) throws IOException, UserException { + if (plugins == null || plugins.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "At least one plugin ID is required"); + } + + ensurePluginsNotUsedByOtherPlugins(plugins); + + for (PluginDescriptor plugin : plugins) { + checkCanRemove(plugin); + } + + for (PluginDescriptor plugin : plugins) { + removePlugin(plugin); + } + } + + private void ensurePluginsNotUsedByOtherPlugins(List plugins) throws IOException, UserException { + // First make sure nothing extends this plugin + final Map> usedBy = new HashMap<>(); + Set bundles = PluginsService.getPluginBundles(env.pluginsFile()); + for (PluginsService.Bundle bundle : bundles) { + for (String extendedPlugin : bundle.plugin.getExtendedPlugins()) { + for (PluginDescriptor plugin : plugins) { + String pluginId = plugin.getId(); + if (extendedPlugin.equals(pluginId)) { + usedBy.computeIfAbsent(bundle.plugin.getName(), (_key -> new ArrayList<>())).add(pluginId); + } + } + } + } + if (usedBy.isEmpty()) { + return; + } + + final StringJoiner message = new StringJoiner("\n"); + message.add("Cannot remove plugins because the following are extended by other plugins:"); + usedBy.forEach((key, value) -> { + String s = "\t" + key + " used by " + value; + message.add(s); + }); + + throw new UserException(PLUGIN_STILL_USED, message.toString()); + } + + private void checkCanRemove(PluginDescriptor plugin) throws UserException { + String pluginId = plugin.getId(); + final Path pluginDir = env.pluginsFile().resolve(pluginId); + final Path pluginConfigDir = env.configFile().resolve(pluginId); + final Path removing = 
env.pluginsFile().resolve(".removing-" + pluginId); + + /* + * If the plugin does not exist and the plugin config does not exist, fail to the user that the plugin is not found, unless there's + * a marker file left from a previously failed attempt in which case we proceed to clean up the marker file. Or, if the plugin does + * not exist, the plugin config does, and we are not purging, again fail to the user that the plugin is not found. + */ + if ((Files.exists(pluginDir) == false && Files.exists(pluginConfigDir) == false && Files.exists(removing) == false) + || (Files.exists(pluginDir) == false && Files.exists(pluginConfigDir) && this.purge == false)) { + final String message = String.format( + Locale.ROOT, + "plugin [%s] not found; run 'elasticsearch-plugin list' to get list of installed plugins", + pluginId + ); + throw new UserException(ExitCodes.CONFIG, message); + } + + final Path pluginBinDir = env.binFile().resolve(pluginId); + if (Files.exists(pluginBinDir)) { + if (Files.isDirectory(pluginBinDir) == false) { + throw new UserException(ExitCodes.IO_ERROR, "bin dir for " + pluginId + " is not a directory"); + } + } + } + + private void removePlugin(PluginDescriptor plugin) throws IOException { + final String pluginId = plugin.getId(); + final Path pluginDir = env.pluginsFile().resolve(pluginId); + final Path pluginConfigDir = env.configFile().resolve(pluginId); + final Path removing = env.pluginsFile().resolve(".removing-" + pluginId); + + terminal.println("-> removing [" + pluginId + "]..."); + + final List pluginPaths = new ArrayList<>(); + + /* + * Add the contents of the plugin directory before creating the marker file and adding it to the list of paths to be deleted so + * that the marker file is the last file to be deleted. + */ + if (Files.exists(pluginDir)) { + try (Stream paths = Files.list(pluginDir)) { + pluginPaths.addAll(paths.collect(Collectors.toList())); + } + terminal.println(VERBOSE, "removing [" + pluginDir + "]"); + } + + final Path pluginBinDir = env.binFile().resolve(pluginId); + if (Files.exists(pluginBinDir)) { + try (Stream paths = Files.list(pluginBinDir)) { + pluginPaths.addAll(paths.collect(Collectors.toList())); + } + pluginPaths.add(pluginBinDir); + terminal.println(VERBOSE, "removing [" + pluginBinDir + "]"); + } + + if (Files.exists(pluginConfigDir)) { + if (this.purge) { + try (Stream paths = Files.list(pluginConfigDir)) { + pluginPaths.addAll(paths.collect(Collectors.toList())); + } + pluginPaths.add(pluginConfigDir); + terminal.println(VERBOSE, "removing [" + pluginConfigDir + "]"); + } else { + /* + * By default we preserve the config files in case the user is upgrading the plugin, but we print a message so the user + * knows in case they want to remove manually. + */ + final String message = String.format( + Locale.ROOT, + "-> preserving plugin config files [%s] in case of upgrade; use --purge if not needed", + pluginConfigDir + ); + terminal.println(message); + } + } + + /* + * We are going to create a marker file in the plugin directory that indicates that this plugin is a state of removal. If the + * removal fails, the existence of this marker file indicates that the plugin is in a garbage state. We check for existence of this + * marker file during startup so that we do not startup with plugins in such a garbage state. Up to this point, we have not done + * anything destructive, so we create the marker file as the last action before executing destructive operations. 
We place this + * marker file in the root plugin directory (not the specific plugin directory) so that we do not have to create the specific plugin + * directory if it does not exist (we are purging configuration files). + */ + try { + Files.createFile(removing); + } catch (final FileAlreadyExistsException e) { + /* + * We need to suppress the marker file already existing as we could be in this state if a previous removal attempt failed and + * the user is attempting to remove the plugin again. + */ + terminal.println(VERBOSE, "marker file [" + removing + "] already exists"); + } + + // add the plugin directory + pluginPaths.add(pluginDir); + + // finally, add the marker file + pluginPaths.add(removing); + + IOUtils.rm(pluginPaths.toArray(new Path[0])); + } +} diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index a09a844551aad..e9a47d7ce078e 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -12,38 +12,17 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.EnvironmentAwareCommand; -import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; -import java.io.IOException; -import java.nio.file.FileAlreadyExistsException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.StringJoiner; import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** * A command for the plugin CLI to remove plugins from Elasticsearch. */ class RemovePluginCommand extends EnvironmentAwareCommand { - - // exit codes for remove - /** A plugin cannot be removed because it is extended by another plugin. */ - static final int PLUGIN_STILL_USED = 11; - private final OptionSpec purgeOption; private final OptionSpec arguments; @@ -55,168 +34,9 @@ class RemovePluginCommand extends EnvironmentAwareCommand { @Override protected void execute(final Terminal terminal, final OptionSet options, final Environment env) throws Exception { - final List pluginIds = arguments.values(options); - final boolean purge = options.has(purgeOption); - execute(terminal, env, pluginIds, purge); - } - - /** - * Remove the plugin specified by {@code pluginName}. 
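// Illustrative sketch (not part of this diff): creating the ".removing-<plugin>" marker file as
// the last non-destructive step of removal and tolerating a marker left over from an earlier
// failed attempt, as the comments above describe. The helper name is made up.
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;

class RemovalMarkerSketch {
    static Path markRemoving(Path pluginsDir, String pluginId) throws Exception {
        Path removing = pluginsDir.resolve(".removing-" + pluginId);
        try {
            Files.createFile(removing);
        } catch (FileAlreadyExistsException e) {
            // a previous removal attempt failed part-way; reuse the existing marker
        }
        return removing;
    }
}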
- * - * @param terminal the terminal to use for input/output - * @param env the environment for the local node - * @param pluginIds the IDs of the plugins to remove - * @param purge if true, plugin configuration files will be removed but otherwise preserved - * @throws IOException if any I/O exception occurs while performing a file operation - * @throws UserException if pluginIds is null or empty - * @throws UserException if plugin directory does not exist - * @throws UserException if the plugin bin directory is not a directory - */ - void execute(Terminal terminal, Environment env, List pluginIds, boolean purge) throws IOException, UserException { - if (pluginIds == null || pluginIds.isEmpty()) { - throw new UserException(ExitCodes.USAGE, "At least one plugin ID is required"); - } - - ensurePluginsNotUsedByOtherPlugins(env, pluginIds); - - for (String pluginId : pluginIds) { - checkCanRemove(env, pluginId, purge); - } - - for (String pluginId : pluginIds) { - removePlugin(env, terminal, pluginId, purge); - } - } - - private void ensurePluginsNotUsedByOtherPlugins(Environment env, List pluginIds) throws IOException, UserException { - // First make sure nothing extends this plugin - final Map> usedBy = new HashMap<>(); - Set bundles = PluginsService.getPluginBundles(env.pluginsFile()); - for (PluginsService.Bundle bundle : bundles) { - for (String extendedPlugin : bundle.plugin.getExtendedPlugins()) { - for (String pluginId : pluginIds) { - if (extendedPlugin.equals(pluginId)) { - usedBy.computeIfAbsent(bundle.plugin.getName(), (_key -> new ArrayList<>())).add(pluginId); - } - } - } - } - if (usedBy.isEmpty()) { - return; - } - - final StringJoiner message = new StringJoiner("\n"); - message.add("Cannot remove plugins because the following are extended by other plugins:"); - usedBy.forEach((key, value) -> { - String s = "\t" + key + " used by " + value; - message.add(s); - }); - - throw new UserException(PLUGIN_STILL_USED, message.toString()); - } - - private void checkCanRemove(Environment env, String pluginId, boolean purge) throws UserException { - final Path pluginDir = env.pluginsFile().resolve(pluginId); - final Path pluginConfigDir = env.configFile().resolve(pluginId); - final Path removing = env.pluginsFile().resolve(".removing-" + pluginId); - - /* - * If the plugin does not exist and the plugin config does not exist, fail to the user that the plugin is not found, unless there's - * a marker file left from a previously failed attempt in which case we proceed to clean up the marker file. Or, if the plugin does - * not exist, the plugin config does, and we are not purging, again fail to the user that the plugin is not found. 
- */ - if ((Files.exists(pluginDir) == false && Files.exists(pluginConfigDir) == false && Files.exists(removing) == false) - || (Files.exists(pluginDir) == false && Files.exists(pluginConfigDir) && purge == false)) { - final String message = String.format( - Locale.ROOT, - "plugin [%s] not found; run 'elasticsearch-plugin list' to get list of installed plugins", - pluginId - ); - throw new UserException(ExitCodes.CONFIG, message); - } - - final Path pluginBinDir = env.binFile().resolve(pluginId); - if (Files.exists(pluginBinDir)) { - if (Files.isDirectory(pluginBinDir) == false) { - throw new UserException(ExitCodes.IO_ERROR, "bin dir for " + pluginId + " is not a directory"); - } - } - } - - private void removePlugin(Environment env, Terminal terminal, String pluginId, boolean purge) throws IOException { - final Path pluginDir = env.pluginsFile().resolve(pluginId); - final Path pluginConfigDir = env.configFile().resolve(pluginId); - final Path removing = env.pluginsFile().resolve(".removing-" + pluginId); - - terminal.println("-> removing [" + pluginId + "]..."); - - final List pluginPaths = new ArrayList<>(); - - /* - * Add the contents of the plugin directory before creating the marker file and adding it to the list of paths to be deleted so - * that the marker file is the last file to be deleted. - */ - if (Files.exists(pluginDir)) { - try (Stream paths = Files.list(pluginDir)) { - pluginPaths.addAll(paths.collect(Collectors.toList())); - } - terminal.println(VERBOSE, "removing [" + pluginDir + "]"); - } - - final Path pluginBinDir = env.binFile().resolve(pluginId); - if (Files.exists(pluginBinDir)) { - try (Stream paths = Files.list(pluginBinDir)) { - pluginPaths.addAll(paths.collect(Collectors.toList())); - } - pluginPaths.add(pluginBinDir); - terminal.println(VERBOSE, "removing [" + pluginBinDir + "]"); - } - - if (Files.exists(pluginConfigDir)) { - if (purge) { - try (Stream paths = Files.list(pluginConfigDir)) { - pluginPaths.addAll(paths.collect(Collectors.toList())); - } - pluginPaths.add(pluginConfigDir); - terminal.println(VERBOSE, "removing [" + pluginConfigDir + "]"); - } else { - /* - * By default we preserve the config files in case the user is upgrading the plugin, but we print a message so the user - * knows in case they want to remove manually. - */ - final String message = String.format( - Locale.ROOT, - "-> preserving plugin config files [%s] in case of upgrade; use --purge if not needed", - pluginConfigDir - ); - terminal.println(message); - } - } - - /* - * We are going to create a marker file in the plugin directory that indicates that this plugin is a state of removal. If the - * removal fails, the existence of this marker file indicates that the plugin is in a garbage state. We check for existence of this - * marker file during startup so that we do not startup with plugins in such a garbage state. Up to this point, we have not done - * anything destructive, so we create the marker file as the last action before executing destructive operations. We place this - * marker file in the root plugin directory (not the specific plugin directory) so that we do not have to create the specific plugin - * directory if it does not exist (we are purging configuration files). - */ - try { - Files.createFile(removing); - } catch (final FileAlreadyExistsException e) { - /* - * We need to suppress the marker file already existing as we could be in this state if a previous removal attempt failed and - * the user is attempting to remove the plugin again. 
- */ - terminal.println(VERBOSE, "marker file [" + removing + "] already exists"); - } - - // add the plugin directory - pluginPaths.add(pluginDir); - - // finally, add the marker file - pluginPaths.add(removing); + final List plugins = arguments.values(options).stream().map(PluginDescriptor::new).collect(Collectors.toList()); - IOUtils.rm(pluginPaths.toArray(new Path[0])); + final RemovePluginAction action = new RemovePluginAction(terminal, env, options.has(purgeOption)); + action.execute(plugins); } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallLicensedPluginTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallLicensedPluginTests.java index 49648bf90c6b3..876d0b1c82ca2 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallLicensedPluginTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallLicensedPluginTests.java @@ -28,18 +28,18 @@ public class InstallLicensedPluginTests extends ESTestCase { public void testUnlicensedPlugin() throws Exception { MockTerminal terminal = new MockTerminal(); PluginInfo pluginInfo = buildInfo(false); - InstallPluginCommand.checkCanInstallationProceed(terminal, Build.Flavor.OSS, pluginInfo); + InstallPluginAction.checkCanInstallationProceed(terminal, Build.Flavor.OSS, pluginInfo); } /** * Check that a licensed plugin cannot be installed on OSS. */ - public void testInstallPluginCommandOnOss() throws Exception { + public void testInstallPluginActionOnOss() throws Exception { MockTerminal terminal = new MockTerminal(); PluginInfo pluginInfo = buildInfo(true); final UserException userException = expectThrows( UserException.class, - () -> InstallPluginCommand.checkCanInstallationProceed(terminal, Build.Flavor.OSS, pluginInfo) + () -> InstallPluginAction.checkCanInstallationProceed(terminal, Build.Flavor.OSS, pluginInfo) ); assertThat(userException.exitCode, equalTo(ExitCodes.NOPERM)); @@ -49,12 +49,12 @@ public void testInstallPluginCommandOnOss() throws Exception { /** * Check that a licensed plugin cannot be installed when the distribution type is unknown. */ - public void testInstallPluginCommandOnUnknownDistribution() throws Exception { + public void testInstallPluginActionOnUnknownDistribution() throws Exception { MockTerminal terminal = new MockTerminal(); PluginInfo pluginInfo = buildInfo(true); expectThrows( UserException.class, - () -> InstallPluginCommand.checkCanInstallationProceed(terminal, Build.Flavor.UNKNOWN, pluginInfo) + () -> InstallPluginAction.checkCanInstallationProceed(terminal, Build.Flavor.UNKNOWN, pluginInfo) ); assertThat(terminal.getErrorOutput(), containsString("ERROR: This is a licensed plugin")); } @@ -62,10 +62,10 @@ public void testInstallPluginCommandOnUnknownDistribution() throws Exception { /** * Check that a licensed plugin can be installed when the distribution type is default. 
*/ - public void testInstallPluginCommandOnDefault() throws Exception { + public void testInstallPluginActionOnDefault() throws Exception { MockTerminal terminal = new MockTerminal(); PluginInfo pluginInfo = buildInfo(true); - InstallPluginCommand.checkCanInstallationProceed(terminal, Build.Flavor.DEFAULT, pluginInfo); + InstallPluginAction.checkCanInstallationProceed(terminal, Build.Flavor.DEFAULT, pluginInfo); } private PluginInfo buildInfo(boolean isLicensed) { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginActionTests.java similarity index 83% rename from distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java rename to distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginActionTests.java index d002be80f2ee9..febc7163d26c9 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginActionTests.java @@ -78,7 +78,6 @@ import java.security.KeyPairGenerator; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -105,22 +104,22 @@ import static org.hamcrest.Matchers.startsWith; @LuceneTestCase.SuppressFileSystems("*") -public class InstallPluginCommandTests extends ESTestCase { +public class InstallPluginActionTests extends ESTestCase { - private InstallPluginCommand skipJarHellCommand; - private InstallPluginCommand defaultCommand; + private InstallPluginAction skipJarHellAction; + private InstallPluginAction defaultAction; private final Function temp; - private final MockTerminal terminal = new MockTerminal(); + private MockTerminal terminal; + private Tuple env; + private Path pluginDir; - private final FileSystem fs; private final boolean isPosix; private final boolean isReal; private final String javaIoTmpdir; @SuppressForbidden(reason = "sets java.io.tmpdir") - public InstallPluginCommandTests(FileSystem fs, Function temp) { - this.fs = fs; + public InstallPluginActionTests(FileSystem fs, Function temp) { this.temp = temp; this.isPosix = fs.supportedFileAttributeViews().contains("posix"); this.isReal = fs == PathUtils.getDefaultFileSystem(); @@ -133,22 +132,24 @@ public InstallPluginCommandTests(FileSystem fs, Function temp) { @Before public void setUp() throws Exception { super.setUp(); - skipJarHellCommand = new InstallPluginCommand() { + pluginDir = createPluginDir(temp); + terminal = new MockTerminal(); + env = createEnv(temp); + skipJarHellAction = new InstallPluginAction(terminal, null, false) { @Override - void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) throws Exception { + void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) { // no jarhell check } }; - defaultCommand = new InstallPluginCommand(); - terminal.reset(); + defaultAction = new InstallPluginAction(terminal, env.v2(), false); } @Override @After @SuppressForbidden(reason = "resets java.io.tmpdir") public void tearDown() throws Exception { - defaultCommand.close(); - skipJarHellCommand.close(); + defaultAction.close(); + skipJarHellAction.close(); System.setProperty("java.io.tmpdir", javaIoTmpdir); 
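// Illustrative usage sketch (not part of this diff) for the new PluginDescriptor value class
// introduced above; it assumes that class is on the classpath. The plugin id and URL are made up.
package org.elasticsearch.plugins;

class PluginDescriptorUsageSketch {
    public static void main(String[] args) {
        PluginDescriptor official = new PluginDescriptor("analysis-icu");               // id only: url and proxy stay null
        PluginDescriptor fromUrl = new PluginDescriptor("fake", "file:///tmp/fake.zip");
        System.out.println(official.getId() + " -> " + official.getUrl());
        // equals/hashCode compare id, url and proxy, so identical descriptors are interchangeable in tests
        System.out.println(fromUrl.equals(new PluginDescriptor("fake", "file:///tmp/fake.zip")));
    }
}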
PathUtilsForTesting.teardown(); super.tearDown(); @@ -188,7 +189,7 @@ private static Configuration toPosix(Configuration configuration) { } /** Creates a test environment with bin, config and plugins directories. */ - static Tuple createEnv(FileSystem fs, Function temp) throws IOException { + static Tuple createEnv(Function temp) throws IOException { Path home = temp.apply("install-plugin-command-tests"); Files.createDirectories(home.resolve("bin")); Files.createFile(home.resolve("bin").resolve("elasticsearch")); @@ -200,7 +201,7 @@ static Tuple createEnv(FileSystem fs, Function return Tuple.tuple(home, TestEnvironment.newEnvironment(settings)); } - static Path createPluginDir(Function temp) throws IOException { + static Path createPluginDir(Function temp) { return temp.apply("pluginDir"); } @@ -226,8 +227,8 @@ static Path writeZip(Path structure, String prefix) throws IOException { } /** creates a plugin .zip and returns the url for testing */ - static String createPluginUrl(String name, Path structure, String... additionalProps) throws IOException { - return createPlugin(name, structure, additionalProps).toUri().toURL().toString(); + static PluginDescriptor createPluginZip(String name, Path structure, String... additionalProps) throws IOException { + return createPlugin(name, structure, additionalProps); } static void writePlugin(String name, Path structure, String... additionalProps) throws IOException { @@ -264,26 +265,32 @@ static void writePluginSecurityPolicy(Path pluginDir, String... permissions) thr Files.write(pluginDir.resolve("plugin-security.policy"), securityPolicyContent.toString().getBytes(StandardCharsets.UTF_8)); } - static Path createPlugin(String name, Path structure, String... additionalProps) throws IOException { + static PluginDescriptor createPlugin(String name, Path structure, String... additionalProps) throws IOException { writePlugin(name, structure, additionalProps); - return writeZip(structure, null); + return new PluginDescriptor(name, writeZip(structure, null).toUri().toURL().toString()); + } + + void installPlugin(String id) throws Exception { + PluginDescriptor plugin = id == null ? null : new PluginDescriptor(id, id); + installPlugin(plugin, env.v1(), skipJarHellAction); } - void installPlugin(String pluginUrl, Path home) throws Exception { - installPlugin(pluginUrl, home, skipJarHellCommand); + void installPlugin(PluginDescriptor plugin) throws Exception { + installPlugin(plugin, env.v1(), skipJarHellAction); } - void installPlugins(final List pluginUrls, final Path home) throws Exception { - installPlugins(pluginUrls, home, skipJarHellCommand); + void installPlugins(final List plugins, final Path home) throws Exception { + installPlugins(plugins, home, skipJarHellAction); } - void installPlugin(String pluginUrl, Path home, InstallPluginCommand command) throws Exception { - installPlugins(pluginUrl == null ? List.of() : List.of(pluginUrl), home, command); + void installPlugin(PluginDescriptor plugin, Path home, InstallPluginAction action) throws Exception { + installPlugins(plugin == null ? 
List.of() : List.of(plugin), home, action); } - void installPlugins(final List pluginUrls, final Path home, final InstallPluginCommand command) throws Exception { + void installPlugins(final List plugins, final Path home, final InstallPluginAction action) throws Exception { final Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); - command.execute(terminal, pluginUrls, false, env); + action.setEnvironment(env); + action.execute(plugins); } void assertPlugin(String name, Path original, Environment env) throws IOException { @@ -326,16 +333,12 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx Path binDir = env.binFile().resolve(name); assertTrue("bin dir exists", Files.exists(binDir)); assertTrue("bin is a dir", Files.isDirectory(binDir)); - PosixFileAttributes binAttributes = null; - if (isPosix) { - binAttributes = Files.readAttributes(env.binFile(), PosixFileAttributes.class); - } try (DirectoryStream stream = Files.newDirectoryStream(binDir)) { for (Path file : stream) { assertFalse("not a dir", Files.isDirectory(file)); if (isPosix) { PosixFileAttributes attributes = Files.readAttributes(file, PosixFileAttributes.class); - assertEquals(InstallPluginCommand.BIN_FILES_PERMS, attributes.permissions()); + assertEquals(InstallPluginAction.BIN_FILES_PERMS, attributes.permissions()); } } } @@ -387,45 +390,40 @@ void assertInstallCleaned(Environment env) throws IOException { } } - public void testMissingPluginId() throws IOException { - final Tuple env = createEnv(fs, temp); - final UserException e = expectThrows(UserException.class, () -> installPlugin(null, env.v1())); + public void testMissingPluginId() { + final UserException e = expectThrows(UserException.class, () -> installPlugin((String) null)); assertTrue(e.getMessage(), e.getMessage().contains("at least one plugin id is required")); } public void testSomethingWorks() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + installPlugin(pluginZip); assertPlugin("fake", pluginDir, env.v2()); } public void testMultipleWorks() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String fake1PluginZip = createPluginUrl("fake1", pluginDir); - String fake2PluginZip = createPluginUrl("fake2", pluginDir); + PluginDescriptor fake1PluginZip = createPluginZip("fake1", pluginDir); + PluginDescriptor fake2PluginZip = createPluginZip("fake2", pluginDir); installPlugins(List.of(fake1PluginZip, fake2PluginZip), env.v1()); assertPlugin("fake1", pluginDir, env.v2()); assertPlugin("fake2", pluginDir, env.v2()); } public void testDuplicateInstall() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); final UserException e = expectThrows(UserException.class, () -> installPlugins(List.of(pluginZip, pluginZip), env.v1())); - assertThat(e, hasToString(containsString("duplicate plugin id [" + pluginZip + "]"))); + assertThat(e, hasToString(containsString("duplicate plugin id [" + pluginZip.getId() + "]"))); } public void testTransaction() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = 
createPluginUrl("fake", pluginDir); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + PluginDescriptor nonexistentPluginZip = new PluginDescriptor( + pluginZip.getId() + "-does-not-exist", + pluginZip.getUrl() + "-does-not-exist" + ); final FileNotFoundException e = expectThrows( FileNotFoundException.class, - () -> installPlugins(List.of(pluginZip, pluginZip + "does-not-exist"), env.v1()) + () -> installPlugins(List.of(pluginZip, nonexistentPluginZip), env.v1()) ); assertThat(e, hasToString(containsString("does-not-exist"))); final Path fakeInstallPath = env.v2().pluginsFile().resolve("fake"); @@ -435,12 +433,10 @@ public void testTransaction() throws Exception { } public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); final Path removing = env.v2().pluginsFile().resolve(".removing-failed"); Files.createDirectory(removing); - final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip)); final String expected = String.format( Locale.ROOT, "found file [%s] from a failed attempt to remove the plugin [failed]; execute [elasticsearch-plugin remove failed]", @@ -450,69 +446,63 @@ public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { } public void testSpaceInUrl() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); Path pluginZipWithSpaces = createTempFile("foo bar", ".zip"); - try (InputStream in = FileSystemUtils.openFileURLStream(new URL(pluginZip))) { + try (InputStream in = FileSystemUtils.openFileURLStream(new URL(pluginZip.getUrl()))) { Files.copy(in, pluginZipWithSpaces, StandardCopyOption.REPLACE_EXISTING); } - installPlugin(pluginZipWithSpaces.toUri().toURL().toString(), env.v1()); + PluginDescriptor modifiedPlugin = new PluginDescriptor("fake", pluginZipWithSpaces.toUri().toURL().toString()); + installPlugin(modifiedPlugin); assertPlugin("fake", pluginDir, env.v2()); } - public void testMalformedUrlNotMaven() throws Exception { - Tuple env = createEnv(fs, temp); + public void testMalformedUrlNotMaven() { // has two colons, so it appears similar to maven coordinates - MalformedURLException e = expectThrows(MalformedURLException.class, () -> installPlugin("://host:1234", env.v1())); + PluginDescriptor plugin = new PluginDescriptor("fake", "://host:1234"); + MalformedURLException e = expectThrows(MalformedURLException.class, () -> installPlugin(plugin)); assertTrue(e.getMessage(), e.getMessage().contains("no protocol")); } - public void testFileNotMaven() throws Exception { - Tuple env = createEnv(fs, temp); + public void testFileNotMaven() { String dir = randomAlphaOfLength(10) + ":" + randomAlphaOfLength(5) + "\\" + randomAlphaOfLength(5); Exception e = expectThrows( Exception.class, // has two colons, so it appears similar to maven coordinates - () -> installPlugin("file:" + dir, env.v1()) + () -> installPlugin("file:" + dir) ); assertFalse(e.getMessage(), e.getMessage().contains("maven.org")); assertTrue(e.getMessage(), e.getMessage().contains(dir)); } - public void testUnknownPlugin() 
throws Exception { - Tuple env = createEnv(fs, temp); - UserException e = expectThrows(UserException.class, () -> installPlugin("foo", env.v1())); + public void testUnknownPlugin() { + UserException e = expectThrows(UserException.class, () -> installPlugin("foo")); assertTrue(e.getMessage(), e.getMessage().contains("Unknown plugin foo")); } public void testPluginsDirReadOnly() throws Exception { assumeTrue("posix and filesystem", isPosix && isReal); - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); try (PosixPermissionsResetter pluginsAttrs = new PosixPermissionsResetter(env.v2().pluginsFile())) { pluginsAttrs.setPermissions(new HashSet<>()); - String pluginZip = createPluginUrl("fake", pluginDir); - IOException e = expectThrows(IOException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + IOException e = expectThrows(IOException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains(env.v2().pluginsFile().toString())); } assertInstallCleaned(env.v2()); } public void testBuiltinModule() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("lang-painless", pluginDir); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("lang-painless", pluginDir); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("is a system module")); assertInstallCleaned(env.v2()); } public void testBuiltinXpackModule() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("x-pack", pluginDir); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("x-pack", pluginDir); + // There is separate handling for installing "x-pack", versus installing a plugin + // whose descriptor contains the name "x-pack". 
+ pluginZip.setId("not-x-pack"); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("is a system module")); assertInstallCleaned(env.v2()); } @@ -520,101 +510,84 @@ public void testBuiltinXpackModule() throws Exception { public void testJarHell() throws Exception { // jar hell test needs a real filesystem assumeTrue("real filesystem", isReal); - Tuple environment = createEnv(fs, temp); Path pluginDirectory = createPluginDir(temp); writeJar(pluginDirectory.resolve("other.jar"), "FakePlugin"); - String pluginZip = createPluginUrl("fake", pluginDirectory); // adds plugin.jar with FakePlugin - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> installPlugin(pluginZip, environment.v1(), defaultCommand) - ); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDirectory); // adds plugin.jar with FakePlugin + IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1(), defaultAction)); assertTrue(e.getMessage(), e.getMessage().contains("jar hell")); - assertInstallCleaned(environment.v2()); + assertInstallCleaned(env.v2()); } public void testIsolatedPlugins() throws Exception { - Tuple env = createEnv(fs, temp); // these both share the same FakePlugin class Path pluginDir1 = createPluginDir(temp); - String pluginZip1 = createPluginUrl("fake1", pluginDir1); - installPlugin(pluginZip1, env.v1()); + PluginDescriptor pluginZip1 = createPluginZip("fake1", pluginDir1); + installPlugin(pluginZip1); Path pluginDir2 = createPluginDir(temp); - String pluginZip2 = createPluginUrl("fake2", pluginDir2); - installPlugin(pluginZip2, env.v1()); + PluginDescriptor pluginZip2 = createPluginZip("fake2", pluginDir2); + installPlugin(pluginZip2); assertPlugin("fake1", pluginDir1, env.v2()); assertPlugin("fake2", pluginDir2, env.v2()); } public void testExistingPlugin() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + installPlugin(pluginZip); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("already exists")); assertInstallCleaned(env.v2()); } public void testBin() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path binDir = pluginDir.resolve("bin"); Files.createDirectory(binDir); Files.createFile(binDir.resolve("somescript")); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + installPlugin(pluginZip); assertPlugin("fake", pluginDir, env.v2()); } public void testBinNotDir() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path binDir = pluginDir.resolve("bin"); Files.createFile(binDir); - String pluginZip = createPluginUrl("fake", pluginDir); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("not a 
directory")); assertInstallCleaned(env.v2()); } public void testBinContainsDir() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path dirInBinDir = pluginDir.resolve("bin").resolve("foo"); Files.createDirectories(dirInBinDir); Files.createFile(dirInBinDir.resolve("somescript")); - String pluginZip = createPluginUrl("fake", pluginDir); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in bin dir for plugin")); assertInstallCleaned(env.v2()); } public void testBinConflict() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path binDir = pluginDir.resolve("bin"); Files.createDirectory(binDir); Files.createFile(binDir.resolve("somescript")); - String pluginZip = createPluginUrl("elasticsearch", pluginDir); - FileAlreadyExistsException e = expectThrows(FileAlreadyExistsException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("elasticsearch", pluginDir); + FileAlreadyExistsException e = expectThrows(FileAlreadyExistsException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains(env.v2().binFile().resolve("elasticsearch").toString())); assertInstallCleaned(env.v2()); } public void testBinPermissions() throws Exception { assumeTrue("posix filesystem", isPosix); - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path binDir = pluginDir.resolve("bin"); Files.createDirectory(binDir); Files.createFile(binDir.resolve("somescript")); - String pluginZip = createPluginUrl("fake", pluginDir); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); try (PosixPermissionsResetter binAttrs = new PosixPermissionsResetter(env.v2().binFile())) { Set perms = binAttrs.getCopyPermissions(); // make sure at least one execute perm is missing, so we know we forced it during installation perms.remove(PosixFilePermission.GROUP_EXECUTE); binAttrs.setPermissions(perms); - installPlugin(pluginZip, env.v1()); + installPlugin(pluginZip); assertPlugin("fake", pluginDir, env.v2()); } } @@ -622,7 +595,6 @@ public void testBinPermissions() throws Exception { public void testPluginPermissions() throws Exception { assumeTrue("posix filesystem", isPosix); - final Tuple env = createEnv(fs, temp); final Path pluginDir = createPluginDir(temp); final Path resourcesDir = pluginDir.resolve("resources"); final Path platformDir = pluginDir.resolve("platform"); @@ -635,9 +607,9 @@ public void testPluginPermissions() throws Exception { Files.createDirectory(resourcesDir); Files.createFile(resourcesDir.resolve("resource")); - final String pluginZip = createPluginUrl("fake", pluginDir); + final PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); - installPlugin(pluginZip, env.v1()); + installPlugin(pluginZip); assertPlugin("fake", pluginDir, env.v2()); final Path fake = env.v2().pluginsFile().resolve("fake"); @@ -681,28 +653,24 @@ private void assert755(final Path path) throws IOException { } public void testConfig() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path configDir = pluginDir.resolve("config"); Files.createDirectory(configDir); Files.createFile(configDir.resolve("custom.yml")); - 
String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + installPlugin(pluginZip); assertPlugin("fake", pluginDir, env.v2()); } public void testExistingConfig() throws Exception { - Tuple env = createEnv(fs, temp); Path envConfigDir = env.v2().configFile().resolve("fake"); Files.createDirectories(envConfigDir); Files.write(envConfigDir.resolve("custom.yml"), "existing config".getBytes(StandardCharsets.UTF_8)); - Path pluginDir = createPluginDir(temp); Path configDir = pluginDir.resolve("config"); Files.createDirectory(configDir); Files.write(configDir.resolve("custom.yml"), "new config".getBytes(StandardCharsets.UTF_8)); Files.createFile(configDir.resolve("other.yml")); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + installPlugin(pluginZip); assertPlugin("fake", pluginDir, env.v2()); List configLines = Files.readAllLines(envConfigDir.resolve("custom.yml"), StandardCharsets.UTF_8); assertEquals(1, configLines.size()); @@ -711,57 +679,48 @@ public void testExistingConfig() throws Exception { } public void testConfigNotDir() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Files.createDirectories(pluginDir); Path configDir = pluginDir.resolve("config"); Files.createFile(configDir); - String pluginZip = createPluginUrl("fake", pluginDir); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); assertInstallCleaned(env.v2()); } public void testConfigContainsDir() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Path dirInConfigDir = pluginDir.resolve("config").resolve("foo"); Files.createDirectories(dirInConfigDir); Files.createFile(dirInConfigDir.resolve("myconfig.yml")); - String pluginZip = createPluginUrl("fake", pluginDir); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in config dir for plugin")); assertInstallCleaned(env.v2()); } public void testMissingDescriptor() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Files.createFile(pluginDir.resolve("fake.yml")); String pluginZip = writeZip(pluginDir, null).toUri().toURL().toString(); - NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> installPlugin(pluginZip, env.v1())); + NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("plugin-descriptor.properties")); assertInstallCleaned(env.v2()); } public void testContainsIntermediateDirectory() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); Files.createFile(pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES)); String pluginZip = writeZip(pluginDir, "elasticsearch").toUri().toURL().toString(); - UserException e = 
expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertThat(e.getMessage(), containsString("This plugin was built with an older plugin structure")); assertInstallCleaned(env.v2()); } public void testZipRelativeOutsideEntryName() throws Exception { - Tuple env = createEnv(fs, temp); Path zip = createTempDir().resolve("broken.zip"); try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) { stream.putNextEntry(new ZipEntry("../blah")); } String pluginZip = zip.toUri().toURL().toString(); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertTrue(e.getMessage(), e.getMessage().contains("resolving outside of plugin directory")); assertInstallCleaned(env.v2()); } @@ -808,34 +767,31 @@ public void testInstallXPack() throws IOException { private void runInstallXPackTest(final Build.Flavor flavor, final Class clazz, final String expectedMessage) throws IOException { - final InstallPluginCommand flavorCommand = new InstallPluginCommand() { + + final Environment environment = createEnv(temp).v2(); + final InstallPluginAction flavorAction = new InstallPluginAction(terminal, environment, false) { @Override Build.Flavor buildFlavor() { return flavor; } }; - - final Environment environment = createEnv(fs, temp).v2(); - final T exception = expectThrows(clazz, () -> flavorCommand.execute(terminal, List.of("x-pack"), false, environment)); + final T exception = expectThrows(clazz, () -> flavorAction.execute(List.of(new PluginDescriptor("x-pack")))); assertThat(exception, hasToString(containsString(expectedMessage))); } - public void testInstallMisspelledOfficialPlugins() throws Exception { - Tuple env = createEnv(fs, temp); - - UserException e = expectThrows(UserException.class, () -> installPlugin("analysis-smartnc", env.v1())); + public void testInstallMisspelledOfficialPlugins() { + UserException e = expectThrows(UserException.class, () -> installPlugin("analysis-smartnc")); assertThat(e.getMessage(), containsString("Unknown plugin analysis-smartnc, did you mean [analysis-smartcn]?")); - e = expectThrows(UserException.class, () -> installPlugin("repository", env.v1())); + e = expectThrows(UserException.class, () -> installPlugin("repository")); assertThat(e.getMessage(), containsString("Unknown plugin repository, did you mean any of [repository-s3, repository-gcs]?")); - e = expectThrows(UserException.class, () -> installPlugin("unknown_plugin", env.v1())); + e = expectThrows(UserException.class, () -> installPlugin("unknown_plugin")); assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin")); } public void testBatchFlag() throws Exception { - MockTerminal terminal = new MockTerminal(); - installPlugin(terminal, true); + installPlugin(true); assertThat(terminal.getErrorOutput(), containsString("WARNING: plugin requires additional permissions")); assertThat(terminal.getOutput(), containsString("-> Downloading")); // No progress bar in batch mode @@ -843,27 +799,23 @@ public void testBatchFlag() throws Exception { } public void testQuietFlagDisabled() throws Exception { - MockTerminal terminal = new MockTerminal(); terminal.setVerbosity(randomFrom(Terminal.Verbosity.NORMAL, Terminal.Verbosity.VERBOSE)); - installPlugin(terminal, false); + installPlugin(false); assertThat(terminal.getOutput(), containsString("100%")); } 
public void testQuietFlagEnabled() throws Exception { - MockTerminal terminal = new MockTerminal(); terminal.setVerbosity(Terminal.Verbosity.SILENT); - installPlugin(terminal, false); + installPlugin(false); assertThat(terminal.getOutput(), not(containsString("100%"))); } public void testPluginAlreadyInstalled() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + installPlugin(pluginZip); final UserException e = expectThrows( UserException.class, - () -> installPlugin(pluginZip, env.v1(), randomFrom(skipJarHellCommand, defaultCommand)) + () -> installPlugin(pluginZip, env.v1(), randomFrom(skipJarHellAction, defaultAction)) ); assertThat( e.getMessage(), @@ -876,17 +828,18 @@ public void testPluginAlreadyInstalled() throws Exception { ); } - private void installPlugin(MockTerminal terminal, boolean isBatch, String... additionalProperties) throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); + private void installPlugin(boolean isBatch, String... additionalProperties) throws Exception { // if batch is enabled, we also want to add a security policy if (isBatch) { writePluginSecurityPolicy(pluginDir, "setFactory"); } - String pluginZip = createPlugin("fake", pluginDir, additionalProperties).toUri().toURL().toString(); - skipJarHellCommand.execute(terminal, List.of(pluginZip), isBatch, env.v2()); + PluginDescriptor pluginZip = createPlugin("fake", pluginDir, additionalProperties); + skipJarHellAction.setEnvironment(env.v2()); + skipJarHellAction.setBatch(isBatch); + skipJarHellAction.execute(List.of(pluginZip)); } + @SuppressForbidden(reason = "Path.of() is OK in this context") void assertInstallPluginFromUrl( final String pluginId, final String name, @@ -898,15 +851,14 @@ void assertInstallPluginFromUrl( final PGPSecretKey secretKey, final BiFunction signature ) throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - Path pluginZip = createPlugin(name, pluginDir); - InstallPluginCommand command = new InstallPluginCommand() { + PluginDescriptor pluginZip = createPlugin(name, pluginDir); + Path pluginZipPath = Path.of(URI.create(pluginZip.getUrl())); + InstallPluginAction action = new InstallPluginAction(terminal, env.v2(), false) { @Override - Path downloadZip(Terminal terminal, String urlString, Path tmpDir, boolean isBatch) throws IOException { + Path downloadZip(String urlString, Path tmpDir) throws IOException { assertEquals(url, urlString); Path downloadedPath = tmpDir.resolve("downloaded.zip"); - Files.copy(pluginZip, downloadedPath); + Files.copy(pluginZipPath, downloadedPath); return downloadedPath; } @@ -915,13 +867,13 @@ URL openUrl(String urlString) throws IOException { if ((url + shaExtension).equals(urlString)) { // calc sha an return file URL to it Path shaFile = temp.apply("shas").resolve("downloaded.zip" + shaExtension); - byte[] zipbytes = Files.readAllBytes(pluginZip); + byte[] zipbytes = Files.readAllBytes(pluginZipPath); String checksum = shaCalculator.apply(zipbytes); Files.write(shaFile, checksum.getBytes(StandardCharsets.UTF_8)); return shaFile.toUri().toURL(); } else if ((url + ".asc").equals(urlString)) { final Path ascFile = temp.apply("asc").resolve("downloaded.zip" + ".asc"); - final byte[] zipBytes = Files.readAllBytes(pluginZip); + final byte[] zipBytes = 
Files.readAllBytes(pluginZipPath); final String asc = signature.apply(zipBytes, secretKey); Files.write(ascFile, asc.getBytes(StandardCharsets.UTF_8)); return ascFile.toUri().toURL(); @@ -931,7 +883,7 @@ URL openUrl(String urlString) throws IOException { @Override void verifySignature(Path zip, String urlString) throws IOException, PGPException { - if (InstallPluginCommand.OFFICIAL_PLUGINS.contains(name)) { + if (InstallPluginAction.OFFICIAL_PLUGINS.contains(name)) { super.verifySignature(zip, urlString); } else { throw new UnsupportedOperationException("verify signature should not be called for unofficial plugins"); @@ -962,7 +914,7 @@ InputStream getPublicKey() { } @Override - boolean urlExists(Terminal terminal, String urlString) throws IOException { + boolean urlExists(String urlString) { return urlString.equals(url); } @@ -977,11 +929,11 @@ boolean isSnapshot() { } @Override - void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) throws Exception { + void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) { // no jarhell check } }; - installPlugin(pluginId, env.v1(), command); + installPlugin(new PluginDescriptor(name, pluginId), env.v1(), action); assertPlugin(name, pluginDir, env.v2()); } @@ -1161,7 +1113,7 @@ public void testOfficialShaMissing() throws Exception { assertEquals("Plugin checksum missing: " + url + ".sha512", e.getMessage()); } - public void testMavenShaMissing() throws Exception { + public void testMavenShaMissing() { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; UserException e = expectThrows( UserException.class, @@ -1250,7 +1202,7 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { assertTrue(e.getMessage(), e.getMessage().startsWith("Invalid checksum file")); } - public void testSha512Mismatch() throws Exception { + public void testSha512Mismatch() { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Build.CURRENT.getQualifiedVersion() + ".zip"; @@ -1272,7 +1224,7 @@ public void testSha512Mismatch() throws Exception { assertTrue(e.getMessage(), e.getMessage().contains("SHA-512 mismatch, expected foobar")); } - public void testSha1Mismatch() throws Exception { + public void testSha1Mismatch() { String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip"; UserException e = expectThrows( UserException.class, @@ -1362,7 +1314,7 @@ public void testFailedSignatureVerification() throws Exception { assertThat(e, hasToString(equalTo("java.lang.IllegalStateException: signature verification for [" + url + "] failed"))); } - public PGPSecretKey newSecretKey() throws NoSuchAlgorithmException, NoSuchProviderException, PGPException { + public PGPSecretKey newSecretKey() throws NoSuchAlgorithmException, PGPException { final KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA"); kpg.initialize(2048); final KeyPair pair = kpg.generateKeyPair(); @@ -1385,7 +1337,7 @@ private Function checksum(final MessageDigest digest) { return checksumAndString(digest, ""); } - private Function checksumAndFilename(final MessageDigest digest, final String url) throws MalformedURLException { + private Function checksumAndFilename(final MessageDigest digest, final String url) { final String[] segments = URI.create(url).getPath().split("/"); return checksumAndString(digest, " " + segments[segments.length - 1]); } @@ -1423,7 +1375,7 @@ private String signature(final byte[] 
bytes, final PGPSecretKey secretKey) { // checks the plugin requires a policy confirmation, and does not install when that is rejected by the user // the plugin is installed after this method completes - private void assertPolicyConfirmation(Tuple env, String pluginZip, String... warnings) throws Exception { + private void assertPolicyConfirmation(Tuple env, PluginDescriptor pluginZip, String... warnings) throws Exception { for (int i = 0; i < warnings.length; ++i) { String warning = warnings[i]; for (int j = 0; j < i; ++j) { @@ -1431,7 +1383,7 @@ private void assertPolicyConfirmation(Tuple env, String plugi } // default answer, does not install terminal.addTextInput(""); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertEquals("installation aborted by user", e.getMessage()); assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); @@ -1445,7 +1397,7 @@ private void assertPolicyConfirmation(Tuple env, String plugi terminal.addTextInput("y"); // accept warnings we have already tested } terminal.addTextInput("n"); - e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertEquals("installation aborted by user", e.getMessage()); assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); try (Stream fileStream = Files.list(env.v2().pluginsFile())) { @@ -1458,38 +1410,32 @@ private void assertPolicyConfirmation(Tuple env, String plugi for (int j = 0; j < warnings.length; ++j) { terminal.addTextInput("y"); } - installPlugin(pluginZip, env.v1()); + installPlugin(pluginZip); for (String warning : warnings) { assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); } } public void testPolicyConfirmation() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); writePluginSecurityPolicy(pluginDir, "getClassLoader", "setFactory"); - String pluginZip = createPluginUrl("fake", pluginDir); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions"); assertPlugin("fake", pluginDir, env.v2()); } public void testPluginWithNativeController() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true"); + PluginDescriptor pluginZip = createPluginZip("fake", pluginDir, "has.native.controller", "true"); - final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip)); assertThat(e, hasToString(containsString("plugins can not have native controllers"))); } public void testMultipleJars() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); writeJar(pluginDir.resolve("dep1.jar"), "Dep1"); writeJar(pluginDir.resolve("dep2.jar"), "Dep2"); - String pluginZip = createPluginUrl("fake-with-deps", pluginDir); - installPlugin(pluginZip, env.v1()); + PluginDescriptor pluginZip = createPluginZip("fake-with-deps", pluginDir); + installPlugin(pluginZip); assertPlugin("fake-with-deps", pluginDir, env.v2()); } } diff --git 
a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginActionTests.java similarity index 93% rename from distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java rename to distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginActionTests.java index 78105c3d9d16f..0ecf58d251819 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginActionTests.java @@ -28,21 +28,22 @@ import java.nio.file.Path; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; @LuceneTestCase.SuppressFileSystems("*") -public class RemovePluginCommandTests extends ESTestCase { +public class RemovePluginActionTests extends ESTestCase { private Path home; private Environment env; static class MockRemovePluginCommand extends RemovePluginCommand { - final Environment env; private MockRemovePluginCommand(final Environment env) { @@ -53,7 +54,6 @@ private MockRemovePluginCommand(final Environment env) { protected Environment createEnv(Map settings) throws UserException { return env; } - } @Override @@ -69,17 +69,13 @@ public void setUp() throws Exception { } void createPlugin(String name) throws IOException { - createPlugin(env.pluginsFile(), name); + createPlugin(env.pluginsFile(), name, Version.CURRENT); } void createPlugin(String name, Version version) throws IOException { createPlugin(env.pluginsFile(), name, version); } - void createPlugin(Path path, String name) throws IOException { - createPlugin(path, name, Version.CURRENT); - } - void createPlugin(Path path, String name, Version version) throws IOException { PluginTestUtil.writePluginProperties( path.resolve(name), @@ -105,7 +101,10 @@ static MockTerminal removePlugin(String pluginId, Path home, boolean purge) thro static MockTerminal removePlugin(List pluginIds, Path home, boolean purge) throws Exception { Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); MockTerminal terminal = new MockTerminal(); - new MockRemovePluginCommand(env).execute(terminal, env, pluginIds, purge); + final List plugins = pluginIds == null + ? 
null + : pluginIds.stream().map(PluginDescriptor::new).collect(Collectors.toList()); + new RemovePluginAction(terminal, env, purge).execute(plugins); return terminal; } @@ -259,12 +258,13 @@ protected boolean addShutdownHook() { BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput())); BufferedReader errorReader = new BufferedReader(new StringReader(terminal.getErrorOutput())) ) { - assertEquals( - "ERROR: plugin [fake] not found; run 'elasticsearch-plugin list' to get list of installed plugins", - errorReader.readLine() + assertThat(errorReader.readLine(), equalTo("")); + assertThat( + errorReader.readLine(), + equalTo("ERROR: plugin [fake] not found; run 'elasticsearch-plugin list' to get list of installed plugins") ); - assertNull(reader.readLine()); - assertNull(errorReader.readLine()); + assertThat(reader.readLine(), nullValue()); + assertThat(errorReader.readLine(), nullValue()); } } diff --git a/docs/build.gradle b/docs/build.gradle index eb7f2cc22ba3c..ec7168061815e 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import static org.elasticsearch.gradle.testclusters.TestDistribution.DEFAULT @@ -64,7 +65,6 @@ testClusters.matching { it.name == "integTest"}.configureEach { setting 'indices.lifecycle.history_index_enabled', 'false' setting 'ingest.geoip.downloader.enabled', 'false' systemProperty 'es.geoip_v2_feature_flag_enabled', 'true' - systemProperty 'es.shutdown_feature_flag_enabled', 'true' keystorePassword 'keystore-password' } diff --git a/docs/changelog/70635.yaml b/docs/changelog/70635.yaml new file mode 100644 index 0000000000000..d877a7bbba0d5 --- /dev/null +++ b/docs/changelog/70635.yaml @@ -0,0 +1,8 @@ +pr: 70635 +summary: Tighten up write permissions in Docker image +area: Packaging +type: enhancement +issues: [] +versions: + - v8.0.0 + - v7.15.0 diff --git a/docs/changelog/75981.yaml b/docs/changelog/75981.yaml new file mode 100644 index 0000000000000..8b7d8a03136d6 --- /dev/null +++ b/docs/changelog/75981.yaml @@ -0,0 +1,9 @@ +pr: 75981 +summary: Bump bundled JDK to 16.0.2 +area: Packaging +type: upgrade +issues: [] +versions: + - v8.0.0 + - v7.14.1 + - v7.15.0 diff --git a/docs/changelog/76192.yaml b/docs/changelog/76192.yaml new file mode 100644 index 0000000000000..6d0f3d7262065 --- /dev/null +++ b/docs/changelog/76192.yaml @@ -0,0 +1,10 @@ +pr: 76192 +summary: Change env var prefix in Docker settings support +area: Packaging +type: enhancement +issues: + - 76148 + - 74327 +versions: + - v8.0.0 + - v7.15.0 diff --git a/docs/java-rest/high-level/cluster/enroll_node.asciidoc b/docs/java-rest/high-level/cluster/enroll_node.asciidoc index c74276fc6c84e..e8bdaef30850f 100644 --- a/docs/java-rest/high-level/cluster/enroll_node.asciidoc +++ b/docs/java-rest/high-level/cluster/enroll_node.asciidoc @@ -37,8 +37,7 @@ for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the encoded string of the ASN.1 DER encoding of the key. <4> The certificate that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. -<5> The name of the cluster the new node is joining -<6> A list of transport addresses in the form of `host:port` for the nodes that are already +<5> A list of transport addresses in the form of `host:port` for the nodes that are already members of the cluster. 
diff --git a/docs/java-rest/high-level/security/enroll_kibana.asciidoc b/docs/java-rest/high-level/security/enroll_kibana.asciidoc new file mode 100644 index 0000000000000..6e9111375b9bf --- /dev/null +++ b/docs/java-rest/high-level/security/enroll_kibana.asciidoc @@ -0,0 +1,47 @@ +-- +:api: kibana-enrollment +:request: KibanaEnrollmentRequest +:response: KibanaEnrollmentResponse +-- + +[id="{upid}-{api}"] +=== Enroll Kibana API + +Allows a Kibana instance to configure itself to communicate with a secured {es} cluster. + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Enroll Kibana Response + +The returned +{response}+ allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The password for the `kibana_system` user +<2> The CA certificate that has signed the certificate that the cluster uses for TLS on the HTTP layer, +as a Base64 encoded string of the ASN.1 DER encoding of the certificate. + +[id="{upid}-{api}-execute-async"] +==== Asynchronous Execution + +This request can be executed asynchronously using the `security().enrollClientAsync()` +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute-async] +-------------------------------------------------- + +A typical listener for a `KibanaEnrollmentResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument diff --git a/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc index 8a79d20f39bf7..c7225e085f53e 100644 --- a/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc @@ -7,6 +7,9 @@ [id="{upid}-{api}"] === Get Builtin Privileges API +Retrieves the list of cluster privileges and index privileges that are +available in this version of {es}. + include::../execution-no-req.asciidoc[] [id="{upid}-{api}-response"] diff --git a/docs/java-rest/high-level/security/get-privileges.asciidoc b/docs/java-rest/high-level/security/get-privileges.asciidoc index d63f4774d07e5..9775daddfcaac 100644 --- a/docs/java-rest/high-level/security/get-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-privileges.asciidoc @@ -6,12 +6,14 @@ -- [role="xpack"] [id="{upid}-{api}"] -=== Get Privileges API +=== Get Application Privileges API + +Retrieves application privileges. [id="{upid}-{api}-request"] ==== Get Privileges Request -The +{request}+ supports getting privilege(s) for all or for specific applications. +The +{request}+ supports getting privileges for all or for specific applications.
===== Specific privilege of a specific application diff --git a/docs/java-rest/high-level/security/get-service-account-credentials.asciidoc b/docs/java-rest/high-level/security/get-service-account-credentials.asciidoc index 7611a9a689438..5404c7869e159 100644 --- a/docs/java-rest/high-level/security/get-service-account-credentials.asciidoc +++ b/docs/java-rest/high-level/security/get-service-account-credentials.asciidoc @@ -24,15 +24,19 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Get Service Account Credentials Response -The returned +{response}+ contains a list of service account tokens for the requested service account. +The returned +{response}+ contains service tokens for the requested service account. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> Principal of the service account -<2> Name of the node that processed the request. Information of file service tokens is only collected from this node. -<3> List of service token information -<4> Name of the first service account token -<5> Source of the first service account token. The value is either `file` or `index`. +<2> List of index-based service token information +<3> Name of the first service token +<4> Source of the first service token. The value is either `file` or `index`. +<5> For `file` service tokens, names of the nodes where the information is collected. +<6> List of file-based service token information +<7> Response header containing information about the execution of collecting `file` service tokens. +<8> Number of nodes that successfully complete the request to retrieve file-backed service tokens +<9> Number of nodes that fail to complete the request to retrieve file-backed service tokens diff --git a/docs/java-rest/high-level/security/get-user-privileges.asciidoc b/docs/java-rest/high-level/security/get-user-privileges.asciidoc index e445567ce90c9..70536b5715280 100644 --- a/docs/java-rest/high-level/security/get-user-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-user-privileges.asciidoc @@ -7,6 +7,8 @@ [id="{upid}-{api}"] === Get User Privileges API +Retrieves security privileges for the logged in user. + include::../execution-no-req.asciidoc[] [id="{upid}-{api}-response"] diff --git a/docs/java-rest/high-level/security/has-privileges.asciidoc b/docs/java-rest/high-level/security/has-privileges.asciidoc index dfd92be6837fc..5a9914c802b91 100644 --- a/docs/java-rest/high-level/security/has-privileges.asciidoc +++ b/docs/java-rest/high-level/security/has-privileges.asciidoc @@ -7,6 +7,8 @@ [id="{upid}-{api}"] === Has Privileges API +Determines whether the logged in user has a specified list of privileges. + [id="{upid}-{api}-request"] ==== Has Privileges Request The +{request}+ supports checking for any or all of the following privilege types: diff --git a/docs/java-rest/low-level/configuration.asciidoc b/docs/java-rest/low-level/configuration.asciidoc index b112de6af843e..18f96858c7610 100644 --- a/docs/java-rest/low-level/configuration.asciidoc +++ b/docs/java-rest/low-level/configuration.asciidoc @@ -166,7 +166,7 @@ security policy]. The client sends each request to one of the configured nodes in round-robin fashion. Nodes can optionally be filtered through a node selector that needs to be provided when initializing the client.
This is useful when sniffing is -enabled, in case only dedicated master nodes should be hit by HTTP requests. +enabled, in case no dedicated master nodes should be hit by HTTP requests. For each request the client will run the eventually configured node selector to filter the node candidates, then select the next one in the list out of the remaining ones. diff --git a/docs/painless/painless-guide/painless-ingest.asciidoc b/docs/painless/painless-guide/painless-ingest.asciidoc index aa3416294ba72..4a933786e69a4 100644 --- a/docs/painless/painless-guide/painless-ingest.asciidoc +++ b/docs/painless/painless-guide/painless-ingest.asciidoc @@ -107,3 +107,13 @@ corresponding value for that component. ---- String uriParts(String value); ---- + +===== Network community ID +Use the {ref}/community-id-processor.html[community ID processor] to compute the network +community ID for network flow data. + +[source,Painless] +---- +String communityId(String sourceIpAddrString, String destIpAddrString, Object ianaNumber, Object transport, Object sourcePort, Object destinationPort, Object icmpType, Object icmpCode, int seed) +String communityId(String sourceIpAddrString, String destIpAddrString, Object ianaNumber, Object transport, Object sourcePort, Object destinationPort, Object icmpType, Object icmpCode) +---- diff --git a/docs/painless/painless-guide/painless-walkthrough.asciidoc b/docs/painless/painless-guide/painless-walkthrough.asciidoc index f61cc7568dcdf..af2fbdfd2d2a2 100644 --- a/docs/painless/painless-guide/painless-walkthrough.asciidoc +++ b/docs/painless/painless-guide/painless-walkthrough.asciidoc @@ -110,16 +110,23 @@ GET hockey/_search } ---------------------------------------------------------------- - [discrete] -==== Missing values +==== Missing keys -`doc['field'].value` throws an exception if +`doc['myfield'].value` throws an exception if the field is missing in a document. -To check if a document is missing a value, you can call -`doc['field'].size() == 0`. +For more dynamic index mappings, you may consider writting a catch equation + +``` +if (!doc.containsKey('myfield') || doc['myfield'].empty) { return "unavailable" } else { return doc['myfield'].value } +``` +[discrete] +==== Missing values + +To check if a document is missing a value, you can call +`doc['myfield'].size() == 0`. [discrete] ==== Updating Fields with Painless diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index f5c6c76402a31..0c5eab969c9f8 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -114,7 +114,7 @@ The available settings for the EC2 discovery plugin are as follows. `discovery.ec2.read_timeout`:: The socket timeout for connections to EC2, - {ref}/common-options.html#time-units[including the units]. For example, a + {time-units}[including the units]. For example, a value of `60s` specifies a 60-second timeout. Defaults to 50 seconds. `discovery.ec2.groups`:: diff --git a/docs/plugins/filesystem.asciidoc b/docs/plugins/filesystem.asciidoc deleted file mode 100644 index 98a6643ab4420..0000000000000 --- a/docs/plugins/filesystem.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[filesystem]] -== Filesystem Plugins - -Filesystem plugins modify how {es} interacts with the host filesystem. - -[discrete] -=== Core filesystem plugins - -The core filesystem plugins are: - -<>:: - -The Quota-aware Filesystem plugin adds an interface for telling Elasticsearch the disk-quota limits under which it is operating. 
- -include::quota-aware-fs.asciidoc[] - diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index 8d4fef11097f6..705bb3c76f302 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -44,8 +44,6 @@ include::analysis.asciidoc[] include::discovery.asciidoc[] -include::filesystem.asciidoc[] - include::ingest.asciidoc[] include::mapper.asciidoc[] diff --git a/docs/plugins/quota-aware-fs.asciidoc b/docs/plugins/quota-aware-fs.asciidoc deleted file mode 100644 index e89341cfc68b5..0000000000000 --- a/docs/plugins/quota-aware-fs.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -[[quota-aware-fs]] -=== Quota-aware Filesystem Plugin - -NOTE: {cloud-only} - -The Quota-aware Filesystem plugin adds an interface for telling -Elasticsearch the disk-quota limits under which it is operating. - -:plugin_name: quota-aware-fs -include::install_remove.asciidoc[] - -[[quota-aware-fs-usage]] -==== Passing disk quota information to Elasticsearch - -{es} considers the available disk space on a node before deciding whether -to allocate new shards to that node or to actively relocate shards away from that node. -However, while the JVM has support for reporting a filesystem's total space and available -space, it has no knowledge of any quota limits imposed on the user under which {es} is -running. Consequently, the {es} mechanisms for handling low disk space situations cannot -function. To work around this situation, this plugin provides a mechanism for supplying quota-ware -total and available amounts of disk space. - -To use the plugin, install it on all nodes and restart them. You must configure the plugin -by supplying the `es.fs.quota.file` {ref}/jvm-options.html[JVM system property] on startup. This -property specifies a URI to a properties file, which contains the total and available -amounts. - -NOTE: {es} will not start successfully if you install the `quota-aware-fs` plugin, -but you do not supply the `es.fs.quota.file` system property at startup. - -[source,text] ----- --Des.fs.quota.file=file:///path/to/some.properties ----- - -The properties file must contain the keys `total` and `remaining`, both of which contain the respective -number in bytes. You are responsible for writing this file with the correct values, and keeping the -values up-to-date. {es} will poll this file regularly to pick up any changes. - -[source,properties] ----- -total=976490576 -remaining=376785728 ----- diff --git a/docs/plugins/store-smb.asciidoc b/docs/plugins/store-smb.asciidoc index 0dcdbb425955a..b80ee995609bb 100644 --- a/docs/plugins/store-smb.asciidoc +++ b/docs/plugins/store-smb.asciidoc @@ -27,8 +27,12 @@ The Store SMB plugin provides two storage types optimized for SMB: `smb_simple_fs`:: + deprecated::[7.15,"smb_simple_fs is deprecated and will be removed in 8.0. Use smb_nio_fs or other file systems instead."] + +`smb_nio_fs`:: + a SMB specific implementation of the default - {ref}/index-modules-store.html#simplefs[simple fs] + {ref}/index-modules-store.html#niofs[nio fs] To use one of these specific storage types, you need to install the Store SMB plugin and restart the node. Then configure Elasticsearch to set the storage type you want. @@ -37,7 +41,7 @@ This can be configured for all indices by adding this to the `elasticsearch.yml` [source,yaml] ---- -index.store.type: smb_simple_fs +index.store.type: smb_nio_fs ---- Note that setting will be applied for newly created indices. 
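As a companion to the `smb_nio_fs` docs change above, here is a minimal sketch of applying the store type to a single new index through the Java high-level REST client rather than cluster-wide in `elasticsearch.yml`. The index name `smb_index`, the client wiring, and the choice of a per-index setting are illustrative assumptions and not part of this change set.

```java
import java.io.IOException;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.settings.Settings;

public class SmbStoreTypeExample {
    // Creates a new index whose data is stored through the SMB-optimized NIO FS directory.
    // index.store.type is a static index setting, so it only takes effect for indices
    // created after the Store SMB plugin is installed.
    public static boolean createSmbBackedIndex(RestHighLevelClient client) throws IOException {
        CreateIndexRequest request = new CreateIndexRequest("smb_index")
            .settings(Settings.builder().put("index.store.type", "smb_nio_fs"));
        CreateIndexResponse response = client.indices().create(request, RequestOptions.DEFAULT);
        return response.isAcknowledged();
    }
}
```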
diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index 1d07cc20f03b8..302e196caf3ce 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -13,10 +13,8 @@ aggregated for the buckets created by their "parent" bucket aggregation. There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some define fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process. -NOTE: The maximum number of buckets allowed in a single response is limited by a -dynamic cluster setting named -<>. It defaults to 65,536. -Requests that try to return more than the limit will fail with an exception. +NOTE: The <> cluster setting +limits the number of buckets allowed in a single response. include::bucket/adjacency-matrix-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 032cbd6c119fa..405aa68631113 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -35,26 +35,6 @@ be tacked onto a particular year. Fixed intervals are, by contrast, always multiples of SI units and do not change based on calendaring context. -[NOTE] -.Combined `interval` field is deprecated -================================== -deprecated[7.2, `interval` field is deprecated] Historically both calendar and fixed -intervals were configured in a single `interval` field, which led to confusing -semantics. Specifying `1d` would be assumed as a calendar-aware time, -whereas `2d` would be interpreted as fixed time. To get "one day" of fixed time, -the user would need to specify the next smaller unit (in this case, `24h`). - -This combined behavior was often unknown to users, and even when knowledgeable about -the behavior it was difficult to use and confusing. - -This behavior has been deprecated in favor of two new, explicit fields: `calendar_interval` -and `fixed_interval`. - -By forcing a choice between calendar and intervals up front, the semantics of the interval -are clear to the user immediately and there is no ambiguity. The old `interval` field -will be removed in the future. 
-================================== - [[calendar_intervals]] ==== Calendar intervals diff --git a/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc index 1ee9f4f45281d..242c2aec92d72 100644 --- a/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc @@ -88,7 +88,7 @@ The specified field must be of type `geo_point` (which can only be set explicitl * Object format: `{ "lat" : 52.3760, "lon" : 4.894 }` - this is the safest format as it is the most explicit about the `lat` & `lon` values * String format: `"52.3760, 4.894"` - where the first number is the `lat` and the second is the `lon` -* Array format: `[4.894, 52.3760]` - which is based on the `GeoJson` standard and where the first number is the `lon` and the second one is the `lat` +* Array format: `[4.894, 52.3760]` - which is based on the GeoJSON standard where the first number is the `lon` and the second one is the `lat` By default, the distance unit is `m` (meters) but it can also accept: `mi` (miles), `in` (inches), `yd` (yards), `km` (kilometers), `cm` (centimeters), `mm` (millimeters). diff --git a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc index 362eaf51f7a95..b7403599119c2 100644 --- a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc @@ -10,7 +10,7 @@ For example, lets say we have an index of products, and each product holds the l price for the product. The mapping could look like: [source,console,id=nested-aggregation-example] --------------------------------------------------- +---- PUT /products { "mappings": { @@ -18,21 +18,25 @@ PUT /products "resellers": { <1> "type": "nested", "properties": { - "reseller": { "type": "text" }, - "price": { "type": "double" } + "reseller": { + "type": "keyword" + }, + "price": { + "type": "double" + } } } } } } --------------------------------------------------- +---- <1> `resellers` is an array that holds nested documents. The following request adds a product with two resellers: [source,console] --------------------------------------------------- -PUT /products/_doc/0 +---- +PUT /products/_doc/0?refresh { "name": "LED TV", <1> "resellers": [ @@ -46,20 +50,22 @@ PUT /products/_doc/0 } ] } --------------------------------------------------- -// TEST[s/PUT \/products\/_doc\/0/PUT \/products\/_doc\/0\?refresh/] +---- // TEST[continued] + <1> We are using a dynamic mapping for the `name` attribute. The following request returns the minimum price a product can be purchased for: [source,console] --------------------------------------------------- -GET /products/_search +---- +GET /products/_search?size=0 { "query": { - "match": { "name": "led tv" } + "match": { + "name": "led tv" + } }, "aggs": { "resellers": { @@ -67,13 +73,17 @@ GET /products/_search "path": "resellers" }, "aggs": { - "min_price": { "min": { "field": "resellers.price" } } + "min_price": { + "min": { + "field": "resellers.price" + } + } } } } } --------------------------------------------------- -// TEST[s/GET \/products\/_search/GET \/products\/_search\?filter_path=aggregations/] +---- +// TEST[s/size=0/size=0&filter_path=aggregations/] // TEST[continued] As you can see above, the nested aggregation requires the `path` of the nested documents within the top level documents. 
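For readers following the nested aggregation example in Java rather than the console snippets above, a rough equivalent built with the high-level REST client's `AggregationBuilders` might look like the sketch below. The index name `products` and field names are taken from the example; the client setup, class name, and method name are assumptions.

```java
import java.io.IOException;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class NestedMinPriceExample {
    // Mirrors the console request above: a "resellers" nested aggregation over the
    // nested path "resellers", with a "min_price" min sub-aggregation on resellers.price.
    public static SearchResponse minResellerPrice(RestHighLevelClient client) throws IOException {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .size(0)
            .query(QueryBuilders.matchQuery("name", "led tv"))
            .aggregation(
                AggregationBuilders.nested("resellers", "resellers")
                    .subAggregation(AggregationBuilders.min("min_price").field("resellers.price")));
        return client.search(new SearchRequest("products").source(source), RequestOptions.DEFAULT);
    }
}
```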
@@ -82,18 +92,84 @@ Then one can define any type of aggregation over these nested documents. Response: [source,console-result] --------------------------------------------------- +---- { ... "aggregations": { "resellers": { "doc_count": 2, "min_price": { - "value": 350 + "value": 350.0 + } + } + } +} +---- +// TESTRESPONSE[s/\.\.\.//] + +You can use a <> +sub-aggregation to return results for a specific reseller. + +[source,console] +---- +GET /products/_search?size=0 +{ + "query": { + "match": { + "name": "led tv" + } + }, + "aggs": { + "resellers": { + "nested": { + "path": "resellers" + }, + "aggs": { + "filter_reseller": { + "filter": { + "bool": { + "filter": [ + { + "term": { + "resellers.reseller": "companyB" + } + } + ] + } + }, + "aggs": { + "min_price": { + "min": { + "field": "resellers.price" + } + } + } + } + } + } + } +} +---- +// TEST[s/size=0/size=0&filter_path=aggregations/] +// TEST[continued] + +The search returns: + +[source,console-result] +---- +{ + ... + "aggregations": { + "resellers": { + "doc_count": 2, + "filter_reseller": { + "doc_count": 1, + "min_price": { + "value": 500.0 + } } } } } --------------------------------------------------- +---- // TESTRESPONSE[s/\.\.\.//] -// TESTRESPONSE[s/: [0-9]+/: $body.$_path/] diff --git a/docs/reference/aggregations/bucket/range-aggregation.asciidoc b/docs/reference/aggregations/bucket/range-aggregation.asciidoc index 0ee2d5460562a..e801030fd5532 100644 --- a/docs/reference/aggregations/bucket/range-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/range-aggregation.asciidoc @@ -324,3 +324,103 @@ Response: } -------------------------------------------------- // TESTRESPONSE[s/\.\.\.//] +[[search-aggregations-bucket-range-aggregation-histogram-fields]] +==== Histogram fields + +Running a range aggregation over histogram fields computes the total number of counts for each configured range. + +This is done without interpolating between the histogram field values. Consequently, it is possible to have a range +that is "in-between" two histogram values. The resulting range bucket would have a zero doc count. 
+ +Here is an example, executing a range aggregation against the following index that stores pre-aggregated histograms +with latency metrics (in milliseconds) for different networks: + +[source,console] +-------------------------------------------------- +PUT metrics_index/_doc/1 +{ + "network.name" : "net-1", + "latency_histo" : { + "values" : [1, 3, 8, 12, 15], + "counts" : [3, 7, 23, 12, 6] + } +} + +PUT metrics_index/_doc/2 +{ + "network.name" : "net-2", + "latency_histo" : { + "values" : [1, 6, 8, 12, 14], + "counts" : [8, 17, 8, 7, 6] + } +} + +POST /metrics_index/_search?size=0&filter_path=aggregations +{ + "aggs": { + "latency_ranges": { + "range": { + "field": "latency_histo", + "ranges": [ + {"to": 2}, + {"from": 2, "to": 3}, + {"from": 3, "to": 10}, + {"from": 10} + ] + } + } + } +} +-------------------------------------------------- + + +The `range` aggregation will sum the counts of each range computed based on the `values` and +return the following output: + +[source,console-result] +-------------------------------------------------- +{ + "aggregations": { + "latency_ranges": { + "buckets": [ + { + "key": "*-2.0", + "to": 2, + "doc_count": 11 + }, + { + "key": "2.0-3.0", + "from": 2, + "to": 3, + "doc_count": 0 + }, + { + "key": "3.0-10.0", + "from": 3, + "to": 10, + "doc_count": 55 + }, + { + "key": "10.0-*", + "from": 10, + "doc_count": 31 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[skip:test not setup] + +[IMPORTANT] +======== +Range aggregation is a bucket aggregation, which partitions documents into buckets rather than calculating metrics over fields like +metrics aggregations do. Each bucket represents a collection of documents which sub-aggregations can run on. +On the other hand, a histogram field is a pre-aggregated field representing multiple values inside a single field: +buckets of numerical data and a count of items/documents for each bucket. This mismatch between the range aggregations expected input +(expecting raw documents) and the histogram field (that provides summary information) limits the outcome of the aggregation +to only the doc counts for each bucket. + + +**Consequently, when executing a range aggregation over a histogram field, no sub-aggregations are allowed.** +======== diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index 221c35020b563..42fec9c9d74ba 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -116,7 +116,7 @@ a bike theft. This is a significant seven-fold increase in frequency and so this The problem with using a query to spot anomalies is it only gives us one subset to use for comparisons. To discover all the other police forces' anomalies we would have to repeat the query for each of the different forces. -This can be a tedious way to look for unusual patterns in an index +This can be a tedious way to look for unusual patterns in an index. @@ -385,6 +385,94 @@ Google normalized distance as described in https://arxiv.org/pdf/cs/0412098v3.pd // NOTCONSOLE `gnd` also accepts the `background_is_superset` parameter. +[role="xpack"] +[[p-value-score]] +===== p-value score + +The p-value is the probability of obtaining test results at least as extreme as +the results actually observed, under the assumption that the null hypothesis is +correct. 
The p-value is calculated assuming that the foreground set and the +background set are independent +https://en.wikipedia.org/wiki/Bernoulli_trial[Bernoulli trials], with the null +hypothesis that the probabilities are the same. + +====== Example usage + +This example calculates the p-value score for terms `user_agent.version` given +the foreground set of "ended in failure" versus "NOT ended in failure". + +`"background_is_superset": false` indicates that the background set does +not contain the counts of the foreground set as they are filtered out. + +[source,console] +-------------------------------------------------- +GET /_search +{ + "query": { + "bool": { + "filter": [ + { + "term": { + "event.outcome": "failure" + } + }, + { + "range": { + "@timestamp": { + "gte": "2021-02-01", + "lt": "2021-02-04" + } + } + }, + { + "term": { + "service.name": { + "value": "frontend-node" + } + } + } + ] + } + }, + "aggs": { + "failure_p_value": { + "significant_terms": { + "field": "user_agent.version", + "background_filter": { + "bool": { + "must_not": [ + { + "term": { + "event.outcome": "failure" + } + } + ], + "filter": [ + { + "range": { + "@timestamp": { + "gte": "2021-02-01", + "lt": "2021-02-04" + } + } + }, + { + "term": { + "service.name": { + "value": "frontend-node" + } + } + } + ] + } + }, + "p_value": {"background_is_superset": false} + } + } + } +} +-------------------------------------------------- +// TEST[s/_search/_search?size=0/] ===== Percentage A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 2658ab6683d3c..2a192fe33ed0a 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -153,7 +153,7 @@ The response for the above aggregation: [[geocentroid-aggregation-geo-shape]] ==== Geo Centroid Aggregation on `geo_shape` fields -The centroid metric for geo-shapes is more nuanced than for points. The centroid of a specific aggregation bucket +The centroid metric for geoshapes is more nuanced than for points. The centroid of a specific aggregation bucket containing shapes is the centroid of the highest-dimensionality shape type in the bucket. For example, if a bucket contains shapes comprising of polygons and lines, then the lines do not contribute to the centroid metric. Each type of shape's centroid is calculated differently. Envelopes and circles ingested via the <> are treated @@ -233,12 +233,12 @@ POST /places/_search?size=0 .Using `geo_centroid` as a sub-aggregation of `geohash_grid` ==== The <> -aggregation places documents, not individual geo-points, into buckets. If a +aggregation places documents, not individual geopoints, into buckets. If a document's `geo_point` field contains <>, the document -could be assigned to multiple buckets, even if one or more of its geo-points are +could be assigned to multiple buckets, even if one or more of its geopoints are outside the bucket boundaries. If a `geocentroid` sub-aggregation is also used, each centroid is calculated -using all geo-points in a bucket, including those outside the bucket boundaries. +using all geopoints in a bucket, including those outside the bucket boundaries. This can result in centroids outside of bucket boundaries. 
==== diff --git a/docs/reference/alias.asciidoc b/docs/reference/alias.asciidoc index 5f2515f2452dd..1e1fd223150b9 100644 --- a/docs/reference/alias.asciidoc +++ b/docs/reference/alias.asciidoc @@ -124,8 +124,8 @@ POST _aliases === Add an alias at index creation You can also use a <> or -<> to add index aliases at index creation. -You cannot use a component or index template to add a data stream alias. +<> to add index or data stream aliases +when they are created. [source,console] ---- @@ -251,7 +251,7 @@ with a write index instead. See === Filter an alias The `filter` option uses <> to limit the documents an alias -can access. Data stream aliases do not support `filter`. +can access. [source,console] ---- diff --git a/docs/reference/analysis/index-search-time.asciidoc b/docs/reference/analysis/index-search-time.asciidoc index 41b922c2e95ca..547829a0435f2 100644 --- a/docs/reference/analysis/index-search-time.asciidoc +++ b/docs/reference/analysis/index-search-time.asciidoc @@ -56,7 +56,7 @@ The user expects this search to match the sentence indexed earlier, However, the query string does not contain the exact words used in the document's original text: -* `quick` vs `QUICK` +* `Quick` vs `QUICK` * `fox` vs `foxes` To account for this, the query string is analyzed using the same analyzer. This diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index c382a7deec36a..304181792cb4f 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -2,81 +2,39 @@ == API conventions The {es} REST APIs are exposed over HTTP. +Except where noted, the following conventions apply across all APIs. -The conventions listed in this chapter can be applied throughout the REST -API, unless otherwise specified. - -* <> -* <> -* <> -* <> -* <> - -[[multi-index]] -=== Multi-target syntax - -Most APIs that accept a ``, ``, or `` request path -parameter also support _multi-target syntax_. - -In multi-target syntax, you can use a comma-separated list to run a request on -multiple resources, such as data streams, indices, or aliases: -`test1,test2,test3`. You can also use {wikipedia}/Glob_(programming)[glob-like] -wildcard (`*`) expressions to target resources that match a pattern: `test*` or -`*test` or `te*t` or `*test*`. - -You can exclude targets using the `-` character: `test*,-test3`. - -IMPORTANT: Aliases are resolved after wildcard expressions. This can result in a -request that targets an excluded alias. For example, if `test3` is an index -alias, the pattern `test*,-test3` still targets the indices for `test3`. To -avoid this, exclude the concrete indices for the alias instead. - -Multi-target APIs that can target indices support the following query -string parameters: - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] - -The defaults settings for the above parameters depend on the API being used. - -Some multi-target APIs that can target indices also support the following query -string parameter: - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] - -NOTE: APIs with a single target, such as the <>, do -not support multi-target syntax. - -[[hidden]] -==== Hidden data streams and indices - -For most APIs, wildcard expressions do not match hidden data streams and indices -by default. 
To match hidden data streams and indices using a wildcard -expression, you must specify the `expand_wildcards` query parameter. +[discrete] +=== Content-type requirements -You can create hidden data streams by setting `data_stream.hidden` to `true` in -the stream's matching <>. You can hide -indices using the <> index setting. +The type of the content sent in a request body must be specified using +the `Content-Type` header. The value of this header must map to one of +the supported formats that the API supports. Most APIs support JSON, +YAML, CBOR, and SMILE. The bulk and multi-search APIs support NDJSON, +JSON, and SMILE; other types will result in an error response. -The backing indices for data streams are hidden automatically. Some features, -such as {ml}, store information in hidden indices. +When using the `source` query string parameter, the content type must be +specified using the `source_content_type` query string parameter. -Global index templates that match all indices are not applied to hidden indices. +{es} only supports UTF-8-encoded JSON. {es} ignores any other encoding headings +sent with a request. Responses are also UTF-8 encoded. -[[system-indices]] -==== System indices +[discrete] +[[get-requests]] +=== GET and POST requests -{es} modules and plugins can store configuration and state information in internal _system indices_. -You should not directly access or modify system indices -as they contain data essential to the operation of the system. +A number of {es} GET APIs--most notably the search API--support a request body. +While the GET action makes sense in the context of retrieving information, +GET requests with a body are not supported by all HTTP libraries. +All {es} GET APIs that require a body can also be submitted as POST requests. +Alternatively, you can pass the request body as the +<> +when using GET. -IMPORTANT: Direct access to system indices is deprecated and -will no longer be allowed in the next major version. +include::rest-api/cron-expressions.asciidoc[] -[[date-math-index-names]] +[discrete] +[[api-date-math-index-names]] === Date math support in index and index alias names Date math name resolution lets you to search a range of time series indices or @@ -172,345 +130,123 @@ GET /%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogs // TEST[s/^/PUT logstash-2016.09.20\nPUT logstash-2016.09.19\nPUT logstash-2016.09.18\n/] // TEST[s/now/2016.09.20%7C%7C/] -include::rest-api/cron-expressions.asciidoc[] - -[[common-options]] -=== Common options - -The following options can be applied to all of the REST APIs. - -[discrete] -==== Pretty Results - -When appending `?pretty=true` to any request made, the JSON returned -will be pretty formatted (use it for debugging only!). Another option is -to set `?format=yaml` which will cause the result to be returned in the -(sometimes) more readable yaml format. - - [discrete] -==== Human readable output - -Statistics are returned in a format suitable for humans -(e.g. `"exists_time": "1h"` or `"size": "1kb"`) and for computers -(e.g. `"exists_time_in_millis": 3600000` or `"size_in_bytes": 1024`). -The human readable values can be turned off by adding `?human=false` -to the query string. This makes sense when the stats results are -being consumed by a monitoring tool, rather than intended for human -consumption. The default for the `human` flag is -`false`. 
- -[[date-math]] -[discrete] -==== Date Math - -Most parameters which accept a formatted date value -- such as `gt` and `lt` -in <>, or `from` and `to` -in <> -- understand date maths. - -The expression starts with an anchor date, which can either be `now`, or a -date string ending with `||`. This anchor date can optionally be followed by -one or more maths expressions: - -* `+1h`: Add one hour -* `-1d`: Subtract one day -* `/d`: Round down to the nearest day - -The supported time units differ from those supported by <> for durations. -The supported units are: - -[horizontal] -`y`:: Years -`M`:: Months -`w`:: Weeks -`d`:: Days -`h`:: Hours -`H`:: Hours -`m`:: Minutes -`s`:: Seconds - -Assuming `now` is `2001-01-01 12:00:00`, some examples are: - -[horizontal] -`now+1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 13:00:00` -`now-1h`:: `now` in milliseconds minus one hour. Resolves to: `2001-01-01 11:00:00` -`now-1h/d`:: `now` in milliseconds minus one hour, rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00` - `2001.02.01\|\|+1M/d`:: `2001-02-01` in milliseconds plus one month. Resolves to: `2001-03-01 00:00:00` - -[discrete] -[[common-options-response-filtering]] -==== Response Filtering - -All REST APIs accept a `filter_path` parameter that can be used to reduce -the response returned by Elasticsearch. This parameter takes a comma -separated list of filters expressed with the dot notation: - -[source,console] --------------------------------------------------- -GET /_search?q=kimchy&filter_path=took,hits.hits._id,hits.hits._score --------------------------------------------------- -// TEST[setup:my_index] - -Responds: - -[source,console-result] --------------------------------------------------- -{ - "took" : 3, - "hits" : { - "hits" : [ - { - "_id" : "0", - "_score" : 1.6375021 - } - ] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took" : 3/"took" : $body.took/] -// TESTRESPONSE[s/1.6375021/$body.hits.hits.0._score/] - -It also supports the `*` wildcard character to match any field or part -of a field's name: - -[source,console] --------------------------------------------------- -GET /_cluster/state?filter_path=metadata.indices.*.stat* --------------------------------------------------- -// TEST[s/^/PUT my-index-000001\n/] +[[api-multi-index]] +=== Multi-target syntax -Responds: +Most APIs that accept a ``, ``, or `` request path +parameter also support _multi-target syntax_. -[source,console-result] --------------------------------------------------- -{ - "metadata" : { - "indices" : { - "my-index-000001": {"state": "open"} - } - } -} --------------------------------------------------- +In multi-target syntax, you can use a comma-separated list to run a request on +multiple resources, such as data streams, indices, or aliases: +`test1,test2,test3`. You can also use {wikipedia}/Glob_(programming)[glob-like] +wildcard (`*`) expressions to target resources that match a pattern: `test*` or +`*test` or `te*t` or `*test*`. -And the `**` wildcard can be used to include fields without knowing the -exact path of the field. For example, we can return the Lucene version -of every segment with this request: +You can exclude targets using the `-` character: `test*,-test3`. 
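+
+For illustration, the following request applies this syntax to the `_count` API
+(one of many APIs that accept multi-target expressions), assuming indices named
+`test1`, `test2`, and `test3` exist. It counts documents in every index that
+starts with `test` except `test3`:
+
+[source,console]
+----
+GET /test*,-test3/_count
+----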
-[source,console] --------------------------------------------------- -GET /_cluster/state?filter_path=routing_table.indices.**.state --------------------------------------------------- -// TEST[s/^/PUT my-index-000001\n/] +IMPORTANT: Aliases are resolved after wildcard expressions. This can result in a +request that targets an excluded alias. For example, if `test3` is an index +alias, the pattern `test*,-test3` still targets the indices for `test3`. To +avoid this, exclude the concrete indices for the alias instead. -Responds: +Multi-target APIs that can target indices support the following query +string parameters: -[source,console-result] --------------------------------------------------- -{ - "routing_table": { - "indices": { - "my-index-000001": { - "shards": { - "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] - } - } - } - } -} --------------------------------------------------- +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] -It is also possible to exclude one or more fields by prefixing the filter with the char `-`: +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] -[source,console] --------------------------------------------------- -GET /_count?filter_path=-_shards --------------------------------------------------- -// TEST[setup:my_index] +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] -Responds: +The defaults settings for the above parameters depend on the API being used. -[source,console-result] --------------------------------------------------- -{ - "count" : 5 -} --------------------------------------------------- +Some multi-target APIs that can target indices also support the following query +string parameter: -And for more control, both inclusive and exclusive filters can be combined in the same expression. In -this case, the exclusive filters will be applied first and the result will be filtered again using the -inclusive filters: +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] -[source,console] --------------------------------------------------- -GET /_cluster/state?filter_path=metadata.indices.*.state,-metadata.indices.logstash-* --------------------------------------------------- -// TEST[s/^/PUT my-index-000001\nPUT my-index-000002\nPUT my-index-000003\nPUT logstash-2016.01\n/] +NOTE: APIs with a single target, such as the <>, do +not support multi-target syntax. -Responds: +[discrete] +[[multi-hidden]] +==== Hidden data streams and indices -[source,console-result] --------------------------------------------------- -{ - "metadata" : { - "indices" : { - "my-index-000001" : {"state" : "open"}, - "my-index-000002" : {"state" : "open"}, - "my-index-000003" : {"state" : "open"} - } - } -} --------------------------------------------------- +For most APIs, wildcard expressions do not match hidden data streams and indices +by default. To match hidden data streams and indices using a wildcard +expression, you must specify the `expand_wildcards` query parameter. -Note that Elasticsearch sometimes returns directly the raw value of a field, -like the `_source` field. If you want to filter `_source` fields, you should -consider combining the already existing `_source` parameter (see -<> for more details) with the `filter_path` -parameter like this: +You can create hidden data streams by setting `data_stream.hidden` to `true` in +the stream's matching <>. You can hide +indices using the <> index setting. 
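+
+For illustration, assuming a hypothetical index named `my-hidden-index`, the
+first request below creates it as a hidden index with the `index.hidden`
+setting, and the second request matches it with a wildcard only because
+`expand_wildcards` includes `hidden`:
+
+[source,console]
+----
+PUT /my-hidden-index
+{
+  "settings": {
+    "index.hidden": true
+  }
+}
+
+GET /my-hidden-*/_search?expand_wildcards=open,hidden
+----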
-[source,console] --------------------------------------------------- -POST /library/_doc?refresh -{"title": "Book #1", "rating": 200.1} -POST /library/_doc?refresh -{"title": "Book #2", "rating": 1.7} -POST /library/_doc?refresh -{"title": "Book #3", "rating": 0.1} -GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc --------------------------------------------------- - -[source,console-result] --------------------------------------------------- -{ - "hits" : { - "hits" : [ { - "_source":{"title":"Book #1"} - }, { - "_source":{"title":"Book #2"} - }, { - "_source":{"title":"Book #3"} - } ] - } -} --------------------------------------------------- +The backing indices for data streams are hidden automatically. Some features, +such as {ml}, store information in hidden indices. +Global index templates that match all indices are not applied to hidden indices. [discrete] -==== Flat Settings +[[system-indices]] +==== System indices -The `flat_settings` flag affects rendering of the lists of settings. When the -`flat_settings` flag is `true`, settings are returned in a flat format: +{es} modules and plugins can store configuration and state information in internal _system indices_. +You should not directly access or modify system indices +as they contain data essential to the operation of the system. -[source,console] --------------------------------------------------- -GET my-index-000001/_settings?flat_settings=true --------------------------------------------------- -// TEST[setup:my_index] +IMPORTANT: Direct access to system indices is deprecated and +will no longer be allowed in the next major version. -Returns: +[discrete] +[[api-conventions-parameters]] +=== Parameters -[source,console-result] --------------------------------------------------- -{ - "my-index-000001" : { - "settings": { - "index.number_of_replicas": "1", - "index.number_of_shards": "1", - "index.creation_date": "1474389951325", - "index.uuid": "n6gzFZTgS664GUfx0Xrpjw", - "index.version.created": ..., - "index.routing.allocation.include._tier_preference" : "data_content", - "index.provided_name" : "my-index-000001" - } - } -} --------------------------------------------------- -// TESTRESPONSE[s/1474389951325/$body.my-index-000001.settings.index\\\\.creation_date/] -// TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.my-index-000001.settings.index\\\\.uuid/] -// TESTRESPONSE[s/"index.version.created": \.\.\./"index.version.created": $body.my-index-000001.settings.index\\\\.version\\\\.created/] +Rest parameters (when using HTTP, map to HTTP URL parameters) follow the +convention of using underscore casing. -When the `flat_settings` flag is `false`, settings are returned in a more -human readable structured format: +[discrete] +[[api-request-body-query-string]] +=== Request body in query string -[source,console] --------------------------------------------------- -GET my-index-000001/_settings?flat_settings=false --------------------------------------------------- -// TEST[setup:my_index] +For libraries that don't accept a request body for non-POST requests, +you can pass the request body as the `source` query string parameter +instead. When using this method, the `source_content_type` parameter +should also be passed with a media type value that indicates the format +of the source, such as `application/json`. 
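+
+As a minimal sketch, the following request passes a `match_all` query through
+the `source` parameter. Depending on the client, the JSON value may need to be
+URL-encoded before it is sent:
+
+[source,console]
+----
+GET /_search?source={"query":{"match_all":{}}}&source_content_type=application/json
+----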
-Returns: +[discrete] +[[api-url-access-control]] +=== URL-based access control -[source,console-result] --------------------------------------------------- -{ - "my-index-000001" : { - "settings" : { - "index" : { - "number_of_replicas": "1", - "number_of_shards": "1", - "creation_date": "1474389951325", - "uuid": "n6gzFZTgS664GUfx0Xrpjw", - "version": { - "created": ... - }, - "routing": { - "allocation": { - "include": { - "_tier_preference": "data_content" - } - } - }, - "provided_name" : "my-index-000001" - } - } - } -} --------------------------------------------------- -// TESTRESPONSE[s/1474389951325/$body.my-index-000001.settings.index.creation_date/] -// TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.my-index-000001.settings.index.uuid/] -// TESTRESPONSE[s/"created": \.\.\./"created": $body.my-index-000001.settings.index.version.created/] +Many users use a proxy with URL-based access control to secure access to +{es} data streams and indices. For <>, +<>, and <> requests, the user has +the choice of specifying a data stream or index in the URL and on each individual request +within the request body. This can make URL-based access control challenging. -By default `flat_settings` is set to `false`. +To prevent the user from overriding the data stream or index specified in the +URL, set `rest.action.multi.allow_explicit_index` to `false` in `elasticsearch.yml`. -[discrete] -[[api-conventions-parameters]] -==== Parameters -Rest parameters (when using HTTP, map to HTTP URL parameters) follow the -convention of using underscore casing. +This causes {es} to +reject requests that explicitly specify a data stream or index in the request body. [discrete] -==== Boolean Values +=== Boolean Values All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`. All other values will raise an error. [discrete] -==== Number Values +=== Number Values All REST APIs support providing numbered parameters as `string` on top of supporting the native JSON number types. -[[time-units]] -[discrete] -==== Time units - -Whenever durations need to be specified, e.g. for a `timeout` parameter, the duration must specify -the unit, like `2d` for 2 days. The supported units are: - -[horizontal] -`d`:: Days -`h`:: Hours -`m`:: Minutes -`s`:: Seconds -`ms`:: Milliseconds -`micros`:: Microseconds -`nanos`:: Nanoseconds - [[byte-units]] [discrete] -==== Byte size units +=== Byte size units Whenever the byte size of data needs to be specified, e.g. when setting a buffer size parameter, the value must specify the unit, like `10kb` for 10 kilobytes. Note that @@ -524,25 +260,9 @@ these units use powers of 1024, so `1kb` means 1024 bytes. The supported units a `tb`:: Terabytes `pb`:: Petabytes -[[size-units]] -[discrete] -==== Unit-less quantities - -Unit-less quantities means that they don't have a "unit" like "bytes" or "Hertz" or "meter" or "long tonne". - -If one of these quantities is large we'll print it out like 10m for 10,000,000 or 7k for 7,000. We'll still print 87 -when we mean 87 though. These are the supported multipliers: - -[horizontal] -`k`:: Kilo -`m`:: Mega -`g`:: Giga -`t`:: Tera -`p`:: Peta - [[distance-units]] [discrete] -==== Distance Units +=== Distance Units Wherever distances need to be specified, such as the `distance` parameter in the <>), the default unit is meters if none is specified. 
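+
+For illustration, assuming a hypothetical index `my-geo-index` with a
+`geo_point` field named `location`, a `geo_distance` query can specify the unit
+directly in the `distance` value:
+
+[source,console]
+----
+GET /my-geo-index/_search
+{
+  "query": {
+    "geo_distance": {
+      "distance": "12km",
+      "location": {
+        "lat": 40.73,
+        "lon": -74.1
+      }
+    }
+  }
+}
+----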
@@ -562,151 +282,37 @@ Centimeter:: `cm` or `centimeters` Millimeter:: `mm` or `millimeters` Nautical mile:: `NM`, `nmi`, or `nauticalmiles` -[[fuzziness]] [discrete] -==== Fuzziness - -Some queries and APIs support parameters to allow inexact _fuzzy_ matching, -using the `fuzziness` parameter. - -When querying `text` or `keyword` fields, `fuzziness` is interpreted as a -{wikipedia}/Levenshtein_distance[Levenshtein Edit Distance] --- the number of one character changes that need to be made to one string to -make it the same as another string. +[[time-units]] +=== Time units -The `fuzziness` parameter can be specified as: +Whenever durations need to be specified, e.g. for a `timeout` parameter, the duration must specify +the unit, like `2d` for 2 days. The supported units are: [horizontal] -`0`, `1`, `2`:: - -The maximum allowed Levenshtein Edit Distance (or number of edits) - -`AUTO`:: -+ --- -Generates an edit distance based on the length of the term. -Low and high distance arguments may be optionally provided `AUTO:[low],[high]`. If not specified, -the default values are 3 and 6, equivalent to `AUTO:3,6` that make for lengths: - -`0..2`:: Must match exactly -`3..5`:: One edit allowed -`>5`:: Two edits allowed - -`AUTO` should generally be the preferred value for `fuzziness`. --- - -[discrete] -[[common-options-error-options]] -==== Enabling stack traces - -By default when a request returns an error Elasticsearch doesn't include the -stack trace of the error. You can enable that behavior by setting the -`error_trace` url parameter to `true`. For example, by default when you send an -invalid `size` parameter to the `_search` API: - -[source,console] ----------------------------------------------------------------------- -POST /my-index-000001/_search?size=surprise_me ----------------------------------------------------------------------- -// TEST[s/surprise_me/surprise_me&error_trace=false/ catch:bad_request] -// Since the test system sends error_trace=true by default we have to override - -The response looks like: - -[source,console-result] ----------------------------------------------------------------------- -{ - "error" : { - "root_cause" : [ - { - "type" : "illegal_argument_exception", - "reason" : "Failed to parse int parameter [size] with value [surprise_me]" - } - ], - "type" : "illegal_argument_exception", - "reason" : "Failed to parse int parameter [size] with value [surprise_me]", - "caused_by" : { - "type" : "number_format_exception", - "reason" : "For input string: \"surprise_me\"" - } - }, - "status" : 400 -} ----------------------------------------------------------------------- - -But if you set `error_trace=true`: - -[source,console] ----------------------------------------------------------------------- -POST /my-index-000001/_search?size=surprise_me&error_trace=true ----------------------------------------------------------------------- -// TEST[catch:bad_request] - -The response looks like: - -[source,console-result] ----------------------------------------------------------------------- -{ - "error": { - "root_cause": [ - { - "type": "illegal_argument_exception", - "reason": "Failed to parse int parameter [size] with value [surprise_me]", - "stack_trace": "Failed to parse int parameter [size] with value [surprise_me]]; nested: IllegalArgumentException..." 
- } - ], - "type": "illegal_argument_exception", - "reason": "Failed to parse int parameter [size] with value [surprise_me]", - "stack_trace": "java.lang.IllegalArgumentException: Failed to parse int parameter [size] with value [surprise_me]\n at org.elasticsearch.rest.RestRequest.paramAsInt(RestRequest.java:175)...", - "caused_by": { - "type": "number_format_exception", - "reason": "For input string: \"surprise_me\"", - "stack_trace": "java.lang.NumberFormatException: For input string: \"surprise_me\"\n at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)..." - } - }, - "status": 400 -} ----------------------------------------------------------------------- -// TESTRESPONSE[s/"stack_trace": "Failed to parse int parameter.+\.\.\."/"stack_trace": $body.error.root_cause.0.stack_trace/] -// TESTRESPONSE[s/"stack_trace": "java.lang.IllegalArgum.+\.\.\."/"stack_trace": $body.error.stack_trace/] -// TESTRESPONSE[s/"stack_trace": "java.lang.Number.+\.\.\."/"stack_trace": $body.error.caused_by.stack_trace/] - -[discrete] -==== Request body in query string - -For libraries that don't accept a request body for non-POST requests, -you can pass the request body as the `source` query string parameter -instead. When using this method, the `source_content_type` parameter -should also be passed with a media type value that indicates the format -of the source, such as `application/json`. +`d`:: Days +`h`:: Hours +`m`:: Minutes +`s`:: Seconds +`ms`:: Milliseconds +`micros`:: Microseconds +`nanos`:: Nanoseconds +[[size-units]] [discrete] -==== Content-type requirements +=== Unit-less quantities -The type of the content sent in a request body must be specified using -the `Content-Type` header. The value of this header must map to one of -the supported formats that the API supports. Most APIs support JSON, -YAML, CBOR, and SMILE. The bulk and multi-search APIs support NDJSON, -JSON, and SMILE; other types will result in an error response. - -When using the `source` query string parameter, the content type must be -specified using the `source_content_type` query string parameter. - -{es} only supports UTF-8-encoded JSON. {es} ignores any other encoding headings -sent with a request. Responses are also UTF-8 encoded. +Unit-less quantities means that they don't have a "unit" like "bytes" or "Hertz" or "meter" or "long tonne". -[[url-access-control]] -=== URL-based access control +If one of these quantities is large we'll print it out like 10m for 10,000,000 or 7k for 7,000. We'll still print 87 +when we mean 87 though. These are the supported multipliers: -Many users use a proxy with URL-based access control to secure access to -{es} data streams and indices. For <>, -<>, and <> requests, the user has -the choice of specifying a data stream or index in the URL and on each individual request -within the request body. This can make URL-based access control challenging. +[horizontal] +`k`:: Kilo +`m`:: Mega +`g`:: Giga +`t`:: Tera +`p`:: Peta -To prevent the user from overriding the data stream or index specified in the -URL, set `rest.action.multi.allow_explicit_index` to `false` in `elasticsearch.yml`. -This causes {es} to -reject requests that explicitly specify a data stream or index in the request body. 
diff --git a/docs/reference/autoscaling/autoscaling-deciders.asciidoc b/docs/reference/autoscaling/autoscaling-deciders.asciidoc index 67adff7fbdf3d..bad2b37c015c7 100644 --- a/docs/reference/autoscaling/autoscaling-deciders.asciidoc +++ b/docs/reference/autoscaling/autoscaling-deciders.asciidoc @@ -20,6 +20,10 @@ Estimates required storage capacity as a percentage of the total data set of partially mounted indices. Available for policies governing frozen data nodes. +<>:: +Estimates a minimum require frozen memory and storage capacity when any index is +in the frozen <> phase. + <>:: Estimates required memory capacity based on machine learning jobs. Available for policies governing machine learning nodes. @@ -31,5 +35,6 @@ include::deciders/reactive-storage-decider.asciidoc[] include::deciders/proactive-storage-decider.asciidoc[] include::deciders/frozen-shards-decider.asciidoc[] include::deciders/frozen-storage-decider.asciidoc[] +include::deciders/frozen-existence-decider.asciidoc[] include::deciders/machine-learning-decider.asciidoc[] include::deciders/fixed-decider.asciidoc[] diff --git a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc new file mode 100644 index 0000000000000..832cf330053aa --- /dev/null +++ b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc @@ -0,0 +1,9 @@ +[role="xpack"] +[[autoscaling-frozen-existence-decider]] +=== Frozen existence decider + +The frozen existence decider (`frozen_existence`) ensures that once the first +index enters the frozen ILM phase, the frozen tier is scaled into existence. + +The frozen existence decider is enabled for all policies governing frozen data +nodes and has no configuration options. diff --git a/docs/reference/cat/anomaly-detectors.asciidoc b/docs/reference/cat/anomaly-detectors.asciidoc index 33c5016787cbd..0f274beb7ab24 100644 --- a/docs/reference/cat/anomaly-detectors.asciidoc +++ b/docs/reference/cat/anomaly-detectors.asciidoc @@ -26,10 +26,11 @@ Returns configuration and usage information about {anomaly-jobs}. [[cat-anomaly-detectors-desc]] ==== {api-description-title} -See {ml-docs}/ml-jobs.html[{anomaly-jobs-cap}]. - NOTE: This API returns a maximum of 10,000 jobs. +For more information about {anomaly-detect}, see +{ml-docs}/ml-ad-finding-anomalies.html[Finding anomalies]. 
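+
+For illustration, assuming at least one {anomaly-job} exists, the following
+request returns selected columns, such as the job `id` and `state`, in tabular
+form:
+
+[source,console]
+----
+GET _cat/ml/anomaly_detectors?v=true&h=id,state
+----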
+ [[cat-anomaly-detectors-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 9ebb0a5af3408..718d9fb6f09a8 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -127,7 +127,7 @@ The API returns the following response: ["source","txt",subs="attributes,callouts"] -------------------------------------------------- index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound -test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test 0 p 127.0.0.1 _0 0 1 0 3kb 0 false true {lucene_version} true +test1 0 p 127.0.0.1 _0 0 1 0 3kb 0 false true {lucene_version} true -------------------------------------------------- // TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ non_json] diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index c3017d74bd8b1..b546d398bae7b 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -94,7 +94,7 @@ NOTE: This is only an estimate and does not account for compression if enabled. `failed_read_requests`:: (long) The number of failed reads. -failed_write_requests`:: +`failed_write_requests`:: (long) The number of failed bulk write requests executed on the follower. `follower_aliases_version`:: @@ -134,7 +134,7 @@ task. `operations_read`:: (long) The total number of operations read from the leader. -operations_written`:: +`operations_written`:: (long) The number of operations written on the follower. `outstanding_read_requests`:: @@ -195,7 +195,7 @@ sent to the leader to the time a reply was returned to the follower. `write_buffer_operation_count`:: (integer) The number of write operations queued on the follower. -write_buffer_size_in_bytes`:: +`write_buffer_size_in_bytes`:: (long) The total number of bytes of operations currently queued for writing. ===== //End shards diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index b0fc931386183..67144f464775b 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -68,8 +68,6 @@ TIP: If you want to replicate data across clusters in the cloud, you can link:{cloud}/ec-enable-ccs.html[configure remote clusters on {ess}]. Then, you can <> and set up {ccr}. -video::https://static-www.elastic.co/v3/assets/bltefdd0b53724fa2ce/blt994089f5e841ad69/5f6265de6f40ab4648b5cf9b/ccr-setup-video-edited.mp4[width=700, height=500, options="autoplay,loop"] - [[ccr-getting-started-prerequisites]] ==== Prerequisites To complete this tutorial, you need: @@ -99,12 +97,10 @@ image::images/ccr-tutorial-clusters.png[ClusterA contains the leader index and C To configure a remote cluster from Stack Management in {kib}: . Select *Remote Clusters* from the side navigation. -. Specify the IP address or host name of the remote cluster (`ClusterA`), -followed by the transport port of the remote cluster (defaults to `9300`). For -example, `192.168.1.1:9300`. - -[role="screenshot"] -image::images/ccr-add-remote-cluster.png["The Add remote clusters page in {kib}"] +. Specify the {es} endpoint URL, or the IP address or host name of the remote +cluster (`ClusterA`) followed by the transport port (defaults to `9300`). 
For +example, `cluster.es.eastus2.staging.azure.foundit.no:9400` or +`192.168.1.1:9300`. [%collapsible] .API example @@ -193,8 +189,6 @@ replicate. `kibana_sample_data_ecommerce` if you are following the tutorial. . Enter a name for your follower index, such as `follower-kibana-sample-data`. -image::images/ccr-add-follower-index.png["Adding a follower index named server-metrics in {kib}"] - {es} initializes the follower using the <> process, which transfers the existing Lucene segment files from the leader diff --git a/docs/reference/ccr/images/ccr-add-follower-index.png b/docs/reference/ccr/images/ccr-add-follower-index.png deleted file mode 100644 index c61ff96776914..0000000000000 Binary files a/docs/reference/ccr/images/ccr-add-follower-index.png and /dev/null differ diff --git a/docs/reference/ccr/images/ccr-add-remote-cluster.png b/docs/reference/ccr/images/ccr-add-remote-cluster.png deleted file mode 100644 index c781b86df44cb..0000000000000 Binary files a/docs/reference/ccr/images/ccr-add-remote-cluster.png and /dev/null differ diff --git a/docs/reference/ccr/managing.asciidoc b/docs/reference/ccr/managing.asciidoc index c1781508c61b2..3124630899d7c 100644 --- a/docs/reference/ccr/managing.asciidoc +++ b/docs/reference/ccr/managing.asciidoc @@ -80,9 +80,6 @@ To recreate a follower index, <> and choose the *Follower indices* tab. -[role="screenshot"] -image::images/ccr-follower-index.png["The Cross-Cluster Replication page in {kib}"] - Select the follower index and pause replication. When the follower index status changes to Paused, reselect the follower index and choose to unfollow the leader index. diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 0a633039c4185..36ef95a0d79da 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -25,6 +25,8 @@ GET _cluster/allocation/explain `GET _cluster/allocation/explain` +`POST _cluster/allocation/explain` + [[cluster-allocation-explain-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 0a764b6095ff1..033f202169985 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -959,6 +959,18 @@ Time by which recovery operations were delayed due to throttling. Time in milliseconds recovery operations were delayed due to throttling. ======= + +`shards`:: +(object) +Contains statistics about all shards assigned to the node. ++ +.Properties of `shards` +[%collapsible%open] +======= +`total_count`:: +(integer) +The total number of shards assigned to the node. 
+======= ====== [[cluster-nodes-stats-api-response-body-os]] diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 97f14c9122214..e9066542b0157 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1317,19 +1317,19 @@ The API returns the following response: "segments": { "count": 4, "memory": "8.6kb", - "memory_in_bytes": 8898, - "terms_memory": "6.3kb", - "terms_memory_in_bytes": 6522, - "stored_fields_memory": "1.2kb", - "stored_fields_memory_in_bytes": 1248, + "memory_in_bytes": 0, + "terms_memory": "0b", + "terms_memory_in_bytes": 0, + "stored_fields_memory": "0b", + "stored_fields_memory_in_bytes": 0, "term_vectors_memory": "0b", "term_vectors_memory_in_bytes": 0, - "norms_memory": "384b", - "norms_memory_in_bytes": 384, + "norms_memory": "0b", + "norms_memory_in_bytes": 0, "points_memory" : "0b", "points_memory_in_bytes" : 0, - "doc_values_memory": "744b", - "doc_values_memory_in_bytes": 744, + "doc_values_memory": "0b", + "doc_values_memory_in_bytes": 0, "index_writer_memory": "0b", "index_writer_memory_in_bytes": 0, "version_map_memory": "0b", diff --git a/docs/reference/commands/create-enrollment-token.asciidoc b/docs/reference/commands/create-enrollment-token.asciidoc new file mode 100644 index 0000000000000..4fd95f1b7bef4 --- /dev/null +++ b/docs/reference/commands/create-enrollment-token.asciidoc @@ -0,0 +1,59 @@ +[roles="xpack"] +[[create-enrollment-token]] + +== elasticsearch-create-enrollment-token + +The `elasticsearch-create-enrollment-token` command creates enrollment tokens for +{es} nodes and {kib} instances. + +[discrete] +=== Synopsis + +[source,shell] +---- +bin/elasticsearch-create-enrollment-token +[-f, --force] [-h, --help] [-E ] [-s, --scope] +---- + +[discrete] +=== Description + +Use this command to create enrollment tokens, which you can use to enroll new +{es} nodes to an existing cluster or configure {kib} instances to communicate +with an existing {es} cluster that has security features enabled. +The command generates (and subsequently removes) a temporary user in the +<> to run the request that creates enrollment tokens. +IMPORTANT: You cannot use this tool if the file realm is disabled in your +`elasticsearch.yml` file. + +This command uses an HTTP connection to connect to the cluster and run the user +management requests. The command automatically attempts to establish the connection +over HTTPS by using the `xpack.security.http.ssl` settings in +the `elasticsearch.yml` file. If you do not use the default configuration directory, +ensure that the `ES_PATH_CONF` environment variable returns the +correct path before you run the `elasticsearch-create-enrollment-token` command. You can +override settings in your `elasticsearch.yml` file by using the `-E` command +option. For more information about debugging connection failures, see +<>. + +[discrete] +[[create-enrollment-token-parameters]] +=== Parameters + +`-E `:: Configures a standard {es} or {xpack} setting. + +`-f, --force`:: Forces the command to run against an unhealthy cluster. + +`-h, --help`:: Returns all of the command parameters. + +`-s, --scope`:: Specifies the scope of the generated token. Supported values are `node` and `kibana`. 
+ +[discrete] +=== Examples + +The following command creates an enrollment token for enrolling an {es} node into a cluster: + +[source,shell] +---- +bin/elasticsearch-create-enrollment-token -s node +---- diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 270667add876c..086ee00f49690 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -9,9 +9,11 @@ tasks from the command line: * <> * <> +* <> * <> * <> * <> +* <> * <> * <> * <> @@ -22,9 +24,11 @@ tasks from the command line: include::certgen.asciidoc[] include::certutil.asciidoc[] +include::create-enrollment-token.asciidoc[] include::croneval.asciidoc[] include::keystore.asciidoc[] include::node-tool.asciidoc[] +include::reset-elastic-password.asciidoc[] include::saml-metadata.asciidoc[] include::service-tokens-command.asciidoc[] include::setup-passwords.asciidoc[] diff --git a/docs/reference/commands/reset-elastic-password.asciidoc b/docs/reference/commands/reset-elastic-password.asciidoc new file mode 100644 index 0000000000000..a440a43844fc9 --- /dev/null +++ b/docs/reference/commands/reset-elastic-password.asciidoc @@ -0,0 +1,63 @@ +[roles="xpack"] +[[reset-elastic-password]] +== elasticsearch-reset-elastic-password + +The `elasticsearch-reset-elastic-password` command resets the password for the +`elastic` <>. + +[discrete] +=== Synopsis + +[source,shell] +---- +bin/elasticsearch-reset-elastic-password +[-a, --auto] [-b, --batch] [-E > to run the request +that changes the `elastic` user password. +IMPORTANT: You cannot use this tool if the file realm is disabled in your `elasticsearch.yml` file. + +This command uses an HTTP connection to connect to the cluster and run the user +management requests. The command automatically attempts to establish the connection +over HTTPS by using the `xpack.security.http.ssl` settings in +the `elasticsearch.yml` file. If you do not use the default config directory +location, ensure that the `ES_PATH_CONF` environment variable returns the +correct path before you run the `elasticsearch-reset-elastic-password` command. You can +override settings in your `elasticsearch.yml` file by using the `-E` command +option. For more information about debugging connection failures, see +<>. + +[discrete] +[[reset-elastic-password-parameters]] +=== Parameters + +`-a, --auto`:: Resets the password of the `elastic` user to an auto-generated strong password. (Default) + +`-b, --batch`:: Runs the reset password process without prompting the user for verification. + +`-E `:: Configures a standard {es} or {xpack} setting. + +`-f, --force`:: Forces the command to run against an unhealthy cluster. + +`-h, --help`:: Returns all of the command parameters. + +`-i, --interactive`:: Prompts the user for the password of the `elastic` user. Use this option to explicitly set a password. + +[discrete] +=== Examples + +The following example resets the password of the `elastic` user to an auto-generated value and +prints the new password in the console. 
+ +[source,shell] +---- +bin/elasticsearch-reset-elastic-password +---- diff --git a/docs/reference/commands/service-tokens-command.asciidoc b/docs/reference/commands/service-tokens-command.asciidoc index 417767db46cf7..7959a1149c808 100644 --- a/docs/reference/commands/service-tokens-command.asciidoc +++ b/docs/reference/commands/service-tokens-command.asciidoc @@ -3,8 +3,6 @@ [[service-tokens-command]] == elasticsearch-service-tokens -beta::[] - Use the `elasticsearch-service-tokens` command to create, list, and delete file-based service account tokens. [discrete] diff --git a/docs/reference/data-rollup-transform.asciidoc b/docs/reference/data-rollup-transform.asciidoc index e0d5d702c835e..3a7b3bedc7765 100644 --- a/docs/reference/data-rollup-transform.asciidoc +++ b/docs/reference/data-rollup-transform.asciidoc @@ -6,23 +6,10 @@ {es} offers the following methods for manipulating your data: -ifdef::permanently-unreleased-branch[] - -* <> -+ -A rollup aggregates an index's time series data and stores the results in new -read-only index. For example, you can roll up hourly data into daily or weekly -summaries. - -endif::[] -ifndef::permanently-unreleased-branch[] - * <> + include::rollup/index.asciidoc[tag=rollup-intro] -endif::[] - * <> + include::transform/transforms.asciidoc[tag=transform-intro] diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 7f6f8cc9b1342..b17b47120a41a 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -320,6 +320,10 @@ PUT /my-data-stream/_settings ---- -- +IMPORTANT: To change the `index.lifecycle.name` setting, first use the +<> to remove the existing {ilm-init} +policy. See <>. + [discrete] [[change-static-index-setting-for-a-data-stream]] === Change a static index setting for a data stream diff --git a/docs/reference/datatiers.asciidoc b/docs/reference/datatiers.asciidoc index 3611cc10a850c..d64daba307706 100644 --- a/docs/reference/datatiers.asciidoc +++ b/docs/reference/datatiers.asciidoc @@ -44,7 +44,8 @@ While they are also responsible for indexing, content data is generally not inge as time series data such as logs and metrics. From a resiliency perspective the indices in this tier should be configured to use one or more replicas. -New indices are automatically allocated to the <> unless they are part of a data stream. +The content tier is required. System indices and other indices that aren't part +of a data stream are automatically allocated to the content tier. [discrete] [[hot-tier]] @@ -56,8 +57,8 @@ Nodes in the hot tier need to be fast for both reads and writes, which requires more hardware resources and faster storage (SSDs). For resiliency, indices in the hot tier should be configured to use one or more replicas. -New indices that are part of a <> are automatically allocated to the -hot tier. +The hot tier is required. New indices that are part of a <> are automatically allocated to the hot tier. [discrete] [[warm-tier]] diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index c967c0c573e49..782c0896900f8 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -89,9 +89,9 @@ has the same semantics as the standard delete API. [NOTE] ==== The final line of data must end with a newline character `\n`. -Each newline character may be preceded by a carriage return `\r`. 
-When sending requests to the `_bulk` endpoint, -the `Content-Type` header should be set to `application/x-ndjson`. +Each newline character may be preceded by a carriage return `\r`. +When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of +`application/json` or `application/x-ndjson`. ==== Because this format uses literal `\n`'s as delimiters, diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 689f89b814dac..9a719e2fcfcc9 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -209,7 +209,9 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=search-q] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=request_cache] -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=refresh] +`refresh`:: +(Optional, Boolean) If `true`, {es} refreshes all shards involved in the +delete by query after the request completes. Defaults to `false`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=requests_per_second] diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index e0aea7cfd6895..d0d0db332eeef 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -167,9 +167,8 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=preference] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=realtime] `refresh`:: -(Optional, Boolean) -If `true`, {es} refreshes the affected shards to make this operation visible to -search. If `false`, do nothing with refreshes. Defaults to `false`. +(Optional, Boolean) If `true`, the request refreshes the relevant shard before +retrieving the document. Defaults to `false`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=routing] diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 7189d74352207..ff839d4a662ff 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -68,7 +68,9 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=preference] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=realtime] -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=refresh] +`refresh`:: +(Optional, Boolean) If `true`, the request refreshes relevant shards before +retrieving documents. Defaults to `false`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=routing] @@ -98,7 +100,7 @@ document: The index that contains the document. Required if no index is specified in the request URI. -`_routing`:: +`routing`:: (Optional, string) The key for the primary shard the document resides on. Required if routing is used during indexing. diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index d66f3561434e4..b5fc7abfbf418 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -221,9 +221,11 @@ Reindex supports <> to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. +// tag::remote-reindex-slicing[] NOTE: Reindexing from remote clusters does not support -<> or -<>. +<> or <>. 
+// end::remote-reindex-slicing[] [[docs-reindex-manual-slice]] ====== Manual slicing @@ -464,7 +466,9 @@ POST _reindex [[docs-reindex-api-query-params]] ==== {api-query-parms-title} -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=refresh] +`refresh`:: +(Optional, Boolean) If `true`, the request refreshes affected shards to make +this operation visible to search. Defaults to `false`. `timeout`:: + @@ -1014,9 +1018,7 @@ example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch the `query` parameter is sent directly to the remote host without validation or modification. -NOTE: Reindexing from remote clusters does not support -<> or -<>. +include::{es-ref-dir}/docs/reindex.asciidoc[tag=remote-reindex-slicing] Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index cffbe2037c9e7..dfe074fc2c87b 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -102,19 +102,20 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards First, let's index a simple doc: [source,console] --------------------------------------------------- +---- PUT test/_doc/1 { "counter" : 1, "tags" : ["red"] } --------------------------------------------------- +---- +// TESTSETUP To increment the counter, you can submit an update request with the following script: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script" : { @@ -125,14 +126,13 @@ POST test/_update/1 } } } --------------------------------------------------- -// TEST[continued] +---- Similarly, you could use and update script to add a tag to the list of tags (this is just a list, so the tag is added even it exists): [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script": { @@ -143,8 +143,7 @@ POST test/_update/1 } } } --------------------------------------------------- -// TEST[continued] +---- You could also remove a tag from the list of tags. The Painless function to `remove` a tag takes the array index of the element @@ -153,7 +152,7 @@ make sure the tag exists. If the list contains duplicates of the tag, this script just removes one occurrence. [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script": { @@ -164,30 +163,51 @@ POST test/_update/1 } } } --------------------------------------------------- -// TEST[continued] +---- You can also add and remove fields from a document. 
For example, this script adds the field `new_field`: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script" : "ctx._source.new_field = 'value_of_new_field'" } --------------------------------------------------- -// TEST[continued] +---- Conversely, this script removes the field `new_field`: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script" : "ctx._source.remove('new_field')" } --------------------------------------------------- +---- +// TEST[continued] + +The following script removes a subfield from an object field: + +//// +[source,console] +---- +PUT test/_doc/1?refresh +{ + "my-object": { + "my-subfield": true + } +} +---- +//// + +[source,console] +---- +POST test/_update/1 +{ + "script": "ctx._source['my-object'].remove('my-subfield')" +} +---- // TEST[continued] Instead of updating the document, you can also change the operation that is @@ -195,7 +215,7 @@ executed from within the script. For example, this request deletes the doc if the `tags` field contains `green`, otherwise it does nothing (`noop`): [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script": { @@ -206,8 +226,7 @@ POST test/_update/1 } } } --------------------------------------------------- -// TEST[continued] +---- [discrete] ===== Update part of a document @@ -216,15 +235,14 @@ The following partial update adds a new field to the existing document: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "doc": { "name": "new_name" } } --------------------------------------------------- -// TEST[continued] +---- If both `doc` and `script` are specified, then `doc` is ignored. If you specify a scripted update, include the fields you want to update in the script. @@ -236,21 +254,21 @@ By default updates that don't change anything detect that they don't change anything and return `"result": "noop"`: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "doc": { "name": "new_name" } } --------------------------------------------------- +---- // TEST[continued] If the value of `name` is already `new_name`, the update request is ignored and the `result` element in the response returns `noop`: [source,console-result] --------------------------------------------------- +---- { "_shards": { "total": 0, @@ -259,17 +277,17 @@ request is ignored and the `result` element in the response returns `noop`: }, "_index": "test", "_id": "1", - "_version": 7, + "_version": 2, "_primary_term": 1, - "_seq_no": 6, + "_seq_no": 1, "result": "noop" } --------------------------------------------------- +---- You can disable this behavior by setting `"detect_noop": false`: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "doc": { @@ -277,8 +295,7 @@ POST test/_update/1 }, "detect_noop": false } --------------------------------------------------- -// TEST[continued] +---- [[upserts]] [discrete] @@ -289,7 +306,7 @@ are inserted as a new document. 
If the document exists, the `script` is executed: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "script": { @@ -303,8 +320,7 @@ POST test/_update/1 "counter": 1 } } --------------------------------------------------- -// TEST[continued] +---- [discrete] [[scripted_upsert]] @@ -314,7 +330,7 @@ To run the script whether or not the document exists, set `scripted_upsert` to `true`: [source,console] --------------------------------------------------- +---- POST sessions/_update/dh3sgudg8gsrgl { "scripted_upsert": true, @@ -330,9 +346,8 @@ POST sessions/_update/dh3sgudg8gsrgl }, "upsert": {} } --------------------------------------------------- +---- // TEST[s/"id": "my_web_session_summariser"/"source": "ctx._source.page_view_event = params.pageViewEvent"/] -// TEST[continued] [discrete] [[doc_as_upsert]] @@ -343,7 +358,7 @@ Instead of sending a partial `doc` plus an `upsert` doc, you can set value: [source,console] --------------------------------------------------- +---- POST test/_update/1 { "doc": { @@ -351,8 +366,8 @@ POST test/_update/1 }, "doc_as_upsert": true } --------------------------------------------------- -// TEST[continued] +---- + [NOTE] ==== Using <> with `doc_as_upsert` is not supported. diff --git a/docs/reference/eql/delete-async-eql-search-api.asciidoc b/docs/reference/eql/delete-async-eql-search-api.asciidoc index ba907cc7d3117..83d57104bbc09 100644 --- a/docs/reference/eql/delete-async-eql-search-api.asciidoc +++ b/docs/reference/eql/delete-async-eql-search-api.asciidoc @@ -25,8 +25,11 @@ DELETE /_eql/search/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZM [[delete-async-eql-search-api-prereqs]] ==== {api-prereq-title} -* If the {es} {security-features} are enabled, only the user who first submitted -the EQL search can delete the search using this API. +* If the {es} {security-features} are enabled, only the following users can +use this API to delete a search: + +** Users with the `cancel_task` <> +** The user who first submitted the search * See <>. diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc new file mode 100644 index 0000000000000..d3f591ccfe6c1 --- /dev/null +++ b/docs/reference/eql/eql-apis.asciidoc @@ -0,0 +1,19 @@ +[[eql-apis]] +== EQL APIs + +Event Query Language (EQL) is a query language for event-based time series data, +such as logs, metrics, and traces. For an overview of EQL and related tutorials, +see <>. + +* <> +* <> +* <> +* <> + +include::delete-async-eql-search-api.asciidoc[] + +include::eql-search-api.asciidoc[] + +include::get-async-eql-search-api.asciidoc[] + +include::get-async-eql-status-api.asciidoc[] diff --git a/docs/reference/eql/eql-search-api.asciidoc b/docs/reference/eql/eql-search-api.asciidoc index 3e77158ec8d0b..2fc2d46fefe9f 100644 --- a/docs/reference/eql/eql-search-api.asciidoc +++ b/docs/reference/eql/eql-search-api.asciidoc @@ -48,6 +48,9 @@ or alias. * See <>. +* experimental:[] For cross-cluster search, the local and remote clusters must +use the same {es} version. For security, see <>. + [[eql-search-api-limitations]] ===== Limitations @@ -60,6 +63,9 @@ See <>. (Required, string) Comma-separated list of data streams, indices, or aliases used to limit the request. Supports wildcards (`*`). To search all data streams and indices, use `*` or `_all`. ++ +experimental:[] To search a remote cluster, use the `:` syntax. +See <>. 
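++
+As an illustrative sketch only (the `cluster_one` alias and the data stream name are placeholders, not defined by this change), a remote target looks like:
++
+[source,console]
+----
+GET /cluster_one:my-data-stream/_eql/search
+{
+  "query": """
+    process where process.name == "regsvr32.exe"
+  """
+}
+----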
[[eql-search-api-query-params]] ==== {api-query-parms-title} @@ -201,22 +207,7 @@ You can specify items in the array as a string or object. .Properties of `fields` objects [%collapsible%open] ==== -`field`:: -(Required, string) -Wildcard pattern. The request returns values for field names matching this -pattern. - -`format`:: -(Optional, string) -Format in which the values are returned. -+ -<> and <> fields accept a -<>. <> -accept either `geojson` for http://www.geojson.org[GeoJSON] (the default) or -`wkt` for {wikipedia}/Well-known_text_representation_of_geometry[Well Known -Text]. -+ -For other field data types, this parameter is not supported. +include::{es-repo-dir}/search/search.asciidoc[tag=fields-api-props] ==== `filter`:: @@ -293,6 +284,7 @@ change the sort order of hits in the response. include::{es-repo-dir}/search/search.asciidoc[tag=runtime-mappings-def] +[[eql-search-api-params-size]] `size`:: (Optional, integer or float) For <>, the maximum number of matching events to diff --git a/docs/reference/eql/eql.asciidoc b/docs/reference/eql/eql.asciidoc index e23891e79cae3..0760f8cf4f05d 100644 --- a/docs/reference/eql/eql.asciidoc +++ b/docs/reference/eql/eql.asciidoc @@ -600,8 +600,7 @@ GET /my-data-stream/_eql/search By default, EQL search requests are synchronous and wait for complete results before returning a response. However, complete results can take longer for -searches across large data sets, <> or <> -data, or <>. +searches across large data sets or <> data. To avoid long waits, run an async EQL search. Set `wait_for_completion_timeout` to a duration you'd like to wait for synchronous results. @@ -733,7 +732,7 @@ search is still ongoing, {es} cancels the search request. [source,console] ---- -DELETE /_eql/search/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=?keep_alive=5d +DELETE /_eql/search/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE= ---- // TEST[skip: no access to search ID] @@ -793,6 +792,99 @@ results by using <>. You can also manually delete saved synchronous searches using the <>. +[discrete] +[[run-eql-search-across-clusters]] +=== Run an EQL search across clusters + +experimental::[] + +The EQL search API supports <>. However, the local and <> +must use the same {es} version. + +The following <> request +adds two remote clusters: `cluster_one` and `cluster_two`. + +[source,console] +---- +PUT /_cluster/settings +{ + "persistent": { + "cluster": { + "remote": { + "cluster_one": { + "seeds": [ + "127.0.0.1:9300" + ] + }, + "cluster_two": { + "seeds": [ + "127.0.0.1:9301" + ] + } + } + } + } +} +---- +// TEST[setup:host] +// TEST[s/127.0.0.1:930\d+/\${transport_host}/] + +To target a data stream or index on a remote cluster, use the +`:` syntax. + +[source,console] +---- +GET /cluster_one:my-data-stream,cluster_two:my-data-stream/_eql/search +{ + "query": """ + process where process.name == "regsvr32.exe" + """ +} +---- +// TEST[continued] +// TEST[setup:sec_logs] +// TEST[teardown:data_stream_cleanup] + +[discrete] +[[eql-circuit-breaker]] +=== EQL circuit breaker settings + +When a <> query is executed, the node handling the query +needs to keep some structures in memory, which are needed by the algorithm +implementing the sequence matching. When large amounts of data need to be processed, +and/or a large amount of matched sequences is requested by the user (by setting the +<> query param), the memory occupied by those +structures could potentially exceed the available memory of the JVM. 
This would cause +an `OutOfMemory` exception which would bring down the node. + +To prevent this from happening, a special <> is used, +which limits the memory allocation during the execution of a <> +query. When the breaker is triggered, an `org.elasticsearch.common.breaker.CircuitBreakingException` +is thrown and a descriptive error message is returned to the user. + +This <> can be configured using the following settings: + +`breaker.eql_sequence.limit`:: +(<>) The limit for circuit breaker used to restrict +the memory utilisation during the execution of an EQL sequence query. This value is +defined as a percentage of the JVM heap. Defaults to `50%`. If the +<> is set to a value less than `50%`, +this setting uses that value as its default instead. + +`breaker.eql_sequence.overhead`:: +(<>) A constant that sequence query memory +estimates are multiplied by to determine a final estimate. Defaults to `1`. + +`breaker.eql_sequence.type`:: +(<>) Circuit breaker type. Valid values are: + +`memory` (Default)::: +The breaker limits memory usage for EQL sequence queries. + +`noop`::: +Disables the breaker. + include::syntax.asciidoc[] include::functions.asciidoc[] include::pipes.asciidoc[] diff --git a/docs/reference/eql/syntax.asciidoc b/docs/reference/eql/syntax.asciidoc index 68275b0da30ca..482a90f7bb8b9 100644 --- a/docs/reference/eql/syntax.asciidoc +++ b/docs/reference/eql/syntax.asciidoc @@ -792,7 +792,7 @@ For a list of supported pipes, see <>. [[eql-syntax-limitations]] === Limitations -EQL does not support the following features and syntax. +EQL has the following limitations. [discrete] [[eql-uses-fields-parameter]] @@ -835,13 +835,6 @@ You cannot use EQL to search the values of a <> field or the sub-fields of a `nested` field. However, data streams and indices containing `nested` field mappings are otherwise supported. -[discrete] -[[eql-ccs-support]] -==== {ccs-cap} is not supported - -EQL search APIs do not support <>. - [discrete] [[eql-unsupported-syntax]] ==== Differences from Endgame EQL syntax diff --git a/docs/reference/fleet/index.asciidoc b/docs/reference/fleet/index.asciidoc index 5cc91b4e74ddb..2581c226fd31e 100644 --- a/docs/reference/fleet/index.asciidoc +++ b/docs/reference/fleet/index.asciidoc @@ -2,12 +2,14 @@ [[fleet-apis]] == Fleet APIs -The following APIs are design to support fleet-server's usage of Elasticsearch as -a datastore for internal agent and action data. These APIS are currently intended -for internal use only and should be considered experimental. +TIP: For the {kib} {fleet} APIs, see the +{fleet-guide}/fleet-api-docs.html[Fleet API Documentation]. + +The following APIs support {fleet}'s use of {es} as a data store for internal +agent and action data. These APIs are experimental and for internal use by +{fleet} only. * <> // top-level include::get-global-checkpoints.asciidoc[] - diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc deleted file mode 100644 index 2dd64a6ebeaa0..0000000000000 --- a/docs/reference/glossary.asciidoc +++ /dev/null @@ -1,507 +0,0 @@ -//// -============ -IMPORTANT -Add new terms to the Stack Docs glossary: -https://github.com/elastic/stack-docs/tree/master/docs/en/glossary -============ -//// - -[glossary] -[[glossary]] -= Glossary - -[glossary] -[[glossary-alias]] alias:: -// tag::alias-def[] -An alias is a secondary name for a group of <> or <>. Most {es} APIs accept an alias in place -of a data stream or index name. See {ref}/alias.html[Aliases]. 
-// end::alias-def[] - -[[glossary-analysis]] analysis:: -// tag::analysis-def[] -Process of converting unstructured <> into a format -optimized for search. See {ref}/analysis.html[Text analysis]. -// end::analysis-def[] - -[[glossary-api-key]] API key:: -// tag::api-key-def[] -Unique identifier for authentication in {es}. When -{ref}/encrypting-communications.html[transport layer security (TLS)] is enabled, -all requests must be authenticated using an API key or a username and password. -See the {ref}/security-api-create-api-key.html[Create API key API]. -// end::api-key-def[] - -[[glossary-auto-follow-pattern]] auto-follow pattern:: -// tag::auto-follow-pattern-def[] -<> that automatically configures new -<> as <> for -<>. See {ref}/ccr-auto-follow.html[Manage auto-follow -patterns]. -// end::auto-follow-pattern-def[] - -[[glossary-cluster]] cluster:: -// tag::cluster-def[] -A group of one or more connected {es} <>. See -{ref}/scalability.html[Clusters, nodes, and shards]. -// end::cluster-def[] - -[[glossary-cold-phase]] cold phase:: -// tag::cold-phase-def[] -Third possible phase in the <>. In the -cold phase, data is no longer updated and seldom <>. The -data still needs to be searchable, but it’s okay if those queries are slower. -See {ref}/ilm-index-lifecycle.html[Index lifecycle]. -// end::cold-phase-def[] - -[[glossary-cold-tier]] cold tier:: -// tag::cold-tier-def[] -<> that contains <> that hold -time series data that is accessed occasionally and not normally updated. See -{ref}/data-tiers.html[Data tiers]. -// end::cold-tier-def[] - -[[glossary-component-template]] component template:: -// tag::component-template-def[] -Building block for creating <>. A -component template can specify <>, -{ref}/index-modules.html[index settings], and <>. See -{ref}/index-templates.html[index templates]. -// end::component-template-def[] - -[[glossary-content-tier]] content tier:: -// tag::content-tier-def[] -<> that contains <> that -handle the <> and <> load for -content, such as a product catalog. See {ref}/data-tiers.html[Data tiers]. -// end::content-tier-def[] - -[[glossary-ccr]] {ccr} ({ccr-init}):: -// tag::ccr-def[] -Replicates <> and <> -from <> in a -<>. See {ref}/xpack-ccr.html[{ccr-cap}]. -// end::ccr-def[] - -[[glossary-ccs]] {ccs} (CCS):: -// tag::ccs-def[] -Searches <> and <> on -<> from a -<>. See -{ref}/modules-cross-cluster-search.html[Search across clusters]. -// end::ccs-def[] - -[[glossary-data-stream]] data stream:: -// tag::data-stream-def[] -Named resource used to manage time series data. A data stream stores data across -multiple backing <>. See {ref}/data-streams.html[Data -streams]. -// end::data-stream-def[] - -[[glossary-data-tier]] data tier:: -// tag::data-tier-def[] -Collection of <> with the same {ref}/modules-node.html[data -role] that typically share the same hardware profile. Data tiers include the -<>, <>, -<>, <>, and -<>. See {ref}/data-tiers.html[Data tiers]. -// end::data-tier-def[] - -[[glossary-delete-phase]] delete phase:: -// tag::delete-phase-def[] -Last possible phase in the <>. In the -delete phase, an <> is no longer needed and can safely be -deleted. See {ref}/ilm-index-lifecycle.html[Index lifecycle]. -// end::delete-phase-def[] - -[[glossary-document]] document:: -// tag::document-def[] -JSON object containing data stored in {es}. See -{ref}/documents-indices.html[Documents and indices]. 
-// end::document-def[] - -[[glossary-eql]] -Event Query Language (EQL):: -// tag::eql-def[] -<> language for event-based time series data, such as -logs, metrics, and traces. EQL supports matching for event sequences. See -{ref}/eql.html[EQL]. -// end::eql-def[] - -[[glossary-field]] field:: -// tag::field-def[] -Key-value pair in a <>. See -{ref}/mapping.html[Mapping]. -// end::field-def[] - -[[glossary-filter]] filter:: -// tag::filter-def[] -<> that does not score matching documents. See -{ref}/query-filter-context.html[filter context]. -// end::filter-def[] - -[[glossary-flush]] flush:: -// tag::flush-def[] -Writes data from the {ref}/index-modules-translog.html[transaction log] to disk -for permanent storage. See the {ref}/indices-flush.html[flush API]. -// end::flush-def[] - -[[glossary-follower-index]] follower index:: -// tag::follower-index-def[] -Target <> for <>. A follower index -exists in a <> and replicates a -<>. See {ref}/xpack-ccr.html[{ccr-cap}]. -// end::follower-index-def[] - -[[glossary-force-merge]] force merge:: -// tag::force-merge-def[] -// tag::force-merge-def-short[] -Manually triggers a <> to reduce the number of -<> in an index's <>. -// end::force-merge-def-short[] -See the {ref}/indices-forcemerge.html[force merge API]. -// end::force-merge-def[] - -[[glossary-frozen-phase]] frozen phase:: -// tag::frozen-phase-def[] -Fourth possible phase in the <>. In -the frozen phase, an <> is no longer updated and -<> rarely. The information still needs to be searchable, -but it’s okay if those queries are extremely slow. See -{ref}/ilm-index-lifecycle.html[Index lifecycle]. -// end::frozen-phase-def[] - -[[glossary-frozen-tier]] frozen tier:: -// tag::frozen-tier-def[] -<> that contains <> that -hold time series data that is accessed rarely and not normally updated. See -{ref}/data-tiers.html[Data tiers]. -// end::frozen-tier-def[] - -[[glossary-hidden-index]] hidden data stream or index:: -// tag::hidden-index-def[] -<> or <> excluded from -most <> by default. See -{ref}/multi-index.html#hidden[Hidden data streams and indices]. -// end::hidden-index-def[] - -[[glossary-hot-phase]] hot phase:: -// tag::hot-phase-def[] -First possible phase in the <>. In -the hot phase, an <> is actively updated and queried. See -{ref}/ilm-index-lifecycle.html[Index lifecycle]. -// end::hot-phase-def[] - -[[glossary-hot-tier]] hot tier:: -// tag::hot-tier-def[] -<> that contains <> that -handle the <> load for time series data, such as logs or -metrics. This tier holds your most recent, most frequently accessed data. See -{ref}/data-tiers.html[Data tiers]. -// end::hot-tier-def[] - -[[glossary-id]] ID:: -// tag::id-def[] -Identifier for a <>. Document IDs must be unique -within an <>. See the {ref}/mapping-id-field.html[`_id` -field]. -// end::id-def[] - -[[glossary-index]] index:: -// tag::index-def[] -. Collection of JSON <>. See -{ref}/documents-indices.html[Documents and indices]. - -. To add one or more JSON documents to {es}. This process is called indexing. -// end::index-def[] - -[[glossary-index-lifecycle]] index lifecycle:: -// tag::index-lifecycle-def[] -Five phases an <> can transition through: -<>, <>, -<>, <>, -and <>. See {ref}/ilm-policy-definition.html[Index -lifecycle]. -// end::index-lifecycle-def[] - -[[glossary-index-lifecycle-policy]] index lifecycle policy:: -// tag::index-lifecycle-policy-def[] -Specifies how an <> moves between phases in the -<> and what actions to perform during -each phase. See {ref}/ilm-policy-definition.html[Index lifecycle]. 
-// end::index-lifecycle-policy-def[] - -[[glossary-index-pattern]] index pattern:: -// tag::index-pattern-def[] -String containing a wildcard (`*`) pattern that can match multiple -<>, <>, or -<>. See {ref}/multi-index.html[Multi-target syntax]. -// end::index-pattern-def[] - -[[glossary-index-template]] index template:: -// tag::index-template-def[] -Automatically configures the <>, -{ref}/index-modules.html[index settings], and <> -of new <> that match its <>. You can also use index templates to create -<>. See {ref}/index-templates.html[Index -templates]. -// end::index-template-def[] - -[[glossary-leader-index]] leader index:: -// tag::leader-index-def[] -Source <> for <>. A leader index -exists on a <> and is replicated to -<>. See -{ref}/xpack-ccr.html[{ccr-cap}]. -// end::leader-index-def[] - -[[glossary-local-cluster]] local cluster:: -// tag::local-cluster-def[] -<> that pulls data from a -<> in <> or -<>. See {ref}/modules-remote-clusters.html[Remote clusters]. -// end::local-cluster-def[] - -[[glossary-mapping]] mapping:: -// tag::mapping-def[] -Defines how a <>, its <>, and -its metadata are stored in {es}. Similar to a schema definition. See -{ref}/mapping.html[Mapping]. -// end::mapping-def[] - -[[glossary-merge]] merge:: -// tag::merge-def[] -Process of combining a <>'s smaller Lucene -<> into a larger one. {es} manages merges -automatically. -// end::merge-def[] - -[[glossary-multi-field]] multi-field:: -// tag::multi-field-def[] -A <> that's <> in multiple ways. -See the {ref}/multi-fields.html[`fields` mapping parameter]. -// end::multi-field-def[] - -[[glossary-node]] node:: -// tag::node-def[] -A single {es} server. One or more nodes can form a <>. -See {ref}/scalability.html[Clusters, nodes, and shards]. -// end::node-def[] - -[[glossary-primary-shard]] primary shard:: -// tag::primary-shard-def[] -Lucene instance containing some or all data for an <>. -When you index a <>, {es} adds the document to -primary shards before <>. See -{ref}/scalability.html[Clusters, nodes, and shards]. -// end::primary-shard-def[] - -[[glossary-query]] query:: -// tag::query-def[] -Request for information about your data. You can think of a query as a -question, written in a way {es} understands. See -{ref}/search-your-data.html[Search your data]. -// end::query-def[] - -[[glossary-recovery]] recovery:: -// tag::recovery-def[] -Process of syncing a <> from a -<>. Upon completion, the replica shard is -available for searches. See the {ref}/indices-recovery.html[index recovery API]. -// end::recovery-def[] - -[[glossary-reindex]] reindex:: -// tag::reindex-def[] -Copies documents from a source to a destination. The source and destination can -be a <>, <>, or -<>. See the {ref}/docs-reindex.html[Reindex API]. -// end::reindex-def[] - -[[glossary-remote-cluster]] remote cluster:: -// tag::remote-cluster-def[] -A separate <>, often in a different data center or -locale, that contains <> that can be replicated or -searched by the <>. The connection to a -remote cluster is unidirectional. See {ref}/modules-remote-clusters.html[Remote -clusters]. -// end::remote-cluster-def[] - -[[glossary-replica-shard]] replica shard:: -// tag::replica-shard-def[] -Copy of a <>. Replica shards can improve -search performance and resiliency by distributing data across multiple -<>. See {ref}/scalability.html[Clusters, nodes, and -shards]. 
-// end::replica-shard-def[] - -[[glossary-rollover]] rollover:: -// tag::rollover-def[] -// tag::rollover-def-short[] -Creates a new write index when the current one reaches a certain size, number of -docs, or age. -// end::rollover-def-short[] -A rollover can target a <> or an -<> with a write index. -// end::rollover-def[] - -[[glossary-rollup]] rollup:: -// tag::rollup-def[] -Summarizes high-granularity data into a more compressed format to maintain access -to historical data in a cost-effective way. See -{ref}/xpack-rollup.html[Roll up your data]. -// end::rollup-def[] - -[[glossary-rollup-index]] rollup index:: -// tag::rollup-index-def[] -Special type of <> for storing historical data at reduced -granularity. Documents are summarized and indexed into a rollup index by a -<>. See {ref}/xpack-rollup.html[Rolling up -historical data]. -// end::rollup-index-def[] - -[[glossary-rollup-job]] rollup job:: -// tag::rollup-job-def[] -Background task that runs continuously to summarize documents in an -<> and index the summaries into a separate rollup index. -The job configuration controls what data is rolled up and how often. See -{ref}/xpack-rollup.html[Rolling up historical data]. -// end::rollup-job-def[] - -[[glossary-routing]] routing:: -// tag::routing-def[] -Process of sending and retrieving data from a specific -<>. {es} uses a hashed routing value to -choose this shard. You can provide a routing value in -<> and search requests to take advantage of caching. -See the {ref}/mapping-routing-field.html[`_routing` field]. -// end::routing-def[] - -[[glossary-runtime-fields]] runtime field:: -// tag::runtime-fields-def[] -<> that is evaluated at query time. You access runtime -fields from the search API like any other field, and {es} sees runtime fields no -differently. See {ref}/runtime.html[Runtime fields]. -// end::runtime-fields-def[] - -[[glossary-searchable-snapshot]] searchable snapshot:: -// tag::searchable-snapshot-def[] -<> of an <> mounted as a -<>. You can search -this index like a regular index. See {ref}/searchable-snapshots.html[searchable -snapshots]. -// end::searchable-snapshot-def[] - -[[glossary-searchable-snapshot-index]] searchable snapshot index:: -// tag::searchable-snapshot-index-def[] -<> whose data is stored in a -<>. Searchable snapshot indices do not need -<> for resilience, since their data is -reliably stored outside the cluster. See -{ref}/searchable-snapshots.html[searchable snapshots]. -// end::searchable-snapshot-index-def[] - -[[glossary-segment]] segment:: -// tag::segment-def[] -Data file in a <>'s Lucene instance. {es} manages Lucene -segments automatically. -// end::segment-def[] - -[[glossary-shard]] shard:: -// tag::shard-def[] -Lucene instance containing some or all data for an <>. -{es} automatically creates and manages these Lucene instances. There are two -types of shards: <> and -<>. See {ref}/scalability.html[Clusters, nodes, -and shards]. -// end::shard-def[] - -[[glossary-shrink]] shrink:: -// tag::shrink-def[] -// tag::shrink-def-short[] -Reduces the number of <> in an index. -// end::shrink-def-short[] -See the {ref}/indices-shrink-index.html[shrink index API]. -// end::shrink-def[] - -[[glossary-snapshot]] snapshot:: -// tag::snapshot-def[] -Backup taken of a running <>. You can take snapshots -of the entire cluster or only specific <> and -<>. See {ref}/snapshot-restore.html[Snapshot and -restore]. 
-// end::snapshot-def[] - -[[glossary-snapshot-lifecycle-policy]] snapshot lifecycle policy:: -// tag::snapshot-lifecycle-policy-def[] -Specifies how frequently to perform automatic backups of a cluster and how long -to retain the resulting <>. See -{ref}/snapshot-lifecycle-management.html[Manage the snapshot lifecycle] -// end::snapshot-lifecycle-policy-def[] - -[[glossary-snapshot-repository]] snapshot repository:: -// tag::snapshot-repository-def[] -Location where <> are stored. A snapshot repository -can be a shared filesystem or a remote repository, such as Azure or Google Cloud -Storage. See {ref}/snapshot-restore.html[Snapshot and restore]. -// end::snapshot-repository-def[] - -[[glossary-source_field]] source field:: -// tag::source-field-def[] -Original JSON object provided during <>. See the -{ref}/mapping-source-field.html[`_source` field]. -// end::source-field-def[] - -[[glossary-split]] split:: -// tag::split-def[] -Adds more <> to an -<>. See the {ref}/indices-split-index.html[split index -API]. -// end::split-def[] - -[[glossary-system-index]] system index:: -// tag::system-index-def[] -<> containing configurations and other data used -internally by the {stack}. System index names start with a dot (`.`), such as -`.security`. Do not directly access or change system indices. -// end::system-index-def[] - -[[glossary-term]] term:: -// tag::term-def[] -See {ref}/glossary.html#glossary-token[token]. -// end::term-def[] - -[[glossary-text]] text:: -// tag::text-def[] -Unstructured content, such as a product description or log message. You -typically <> text for better search. See -{ref}/analysis.html[Text analysis]. -// end::text-def[] - -[[glossary-token]] token:: -// tag::token-def[] -A chunk of unstructured <> that's been optimized for search. -In most cases, tokens are individual words. Tokens are also called terms. See -{ref}/analysis.html[Text analysis]. -// end::token-def[] - -[[glossary-tokenization]] tokenization:: -// tag::tokenization-def[] -Process of breaking unstructured text down into smaller, searchable chunks -called <>. See -{ref}/analysis-overview.html#tokenization[Tokenization]. -// end::tokenization-def[] - -[[glossary-warm-phase]] warm phase:: -// tag::warm-phase-def[] -Second possible phase in the <>. In -the warm phase, an <> is generally optimized for search -and no longer updated. See {ref}/ilm-policy-definition.html[Index lifecycle]. -// end::warm-phase-def[] - -[[glossary-warm-tier]] warm tier:: -// tag::warm-tier-def[] -<> that contains <> that hold -time series data that is accessed less frequently and rarely needs to be -updated. See {ref}/data-tiers.html[Data tiers]. -// end::warm-tier-def[] diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index efabcfc46a688..2552ac8bad7fc 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -4,14 +4,14 @@ [discrete] === Disable the features you do not need -By default Elasticsearch indexes and adds doc values to most fields so that they -can be searched and aggregated out of the box. For instance if you have a numeric +By default, {es} indexes and adds doc values to most fields so that they +can be searched and aggregated out of the box. 
For instance, if you have a numeric field called `foo` that you need to run histograms on but that you never need to filter on, you can safely disable indexing on this field in your <>: [source,console] --------------------------------------------------- +---- PUT index { "mappings": { @@ -23,68 +23,13 @@ PUT index } } } --------------------------------------------------- - -<> fields store normalization factors in the index in order to be -able to score documents. If you only need matching capabilities on a `text` -field but do not care about the produced scores, you can configure Elasticsearch -to not write norms to the index: - -[source,console] --------------------------------------------------- -PUT index -{ - "mappings": { - "properties": { - "foo": { - "type": "text", - "norms": false - } - } - } -} --------------------------------------------------- - -<> fields also store frequencies and positions in the index by -default. Frequencies are used to compute scores and positions are used to run -phrase queries. If you do not need to run phrase queries, you can tell -Elasticsearch to not index positions: +---- -[source,console] --------------------------------------------------- -PUT index -{ - "mappings": { - "properties": { - "foo": { - "type": "text", - "index_options": "freqs" - } - } - } -} --------------------------------------------------- - -Furthermore if you do not care about scoring either, you can configure -Elasticsearch to just index matching documents for every term. You will -still be able to search on this field, but phrase queries will raise errors -and scoring will assume that terms appear only once in every document. - -[source,console] --------------------------------------------------- -PUT index -{ - "mappings": { - "properties": { - "foo": { - "type": "text", - "norms": false, - "index_options": "freqs" - } - } - } -} --------------------------------------------------- +<> fields store normalization factors in the index to facilitate +document scoring. If you only need matching capabilities on a `text` +field but do not care about the produced scores, you can use the +<> type instead. This field type +saves significant space by dropping scoring and positional information. [discrete] [[default-dynamic-string-mapping]] diff --git a/docs/reference/how-to/fix-common-cluster-issues.asciidoc b/docs/reference/how-to/fix-common-cluster-issues.asciidoc index 3eb67477c1e55..f5e58d1adc234 100644 --- a/docs/reference/how-to/fix-common-cluster-issues.asciidoc +++ b/docs/reference/how-to/fix-common-cluster-issues.asciidoc @@ -100,6 +100,108 @@ POST _cache/clear?fielddata=true ---- // TEST[s/^/PUT my-index\n/] +[discrete] +[[high-cpu-usage]] +=== High CPU usage + +{es} uses <> to manage CPU resources for +concurrent operations. High CPU usage typically means one or more thread pools +are running low. + +If a thread pool is depleted, {es} will <> +related to the thread pool. For example, if the `search` thread pool is +depleted, {es} will reject search requests until more threads are available. + +[discrete] +[[diagnose-high-cpu-usage]] +==== Diagnose high CPU usage + +**Check CPU usage** + +include::{es-repo-dir}/tab-widgets/cpu-usage-widget.asciidoc[] + +**Check hot threads** + +If a node has high CPU usage, use the <> to check for resource-intensive threads running on the node. + +[source,console] +---- +GET _nodes/my-node,my-other-node/hot_threads +---- +// TEST[s/\/my-node,my-other-node//] + +This API returns a breakdown of any hot threads in plain text. 
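+
+The CPU usage check above relies on an included widget whose contents are not part of this change. As a rough, illustrative alternative, the cat nodes API can report per-node CPU directly:
+
+[source,console]
+----
+GET _cat/nodes?v=true&s=cpu:desc&h=name,cpu,load_1m,load_5m,load_15m
+----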
+ +[discrete] +[[reduce-cpu-usage]] +==== Reduce CPU usage + +The following tips outline the most common causes of high CPU usage and their +solutions. + +**Scale your cluster** + +Heavy indexing and search loads can deplete smaller thread pools. To better +handle heavy workloads, add more nodes to your cluster or upgrade your existing +nodes to increase capacity. + +**Spread out bulk requests** + +While more efficient than individual requests, large <> +or <> requests still require CPU resources. If +possible, submit smaller requests and allow more time between them. + +**Cancel long-running searches** + +Long-running searches can block threads in the `search` thread pool. To check +for these searches, use the <>. + +[source,console] +---- +GET _tasks?actions=*search&detailed +---- + +The response's `description` contains the search request and its queries. +`running_time_in_nanos` shows how long the search has been running. + +[source,console-result] +---- +{ + "nodes" : { + "oTUltX4IQMOUUVeiohTt8A" : { + "name" : "my-node", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1:9300", + "tasks" : { + "oTUltX4IQMOUUVeiohTt8A:464" : { + "node" : "oTUltX4IQMOUUVeiohTt8A", + "id" : 464, + "type" : "transport", + "action" : "indices:data/read/search", + "description" : "indices[my-index], search_type[QUERY_THEN_FETCH], source[{\"query\":...}]", + "start_time_in_millis" : 4081771730000, + "running_time_in_nanos" : 13991383, + "cancellable" : true + } + } + } + } +} +---- +// TESTRESPONSE[skip: no way to get tasks] + +To cancel a search and free up resources, use the API's `_cancel` endpoint. + +[source,console] +---- +POST _tasks/oTUltX4IQMOUUVeiohTt8A:464/_cancel +---- + +For additional tips on how to track and avoid resource-intensive searches, see +<>. + [discrete] [[high-jvm-memory-pressure]] === High JVM memory pressure @@ -141,6 +243,7 @@ Every shard uses memory. In most cases, a small set of large shards uses fewer resources than many small shards. For tips on reducing your shard count, see <>. +[[avoid-expensive-searches]] **Avoid expensive searches** Expensive searches can use large amounts of memory. To better track expensive @@ -439,3 +542,47 @@ POST _cluster/reroute If you backed up the missing index data to a snapshot, use the <> to restore the individual index. Alternatively, you can index the missing data from the original data source. + +[discrete] +[[rejected-requests]] +=== Rejected requests + +When {es} rejects a request, it stops the operation and returns an error with a +`429` response code. Rejected requests are commonly caused by: + +* A <>. A depleted `search` or `write` +thread pool returns a `TOO_MANY_REQUESTS` error message. + +* A <>. + +* High <> that exceeds the +<>. + +[discrete] +[[check-rejected-tasks]] +==== Check rejected tasks + +To check the number of rejected tasks for each thread pool, use the +<>. A high ratio of `rejected` to +`completed` tasks, particularly in the `search` and `write` thread pools, means +{es} regularly rejects requests. + +[source,console] +---- +GET /_cat/thread_pool?v=true&h=id,name,active,rejected,completed +---- + +[discrete] +[[prevent-rejected-requests]] +==== Prevent rejected requests + +**Fix high CPU and memory usage** + +If {es} regularly rejects requests and other tasks, your cluster likely has high +CPU usage or high JVM memory pressure. For tips, see <> and +<>. 
+ +**Prevent circuit breaker errors** + +If you regularly trigger circuit breaker errors, see <> +for tips on diagnosing and preventing them. diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 154c8361cb89b..0ccd0ad9ef7b2 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -356,7 +356,7 @@ There are multiple caches that can help with search performance, such as the {wikipedia}/Page_cache[filesystem cache], the <> or the <>. Yet all these caches are maintained at the node level, meaning that if you run the -same request twice in a row, have 1 <> or more +same request twice in a row, have 1 replica or more and use {wikipedia}/Round-robin_DNS[round-robin], the default routing algorithm, then those two requests will go to different shard copies, preventing node-level caches from helping. diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index a46720b0eeee7..9484e9004faf8 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -97,9 +97,6 @@ deleted indices directly from the file system and free up resources. time-based backing indices. You can use <> to automatically manage these backing indices. -[role="screenshot"] -image:images/ilm/index-lifecycle-policies.png[] - One advantage of this setup is <>, which creates a new write index when the current one meets a defined `max_primary_shard_size`, @@ -131,8 +128,10 @@ Large shards may make a cluster less likely to recover from failure. When a node fails, {es} rebalances the node's shards across the data tier's remaining nodes. Large shards can be harder to move across a network and may tax node resources. -While not a hard limit, shards between 10GB and 50GB tend to work well. You may -be able to use larger shards depending on your network and use case. +While not a hard limit, shards between 10GB and 50GB tend to work well for logs +and time series data. You may be able to use larger shards depending on +your network and use case. Smaller shards may be appropriate for +{enterprise-search-ref}/index.html[Enterprise Search] and similar use cases. If you use {ilm-init}, set the <>'s `max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB. @@ -168,6 +167,10 @@ have at most 600 shards. The further below this limit you can keep your nodes, the better. If you find your nodes exceeding more than 20 shards per GB, consider adding another node. +Some system indices for {enterprise-search-ref}/index.html[Enterprise Search] +are nearly empty and rarely used. Due to their low overhead, you shouldn't count +shards for these indices toward a node's shard limit. + To check the current size of each node's heap, use the <>. diff --git a/docs/reference/how-to/use-elasticsearch-for-time-series-data.asciidoc b/docs/reference/how-to/use-elasticsearch-for-time-series-data.asciidoc index 8ad092bf9e007..9ba484958f2bc 100644 --- a/docs/reference/how-to/use-elasticsearch-for-time-series-data.asciidoc +++ b/docs/reference/how-to/use-elasticsearch-for-time-series-data.asciidoc @@ -13,10 +13,16 @@ using {kib} and other {stack} features. tiers>> to automatically move older data to nodes with less expensive hardware as it ages. This helps improve performance and reduce storage costs. -The hot tier is required. The warm, cold, and frozen tiers are optional. 
Use -high-performance nodes in the hot and warm tiers for faster indexing and faster -searches on your most recent data. Use slower, less expensive nodes in the cold -and frozen tiers to reduce costs. +The hot and content tiers are required. The warm, cold, and frozen tiers are +optional. + +Use high-performance nodes in the hot and warm tiers for faster +indexing and faster searches on your most recent data. Use slower, less +expensive nodes in the cold and frozen tiers to reduce costs. + +The content tier is not typically used for time series data. However, it's +required to create system indices and other indices that aren't part of a data +stream. The steps for setting up data tiers vary based on your deployment type: diff --git a/docs/reference/ilm/actions/ilm-allocate.asciidoc b/docs/reference/ilm/actions/ilm-allocate.asciidoc index fbb9a7e075786..f89cfa064605c 100644 --- a/docs/reference/ilm/actions/ilm-allocate.asciidoc +++ b/docs/reference/ilm/actions/ilm-allocate.asciidoc @@ -32,6 +32,11 @@ see <>. (Optional, integer) Number of replicas to assign to the index. +`total_shards_per_node`:: +(Optional, integer) +The maximum number of shards for the index on a single {es} node. A value of `-1` is +interpreted as unlimited. See <>. + `include`:: (Optional, object) Assigns an index to nodes that have at least _one_ of the specified custom attributes. @@ -48,7 +53,8 @@ Assigns an index to nodes that have _all_ of the specified custom attributes. ==== Example The allocate action in the following policy changes the index's number of replicas to `2`. -The index allocation rules are not changed. +No more than 200 shards for the index will be placed on any single node. Otherwise the index +allocation rules are not changed. [source,console] -------------------------------------------------- @@ -59,7 +65,8 @@ PUT _ilm/policy/my_policy "warm": { "actions": { "allocate" : { - "number_of_replicas" : 2 + "number_of_replicas" : 2, + "total_shards_per_node" : 200 } } } diff --git a/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc b/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc index 9dd8bb53e95aa..7e08947e7f624 100644 --- a/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc +++ b/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc @@ -5,17 +5,21 @@ Phases allowed: hot, cold, frozen. Takes a snapshot of the managed index in the configured repository and mounts it -as a <>. - -In the frozen phase, the action mounts a <>. In other phases, the action mounts a <>. If the original index is part of a +as a <>. If the index is part of a <>, the mounted index replaces the original index in -the data stream. +the stream. + +The `searchable_snapshot` action requires <>. The action +uses the +<> +setting to mount the index directly to the phase's corresponding data tier. In +the frozen phase, the action mounts a <> to the frozen tier. In other phases, the action mounts a +<> to the corresponding data tier. IMPORTANT: If the `searchable_snapshot` action is used in the hot phase the -subsequent phases cannot define any of the `shrink`, `forcemerge`, `freeze` or -`searchable_snapshot` (also available in the cold and frozen phases) actions. +subsequent phases cannot include the `shrink`, `forcemerge`, or `freeze` +actions. [NOTE] This action cannot be performed on a data stream's write index. 
Attempts to do diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 005dad56fc594..34dc8e91df22d 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -107,6 +107,11 @@ If the request succeeds, the body of the response contains the policy definition } } } + }, + "in_use_by" : { <3> + "indices" : [], + "data_streams" : [], + "composable_templates" : [] } } } @@ -115,3 +120,4 @@ If the request succeeds, the body of the response contains the policy definition <1> The policy version is incremented whenever the policy is updated <2> When this policy was last modified +<3> Which indices, data streams, or templates currently use this policy diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc index 97a00e2c3fa73..149ba2a6b4491 100644 --- a/docs/reference/ilm/apis/ilm-api.asciidoc +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -1,7 +1,7 @@ [[index-lifecycle-management-api]] == {ilm-cap} APIs -You use the following APIs to set up policies to automatically manage the index lifecycle. +You use the following APIs to set up policies to automatically manage the index lifecycle. For more information about {ilm} ({ilm-init}), see <>. [discrete] @@ -28,6 +28,7 @@ For more information about {ilm} ({ilm-init}), see < * <> * <> * <> +* <> include::put-lifecycle.asciidoc[] @@ -42,3 +43,4 @@ include::get-status.asciidoc[] include::explain.asciidoc[] include::start.asciidoc[] include::stop.asciidoc[] +include::migrate-to-data-tiers.asciidoc[] diff --git a/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc b/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc new file mode 100644 index 0000000000000..b7caf586586d0 --- /dev/null +++ b/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc @@ -0,0 +1,141 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-migrate-to-data-tiers]] +=== Migrate to data tiers routing API +++++ +Migrate indices and ILM policies to data tiers routing +++++ + +Switches the indices and ILM policies from using custom node attributes and +<> to using <>, and +optionally deletes one legacy index template. +Using node roles enables {ilm-init} to <> between +data tiers. + +Migrating away from custom node attributes routing can be manually performed +as indicated in the <> page. + +This API provides an automated way of executing three out of the four manual steps listed +in the <>: + +. <> +. <> +. <> with the corresponding <> + +[[ilm-migrate-to-data-tiers-request]] +==== {api-request-title} + +`POST /_ilm/migrate_to_data_tiers` + +The API accepts an optional body that allows you to specify: + +- The legacy index template name to delete. Defaults to none. +- The name of the custom node attribute used for the indices and ILM policies allocation filtering. +Defaults to `data`. + +[[ilm-migrate-to-data-tiers-prereqs]] +==== {api-prereq-title} + +* {ilm-init} must be stopped before performing the migration. Use the <> +to stop {ilm-init} and <> to wait until the +reported operation mode is `STOPPED`. + +[[ilm-migrate-to-data-tiers-query-params]] +==== {api-query-parms-title} + +`dry_run`:: +(Optional, Boolean) +If `true`, simulates the migration from node attributes based allocation filters to data tiers, but does +not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be +migrated. +Defaults to `false`. 
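++
+As an illustrative sketch (reusing the attribute and template names from the example below), a dry run reports what would be migrated without changing anything:
++
+[source,console]
+----
+POST /_ilm/migrate_to_data_tiers?dry_run=true
+{
+  "legacy_template_to_delete": "global-template",
+  "node_attribute": "custom_attribute_name"
+}
+----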
+ +[[ilm-migrate-to-data-tiers-example]] +==== {api-examples-title} + +The following example migrates the indices and ILM policies away from defining +custom allocation filtering using the `custom_attribute_name` node attribute, and +deletes legacy template with name `global-template` if it exists in the system. + +//// +[source,console] +---- +POST _ilm/stop + +PUT _template/global-template +{ + "index_patterns": ["migrate-to-tiers-*"], + "settings": { + "index.routing.allocation.require.custom_attribute_name": "hot" + } +} + +PUT warm-index-to-migrate-000001 +{ + "settings": { + "index.routing.allocation.require.custom_attribute_name": "warm" + } +} + +PUT _ilm/policy/policy_with_allocate_action +{ + "policy": { + "phases": { + "warm": { + "actions": { + "allocate": { + "require": { + "custom_attribute_name": "warm" + } + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +---- +// TESTSETUP + +[source,console] +---- +DELETE warm-index-to-migrate-000001 + +DELETE _ilm/policy/policy_with_allocate_action + +POST _ilm/start +---- +// TEARDOWN +//// + +[source,console] +---------------------------------------------------------------- +POST /_ilm/migrate_to_data_tiers +{ + "legacy_template_to_delete": "global-template", + "node_attribute": "custom_attribute_name" +} +---------------------------------------------------------------- + +If the request succeeds, a response like the following will be received: + +[source,console-result] +------------------------------------------------------------------------------ +{ + "dry_run": false, + "removed_legacy_template":"global-template", <1> + "migrated_ilm_policies":["policy_with_allocate_action"], <2> + "migrated_indices":["warm-index-to-migrate-000001"] <3> +} +------------------------------------------------------------------------------ + +<1> Shows the name of the legacy index template that was deleted. This will be missing +if no legacy index template was deleted. +<2> The ILM policies that were updated. +<3> The indices that were migrated to <> routing. diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 575a405b69763..ac116bc63ad43 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -36,6 +36,12 @@ The request will fail if the current step does not match the step currently being executed for the index. This is to prevent the index from being moved from an unexpected step into the next step. +When specifying the target (`next_step`) to which the index will be moved, either the `name` or both +the `action` and `name` fields are optional. If only the phase is specified, the index will move to +the first step of the first action in the target phase. If the phase and action are specified, the index will move to +the first step of the specified action in the specified phase. Only actions specified in the ILM +policy are considered valid, an index cannot move to a step that is not part of its policy. 
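+
+As an illustrative sketch (the index name and current step values are placeholders), a request that specifies only the target phase looks like:
+
+[source,console]
+----
+POST _ilm/move/my-index-000001
+{
+  "current_step": {
+    "phase": "hot",
+    "action": "rollover",
+    "name": "check-rollover-ready"
+  },
+  "next_step": {
+    "phase": "warm"
+  }
+}
+----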
+ [[ilm-move-to-step-path-params]] ==== {api-path-parms-title} @@ -152,14 +158,16 @@ POST _ilm/move/my-index-000001 }, "next_step": { <2> "phase": "warm", - "action": "forcemerge", - "name": "forcemerge" + "action": "forcemerge", <3> + "name": "forcemerge" <4> } } -------------------------------------------------- // TEST[continued] <1> The step that the index is expected to be in <2> The step that you want to execute +<3> The optional action to which the index will be moved +<4> The optional step name to which the index will be moved If the request succeeds, you receive the following result: diff --git a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc index 230748afdf4f7..472b04288384a 100644 --- a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc @@ -29,7 +29,7 @@ the data. This data has the following retention requirements: * After rollover, keep indices in the hot data tier for 30 days. * 30 days after rollover: ** Move indices to the warm data tier. -** Set <> to 1. +** Set replica shards to 1. ** <> multiple index segments to free up the space used by deleted documents. * Delete indices 90 days after rollover. diff --git a/docs/reference/ilm/ilm-overview.asciidoc b/docs/reference/ilm/ilm-overview.asciidoc index ced0e05e4868b..bc00a43bcabff 100644 --- a/docs/reference/ilm/ilm-overview.asciidoc +++ b/docs/reference/ilm/ilm-overview.asciidoc @@ -12,14 +12,14 @@ according to your performance, resiliency, and retention requirements. Index lifecycle policies can trigger actions such as: -* **Rollover**: -include::../glossary.asciidoc[tag=rollover-def-short] -* **Shrink**: -include::../glossary.asciidoc[tag=shrink-def-short] -* **Force merge**: -include::../glossary.asciidoc[tag=force-merge-def-short] +* **Rollover**: Creates a new write index when the current one reaches a certain +size, number of docs, or age. +* **Shrink**: Reduces the number of primary shards in an index. +* **Force merge**: Triggers a <> to reduce the +number of segments in an index's shards. * **Freeze**: <> an index and makes it read-only. -* **Delete**: Permanently remove an index, including all of its data and metadata. +* **Delete**: Permanently remove an index, including all of its data and +metadata. {ilm-init} makes it easier to manage indices in hot-warm-cold architectures, which are common when you're working with time series data such as logs and metrics. diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc index c22135fce8c91..92511ab52cd75 100644 --- a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc @@ -268,3 +268,70 @@ PUT mylogs-pre-ilm*/_settings <1> // TEST[continued] <1> Updates all indices with names that start with `mylogs-pre-ilm` + +[discrete] +[[switch-lifecycle-policies]] +==== Switch lifecycle policies + +To switch an index's lifecycle policy, follow these steps: + +. Remove the existing policy using the <>. +Target a data stream or alias to remove the policies of all its indices. ++ +[source,console] +---- +POST logs-my_app-default/_ilm/remove +---- +// TEST[continued] +// TEST[s/^/PUT _data_stream\/logs-my_app-default\n/] + +. The remove policy API removes all {ilm-init} metadata from the index and +doesn't consider the index's lifecycle status. This can leave indices in an +undesired state. 
++ +-- +For example, the <> action temporarily closes an +index before reopening it. Removing an index's {ilm-init} policy during a +`forcemerge` can leave the index closed indefinitely. + +After policy removal, use the <> to check an +index's state . Target a data stream or alias to get the state of all its +indices. + +[source,console] +---- +GET logs-my_app-default +---- +// TEST[continued] + +You can then change the index as needed. For example, you can re-open any +closed indices using the <>. + +[source,console] +---- +POST logs-my_app-default/_open +---- +// TEST[continued] +-- + +. Assign a new policy using the <>. +Target a data stream or alias to assign a policy to all its indices. ++ +-- +WARNING: Don't assign a new policy without first removing the existing policy. +This can cause <> to silently fail. + +[source,console] +---- +PUT logs-my_app-default/_settings +{ + "index": { + "lifecycle": { + "name": "new-lifecycle-policy" + } + } +} +---- +// TEST[continued] +// TEST[s/new-lifecycle-policy/mylogs_policy_existing/] +-- diff --git a/docs/reference/images/index-mgmt/management-index-templates-mappings.png b/docs/reference/images/index-mgmt/management-index-templates-mappings.png index beb964b348171..5ec34c7797e4d 100644 Binary files a/docs/reference/images/index-mgmt/management-index-templates-mappings.png and b/docs/reference/images/index-mgmt/management-index-templates-mappings.png differ diff --git a/docs/reference/images/index-mgmt/management-index-templates.png b/docs/reference/images/index-mgmt/management-index-templates.png index 07f1fb9a7add1..9188aa85e68cd 100644 Binary files a/docs/reference/images/index-mgmt/management-index-templates.png and b/docs/reference/images/index-mgmt/management-index-templates.png differ diff --git a/docs/reference/images/index-mgmt/management_index_component_template.png b/docs/reference/images/index-mgmt/management_index_component_template.png index c03029fd172f0..766bfb8d71b3b 100644 Binary files a/docs/reference/images/index-mgmt/management_index_component_template.png and b/docs/reference/images/index-mgmt/management_index_component_template.png differ diff --git a/docs/reference/images/index-mgmt/management_index_create_wizard.png b/docs/reference/images/index-mgmt/management_index_create_wizard.png index bff1dd4cd0e7a..0b5147e21257d 100644 Binary files a/docs/reference/images/index-mgmt/management_index_create_wizard.png and b/docs/reference/images/index-mgmt/management_index_create_wizard.png differ diff --git a/docs/reference/images/index-mgmt/management_index_data_stream_backing_index.png b/docs/reference/images/index-mgmt/management_index_data_stream_backing_index.png index a5c577affbbb2..ecb7a3d9a1dac 100644 Binary files a/docs/reference/images/index-mgmt/management_index_data_stream_backing_index.png and b/docs/reference/images/index-mgmt/management_index_data_stream_backing_index.png differ diff --git a/docs/reference/images/index-mgmt/management_index_data_stream_stats.png b/docs/reference/images/index-mgmt/management_index_data_stream_stats.png index a67ab4a7deb32..d22e3a8bd1bb1 100644 Binary files a/docs/reference/images/index-mgmt/management_index_data_stream_stats.png and b/docs/reference/images/index-mgmt/management_index_data_stream_stats.png differ diff --git a/docs/reference/images/index-mgmt/management_index_details.png b/docs/reference/images/index-mgmt/management_index_details.png index b199d13218f5a..13d607b061d4a 100644 Binary files a/docs/reference/images/index-mgmt/management_index_details.png and 
b/docs/reference/images/index-mgmt/management_index_details.png differ diff --git a/docs/reference/images/index-mgmt/management_index_labels.png b/docs/reference/images/index-mgmt/management_index_labels.png index a89c32e08beff..caec6d5afc96c 100644 Binary files a/docs/reference/images/index-mgmt/management_index_labels.png and b/docs/reference/images/index-mgmt/management_index_labels.png differ diff --git a/docs/reference/images/ingest/custom-logs-pipeline.png b/docs/reference/images/ingest/custom-logs-pipeline.png index e0806d58e28af..215038a457eb7 100644 Binary files a/docs/reference/images/ingest/custom-logs-pipeline.png and b/docs/reference/images/ingest/custom-logs-pipeline.png differ diff --git a/docs/reference/images/ingest/custom-logs.png b/docs/reference/images/ingest/custom-logs.png index aa8f9ca1f650d..d1350ae065575 100644 Binary files a/docs/reference/images/ingest/custom-logs.png and b/docs/reference/images/ingest/custom-logs.png differ diff --git a/docs/reference/images/ingest/ingest-pipeline-list.png b/docs/reference/images/ingest/ingest-pipeline-list.png index ccc52878975e7..1d3281e669224 100644 Binary files a/docs/reference/images/ingest/ingest-pipeline-list.png and b/docs/reference/images/ingest/ingest-pipeline-list.png differ diff --git a/docs/reference/images/ingest/ingest-pipeline-processor.png b/docs/reference/images/ingest/ingest-pipeline-processor.png index 1dbce0f27e98d..5fff064253a8e 100644 Binary files a/docs/reference/images/ingest/ingest-pipeline-processor.png and b/docs/reference/images/ingest/ingest-pipeline-processor.png differ diff --git a/docs/reference/images/ingest/test-a-pipeline.png b/docs/reference/images/ingest/test-a-pipeline.png index a5b83e200ff07..65ee2815ebe1d 100644 Binary files a/docs/reference/images/ingest/test-a-pipeline.png and b/docs/reference/images/ingest/test-a-pipeline.png differ diff --git a/docs/reference/images/kibana-console.png b/docs/reference/images/kibana-console.png index d53a1d50c6208..ac0c39049a1f4 100644 Binary files a/docs/reference/images/kibana-console.png and b/docs/reference/images/kibana-console.png differ diff --git a/docs/reference/images/sql/rest/console-triple-quotes.png b/docs/reference/images/sql/rest/console-triple-quotes.png index 4a13acb986114..444b7823a6ca2 100644 Binary files a/docs/reference/images/sql/rest/console-triple-quotes.png and b/docs/reference/images/sql/rest/console-triple-quotes.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 19c5be0f449fe..63932c4d5a72a 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -47,8 +47,10 @@ NOTE: The number of shards are limited to `1024` per index. This limitation is a `index.number_of_routing_shards`:: + ==== -Number of routing shards used to <> an index. +Integer value used with <> to +route documents to a primary shard. See <>. +{es} uses this value when <> an index. For example, a 5 shard index with `number_of_routing_shards` set to `30` (`5 x 2 x 3`) could be split by a factor of `2` or `3`. In other words, it could be split as follows: @@ -168,6 +170,7 @@ specific index module: The number of replicas each primary shard has. Defaults to 1. +[[dynamic-index-auto-expand-replicas]] `index.auto_expand_replicas`:: Auto-expand the number of replicas based on the number of data nodes in the cluster. Set to a dash delimited lower and upper bound (e.g. `0-5`) or use `all` @@ -183,6 +186,7 @@ awareness>> and <> are ignored for this index. 
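As a rough illustration of the dynamic replica settings described above (the index name `my-index-000001` is only a placeholder, not part of the original example set), you could let the replica count follow the number of data nodes with a settings update like this:

[source,console]
----
PUT /my-index-000001/_settings
{
  "index.auto_expand_replicas": "0-5"
}
----

Setting the value back to `false` disables auto-expansion and leaves `index.number_of_replicas` in control again.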
+[[dynamic-index-search-idle-after]] `index.search.idle.after`:: How long a shard can not receive a search or get request until it's considered search idle. (default is `30s`) @@ -329,6 +333,13 @@ Defaults to `*`, which matches all fields eligible for the default pipeline (if it exists). The special pipeline name `_none` indicates no ingest pipeline will run. +[[index-mapping-dimension-fields-limit]] +`index.mapping.dimension_fields.limit`:: +For internal use by Elastic only. Maximum number of time series dimensions for +the index. Defaults to `16`. ++ +You can mark a field as a dimension using the `dimension` mapping parameter. + [discrete] === Settings in other index modules diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc index e32684c8264d0..dd355eccbca2a 100644 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -104,7 +104,7 @@ before activating this feature. [[early-terminate]] === Early termination of search request -By default in Elasticsearch a search request must visit every document that match a query to +By default in Elasticsearch a search request must visit every document that matches a query to retrieve the top documents sorted by a specified sort. Though when the index sort and the search sort are the same it is possible to limit the number of documents that should be visited per segment to retrieve the N top ranked documents globally. diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index 0e77400b2b2e4..80f5921bb4138 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -129,3 +129,12 @@ the original document format is important, you can turn off reformatting by sett logged "as is" and can potentially span multiple log lines. The index slow log file is configured in the `log4j2.properties` file. + +[discrete] +=== Slow log levels + +You can mimic the search or indexing slow log level by setting the appropriate +thresholds so that more verbose loggers are effectively switched off. +For example, to simulate `index.indexing.slowlog.level = INFO`, +set `index.indexing.slowlog.threshold.index.debug` and +`index.indexing.slowlog.threshold.index.trace` to `-1`. diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 98f61e0ddab05..59a5305900930 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -48,6 +48,8 @@ supported systems but is subject to change. [[simplefs]]`simplefs`:: +deprecated::[7.15,"simplefs is deprecated and will be removed in 8.0. Use niofs or other file systems instead. Elasticsearch 7.15 or later uses niofs for the simplefs store type as it offers superior or equivalent performance to simplefs."] + The Simple FS type is a straightforward implementation of file system storage (maps to Lucene `SimpleFsDirectory`) using a random access file.
This implementation has poor concurrent performance (multiple threads diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index d9238af7032ed..4ac5c9bd6dc02 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -69,8 +69,6 @@ include::commands/index.asciidoc[] include::how-to.asciidoc[] -include::glossary.asciidoc[] - include::rest-api/index.asciidoc[] include::migration/index.asciidoc[] diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index dfc4cfa99f99d..650a6239cdbab 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -30,6 +30,7 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> +* <> [discrete] [[alias-management]] @@ -70,6 +71,7 @@ For more information, see <>. [[monitoring]] === Monitoring: * <> +* <> * <> * <> * <> @@ -92,6 +94,7 @@ For more information, see <>. include::indices/alias-exists.asciidoc[] include::indices/aliases.asciidoc[] include::indices/analyze.asciidoc[] +include::indices/diskusage.asciidoc[] include::indices/clearcache.asciidoc[] include::indices/clone-index.asciidoc[] include::indices/close.asciidoc[] @@ -107,6 +110,7 @@ include::indices/delete-index.asciidoc[] include::indices/delete-index-template.asciidoc[] include::indices/delete-index-template-v1.asciidoc[] include::indices/indices-exists.asciidoc[] +include::indices/field-usage-stats.asciidoc[] include::indices/flush.asciidoc[] include::indices/forcemerge.asciidoc[] include::indices/apis/freeze.asciidoc[] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index a23998926f961..198d6aff8b7fb 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -80,32 +80,36 @@ The object body contains options for the alias. Supports an empty object. `alias`:: (Required*, string) Alias for the action. Index alias names support <>. If `aliases` is not specified, the `add` -and `remove` actions require this parameter. The `remove_index` action doesn't -support this parameter. +and `remove` actions require this parameter. For the `remove` action, this +parameter supports wildcards (`*`). The `remove_index` action doesn't support +this parameter. `aliases`:: (Required*, array of strings) Aliases for the action. Index alias names support <>. If `alias` is not specified, the `add` and -`remove` actions require this parameter. The `remove_index` action doesn't -support this parameter. +`remove` actions require this parameter. For the `remove` action, this parameter +supports wildcards (`*`). The `remove_index` action doesn't support this +parameter. // tag::alias-options[] `filter`:: (Optional, <> Query used to limit documents the -alias can access. Data stream aliases don't support this parameter. +alias can access. // end::alias-options[] + Only the `add` action supports this parameter. `index`:: (Required*, string) Data stream or index for the action. Supports wildcards -(`*`). Wildcard patterns that match both data streams and indices return an -error. If `indices` is not specified, this parameter is required. +(`*`). If `indices` is not specified, this parameter is required. For the `add` +and `remove_index` actions, wildcard patterns that match both data streams and +indices return an error. `indices`:: (Required*, array of strings) Data streams or indices for the action. Supports -wildcards (`*`). Wildcard patterns that match both data streams and indices -return an error. 
If `index` is not specified, this parameter is required. +wildcards (`*`). If `index` is not specified, this parameter is required. For +the `add` and `remove_index` actions, wildcard patterns that match both data +streams and indices return an error. // tag::alias-options[] `index_routing`:: @@ -136,7 +140,7 @@ Only the `add` action supports this parameter. `must_exist`:: (Optional, Boolean) If `true`, the alias must exist to perform the action. Defaults to `false`. Only -the `remove` action supports this parameter. +the `remove` action supports this parameter. // tag::alias-options[] `routing`:: diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index e81e52aaea664..d2e973261f4ea 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -77,7 +77,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] .Properties of `aliases` objects ======= ``:: -(Required, object) The key is the alias name. Supports +(Required, object) The key is the alias name. Index alias names support <>. + The object body contains options for the alias. Supports an empty object. diff --git a/docs/reference/indices/delete-alias.asciidoc b/docs/reference/indices/delete-alias.asciidoc index ce5bfded37f6f..8d9cf98f59735 100644 --- a/docs/reference/indices/delete-alias.asciidoc +++ b/docs/reference/indices/delete-alias.asciidoc @@ -37,8 +37,7 @@ indices. ``:: (Required, string) Comma-separated list of data streams or indices used to limit -the request. Supports wildcards (`*`). Wildcard patterns that match both data -streams and indices return an error. +the request. Supports wildcards (`*`). [[delete-alias-api-query-params]] ==== {api-query-parms-title} diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index ac5bc98ce1442..1e081eba62da0 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -30,17 +30,14 @@ or `manage` <> for the target index. ``:: + -- -(Request, string) Comma-separated list or wildcard expression of indices to -delete. - -In this parameter, wildcard expressions match only open, concrete indices. You -cannot delete an index using an <>. - -By default, you must explicitly name the indices you are deleting. -To specify indices to delete with `_all`, `*`, or other wildcard -expressions, change the `action.destructive_requires_name` setting to `false`. -You can update this setting in the `elasticsearch.yml` file or using the -<> API. +(Required, string) Comma-separated list of indices to delete. You cannot specify +<>. + +By default, this parameter does not support wildcards (`*`) or `_all`. To use +wildcards or `_all`, change the `action.destructive_requires_name` setting to +`false`. You can update this setting in the `elasticsearch.yml` file or using +the <> API. Wildcard patterns +only match open, concrete indices. NOTE: You cannot delete the current write index of a data stream. To delete the index, you must <> the data stream so a new diff --git a/docs/reference/indices/diskusage.asciidoc b/docs/reference/indices/diskusage.asciidoc new file mode 100644 index 0000000000000..c8a9f27fb3649 --- /dev/null +++ b/docs/reference/indices/diskusage.asciidoc @@ -0,0 +1,184 @@ +[[indices-disk-usage]] +=== Analyze index disk usage API +++++ +Analyze index disk usage +++++ + +experimental[] + +Analyzes the disk usage of each field of an index or data stream. 
+This API might not support indices created in previous {es} versions. +The results for a small index can be inaccurate because some parts of the index +might not be analyzed by the API. + +[source,console] +-------------------------------------------------- +POST /my-index-000001/_disk_usage?run_expensive_tasks=true +-------------------------------------------------- +// TEST[setup:messages] + +[[analyze-index-disk-usage-api-request]] +==== {api-request-title} + +`POST //_disk_usage` + +[[analyze-index-disk-usage-api-request-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `manage` +<> for the target index, data stream, +or alias. + +[[analyze-index-disk-usage-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Comma-separated list of data streams, indices, and aliases +used to limit the request. Because the API consumes significant resources, it's +recommended to execute it against a single index (or the latest backing index of +a data stream) at a time. + +[[analyze-index-disk-usage-api-query-params]] +==== {api-query-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] ++ +Defaults to `true`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +`flush`:: +(Optional, Boolean) If `true`, the API performs a flush before analysis. If +`false`, the response may not include uncommitted data. Defaults to `true`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +`run_expensive_tasks`:: +(Required, Boolean) Analyzing field disk usage is resource-intensive. To use the +API, this parameter must be set to `true`. Defaults to `false`.
+ +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] + + +[[analyze-index-disk-usage-api-example]] +==== {api-examples-title} + +[source,console] +-------------------------------------------------- +POST /my-index-000001/_disk_usage?run_expensive_tasks=true +-------------------------------------------------- +// TEST[setup:messages] + +The API returns: + +[source,console-response] +-------------------------------------------------- +{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "my-index-000001": { + "store_size": "929mb", <1> + "store_size_in_bytes": 974192723, + "all_fields": { + "total": "928.9mb", <2> + "total_in_bytes": 973977084, + "inverted_index": { + "total": "107.8mb", + "total_in_bytes": 113128526 + }, + "stored_fields": "623.5mb", + "stored_fields_in_bytes": 653819143, + "doc_values": "125.7mb", + "doc_values_in_bytes": 131885142, + "points": "59.9mb", + "points_in_bytes": 62885773, + "norms": "2.3kb", + "norms_in_bytes": 2356, + "term_vectors": "2.2kb", + "term_vectors_in_bytes": 2310 + }, + "fields": { + "_id": { + "total": "49.3mb", + "total_in_bytes": 51709993, + "inverted_index": { + "total": "29.7mb", + "total_in_bytes": 31172745 + }, + "stored_fields": "19.5mb", <3> + "stored_fields_in_bytes": 20537248, + "doc_values": "0b", + "doc_values_in_bytes": 0, + "points": "0b", + "points_in_bytes": 0, + "norms": "0b", + "norms_in_bytes": 0, + "term_vectors": "0b", + "term_vectors_in_bytes": 0 + }, + "_primary_term": {...}, + "_seq_no": {...}, + "_version": {...}, + "_source": { + "total": "603.9mb", + "total_in_bytes": 633281895, + "inverted_index": {...}, + "stored_fields": "603.9mb", <4> + "stored_fields_in_bytes": 633281895, + "doc_values": "0b", + "doc_values_in_bytes": 0, + "points": "0b", + "points_in_bytes": 0, + "norms": "0b", + "norms_in_bytes": 0, + "term_vectors": "0b", + "term_vectors_in_bytes": 0 + }, + "context": { + "total": "28.6mb", + "total_in_bytes": 30060405, + "inverted_index": { + "total": "22mb", + "total_in_bytes": 23090908 + }, + "stored_fields": "0b", + "stored_fields_in_bytes": 0, + "doc_values": "0b", + "doc_values_in_bytes": 0, + "points": "0b", + "points_in_bytes": 0, + "norms": "2.3kb", + "norms_in_bytes": 2356, + "term_vectors": "2.2kb", + "term_vectors_in_bytes": 2310 + }, + "context.keyword": {...}, + "message": {...}, + "message.keyword": {...} + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/: \{\.\.\.\}/: $body.$_path/] +// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] +// TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] + +<1> The store size of only analyzed shards of the index. + +<2> The total size of fields of the analyzed shards of the index. This total +is usually smaller than the index size specified in <1> as some small metadata +files are ignored and some parts of data files might not be scanned by the API. + +<3> The stored size of the `_id` field + +<4> The stored size of the `_source` field. As stored fields are stored +together in a compressed format, the estimated sizes of stored fields are +best efforts and can be inaccurate. The stored size of the `_id` field +is likely underestimated while the `_source` field is overestimated. 
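As recommended in the path parameters above, it's usually best to analyze one index at a time. For a data stream, a possible workflow (sketched here with hypothetical data stream and backing index names) is to list the backing indices first and then analyze only the most recent one:

[source,console]
----
GET /_data_stream/my-data-stream

POST /.ds-my-data-stream-2099.03.07-000001/_disk_usage?run_expensive_tasks=true
----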
diff --git a/docs/reference/indices/field-usage-stats.asciidoc b/docs/reference/indices/field-usage-stats.asciidoc new file mode 100644 index 0000000000000..599f82aabbfd9 --- /dev/null +++ b/docs/reference/indices/field-usage-stats.asciidoc @@ -0,0 +1,167 @@ +[[field-usage-stats]] +=== Field usage stats API +++++ +Field usage stats +++++ + +experimental[] + +Returns field usage information for each shard and field +of an index. +Field usage statistics are automatically captured when +queries are running on a cluster. A shard-level search +request that accesses a given field, even if multiple times +during that request, is counted as a single use. + +[source,console] +-------------------------------------------------- +GET /my-index-000001/_field_usage_stats +-------------------------------------------------- +// TEST[setup:messages] + +[[field-usage-stats-api-request]] +==== {api-request-title} + +`GET //_field_usage_stats + +[[field-usage-stats-api-request-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `manage` +<> for the target index or index alias. + +[[field-usage-stats-api-path-params]] +==== {api-path-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index] + +[[field-usage-stats-api-query-params]] +==== {api-query-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +`fields`:: ++ +-- +(Optional, string) +Comma-separated list or wildcard expressions of fields +to include in the statistics. +-- + +[[field-usage-stats-api-example]] +==== {api-examples-title} + +////////////////////////// + +[source,console] +-------------------------------------------------- +POST /my-index-000001/_search +{ + "query" : { + "match" : { "context" : "bar" } + }, + "aggs": { + "message_stats": { + "string_stats": { + "field": "message.keyword", + "show_distribution": true + } + } + } +} +-------------------------------------------------- +// TEST[setup:messages] + +////////////////////////// + +The following request retrieves field usage information of index `my-index-000001` +on the currently available shards. 
+ +[source,console] +-------------------------------------------------- +GET /my-index-000001/_field_usage_stats +-------------------------------------------------- +// TEST[continued] + +The API returns the following response: + +[source,console-response] +-------------------------------------------------- +{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "my-index-000001": { + "shards": [ + { + "tracking_id": "MpOl0QlTQ4SYYhEe6KgJoQ", + "tracking_started_at_millis": 1625558985010, + "routing": { + "state": "STARTED", + "primary": true, + "node": "gA6KeeVzQkGURFCUyV-e8Q", + "relocating_node": null + }, + "stats" : { + "all_fields": { + "any": "6", <1> + "inverted_index": { + "terms" : 1, + "postings" : 1, + "proximity" : 1, <2> + "positions" : 0, + "term_frequencies" : 1, + "offsets" : 0, + "payloads" : 0 + }, + "stored_fields" : 2, + "doc_values" : 1, + "points" : 0, + "norms" : 1, + "term_vectors" : 0 + }, + "fields": { + "_id": { + "any" : 1, + "inverted_index": { + "terms" : 1, + "postings" : 1, + "proximity" : 1, + "positions" : 0, + "term_frequencies" : 1, + "offsets" : 0, + "payloads" : 0 + }, + "stored_fields" : 1, + "doc_values" : 0, + "points" : 0, + "norms" : 0, + "term_vectors" : 0 + }, + "_source": {...}, + "context": {...}, + "message.keyword": {...} + } + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/: \{\.\.\.\}/: $body.$_path/] +// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] +// TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] +<1> denotes any kind of use of the field, either inverted index, + or stored fields, or doc values, etc. +<2> denotes any kind of use of either positions, offsets or + payloads. diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc index 0cda6dbe7ab09..0543da9b2a562 100644 --- a/docs/reference/indices/index-mgmt.asciidoc +++ b/docs/reference/indices/index-mgmt.asciidoc @@ -144,7 +144,7 @@ image::images/index-mgmt/management_index_component_template.png[Component templ section blank. . Define a mapping that contains an <> field named `geo` with a -child <> field named `coordinates`: +child <> field named `coordinates`: + [role="screenshot"] image::images/index-mgmt/management-index-templates-mappings.png[Mapped fields page] diff --git a/docs/reference/indices/put-component-template.asciidoc b/docs/reference/indices/put-component-template.asciidoc index 31de9002a33ee..8c6756c496a18 100644 --- a/docs/reference/indices/put-component-template.asciidoc +++ b/docs/reference/indices/put-component-template.asciidoc @@ -132,8 +132,9 @@ This is the template to be applied, may optionally include a `mappings`, [%collapsible%open] ==== `aliases`:: -(Optional, object of objects) Aliases for the index. If an index template -includes `data_stream`, this parameter is not supported. +(Optional, object of objects) Aliases to add. ++ +include::{es-repo-dir}/indices/put-index-template.asciidoc[tag=template-ds-alias] + include::{es-repo-dir}/indices/create-index.asciidoc[tag=aliases-props] diff --git a/docs/reference/indices/put-index-template.asciidoc b/docs/reference/indices/put-index-template.asciidoc index 8968cccbb9118..4b830c6b2f083 100644 --- a/docs/reference/indices/put-index-template.asciidoc +++ b/docs/reference/indices/put-index-template.asciidoc @@ -137,8 +137,13 @@ Template to be applied. It may optionally include an `aliases`, `mappings`, or [%collapsible%open] ==== `aliases`:: -(Optional, object of objects) Aliases for the index. 
If the index template -includes `data_stream`, this parameter is not supported. +(Optional, object of objects) Aliases to add. ++ +// tag::template-ds-alias[] +If the index template includes a `data_stream` object, these are data stream +aliases. Otherwise, these are index aliases. Data stream aliases ignore the +`index_routing`, `routing`, and `search_routing` options. +// end::template-ds-alias[] + include::{es-repo-dir}/indices/create-index.asciidoc[tag=aliases-props] diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 270d77c5b3133..0d9d2668d2e37 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -46,7 +46,7 @@ Recovery automatically occurs during the following processes: * Node startup or failure. This type of recovery is called a local store recovery. -* <>. +* Primary shard replication. * Relocation of a shard to a different node in the same cluster. * <>. @@ -94,8 +94,7 @@ The recovery is related to a <>. `REPLICA`:: -The recovery is related to -a <>. +The recovery is related to a primary shard replication. `RELOCATING`:: The recovery is related to @@ -281,6 +280,8 @@ The API returns the following response: "reused_in_bytes" : 0, "recovered" : "65.7mb", "recovered_in_bytes" : 68891939, + "recovered_from_snapshot" : "0b", + "recovered_from_snapshot_in_bytes" : 0, "percent" : "87.1%" }, "files" : { @@ -381,6 +382,8 @@ The API returns the following response: "reused_in_bytes" : 26001617, "recovered" : "0b", "recovered_in_bytes" : 0, + "recovered_from_snapshot" : "0b", + "recovered_from_snapshot_in_bytes" : 0, "percent" : "100.0%" }, "files" : { @@ -395,11 +398,13 @@ The API returns the following response: }, { "name" : "_0.cfs", "length" : 135306, - "recovered" : 135306 + "recovered" : 135306, + "recovered_from_snapshot": 0 }, { "name" : "segments_2", "length" : 251, - "recovered" : 251 + "recovered" : 251, + "recovered_from_snapshot": 0 } ] }, diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc index 29c40e444354a..aa432e5b7fe80 100644 --- a/docs/reference/indices/segments.asciidoc +++ b/docs/reference/indices/segments.asciidoc @@ -161,7 +161,7 @@ The API returns the following response: "num_docs": 1, "deleted_docs": 0, "size_in_bytes": 3800, - "memory_in_bytes": 1410, + "memory_in_bytes": 0, "committed": false, "search": true, "version": "7.0.0", diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 83d4183dcc2f4..06f048856348e 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -192,7 +192,7 @@ the cluster state -- it doesn't wait for the split operation to start. Indices can only be split if they satisfy the following requirements: -* the target index must not exist +* The target index must not exist * The source index must have fewer primary shards than the target index. diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index dd5273c46cfd5..0eaec88027533 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -323,7 +323,7 @@ template's index pattern should match `logs--*`. -- You can create this template using {kib}'s <> feature or the <>. +API>>. For example, the following request creates a template matching `logs-my_app-*`. 
The template uses a component template that contains the @@ -550,7 +550,7 @@ PUT _ingest/pipeline/my-pipeline "description": "Use geo_point dynamic template for address field", "field": "_dynamic_templates", "value": { - "address": "geo_point" + "address": "geo_point" } } } @@ -560,8 +560,8 @@ PUT _ingest/pipeline/my-pipeline The set processor above tells ES to use the dynamic template named `geo_point` for the field `address` if this field is not defined in the mapping of the index -yet. This processor overrides the dynamic template for the field `address` if -already defined in the bulk request, but has no effect on other dynamic +yet. This processor overrides the dynamic template for the field `address` if +already defined in the bulk request, but has no effect on other dynamic templates defined in the bulk request. WARNING: If you <> @@ -717,6 +717,32 @@ PUT _ingest/pipeline/my-pipeline ---- // TEST[s/\.\.\./{"lowercase": {"field":"my-keyword-field"}}/] +Additional information about the pipeline failure may be available in the +document metadata fields `on_failure_message`, `on_failure_processor_type`, +`on_failure_processor_tag`, and `on_failure_pipeline`. These fields are +accessible only from within an `on_failure` block. + +The following example uses the metadata fields to include information about +pipeline failures in documents. + +[source,console] +---- +PUT _ingest/pipeline/my-pipeline +{ + "processors": [ ... ], + "on_failure": [ + { + "set": { + "description": "Record error information", + "field": "error_information", + "value": "Processor {{ _ingest.on_failure_processor_type }} with tag {{ _ingest.on_failure_processor_tag }} in pipeline {{ _ingest.on_failure_pipeline }} failed with message {{ _ingest.on_failure_message }}" + } + } + ] +} +---- +// TEST[s/\.\.\./{"lowercase": {"field":"my-keyword-field"}}/] + [discrete] [[conditionally-run-processor]] === Conditionally run a processor @@ -744,10 +770,10 @@ PUT _ingest/pipeline/my-pipeline } ---- -If the static `script.painless.regex.enabled` cluster setting is enabled, you -can use regular expressions in your `if` condition scripts. For supported -syntax, see the {painless}/painless-regexes.html[Painless regexes] -documentation. +If the <> cluster +setting is enabled, you can use regular expressions in your `if` condition +scripts. For supported syntax, see {painless}/painless-regexes.html[Painless +regular expressions]. TIP: If possible, avoid using regular expressions. Expensive regular expressions can slow indexing speeds. diff --git a/docs/reference/ingest/apis/enrich/index.asciidoc b/docs/reference/ingest/apis/enrich/index.asciidoc index 6e013a3b4a5f0..a17c8179af1b1 100644 --- a/docs/reference/ingest/apis/enrich/index.asciidoc +++ b/docs/reference/ingest/apis/enrich/index.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[enrich-apis]] == Enrich APIs diff --git a/docs/reference/ingest/apis/geoip-stats-api.asciidoc b/docs/reference/ingest/apis/geoip-stats-api.asciidoc index dda1f1a2d1492..6ef0db546342b 100644 --- a/docs/reference/ingest/apis/geoip-stats-api.asciidoc +++ b/docs/reference/ingest/apis/geoip-stats-api.asciidoc @@ -23,6 +23,9 @@ GET _ingest/geoip/stats * If the {es} {security-features} are enabled, you must have the `monitor` or `manage` <> to use this API. +* If <> is +disabled, this API returns zero values and an empty `nodes` object. + [role="child_attributes"] [[geoip-stats-api-response-body]] ==== {api-response-body-title} @@ -87,4 +90,4 @@ Downloaded database files, including related license files. 
{es} stores these files in the node's <>: `$ES_TMPDIR/geoip-databases/`. ===== -==== +==== \ No newline at end of file diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 090a39e3834a5..772c35d542c2f 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -4,6 +4,7 @@ Use ingest APIs to manage tasks and resources related to <> and processors. +[discrete] [[ingest-pipeline-apis]] === Ingest pipeline APIs @@ -14,6 +15,7 @@ Use the following APIs to create, manage, and test ingest pipelines: * <> to delete a pipeline * <> to test a pipeline +[discrete] [[ingest-stat-apis]] === Stat APIs @@ -24,6 +26,6 @@ the <>. include::put-pipeline.asciidoc[] include::delete-pipeline.asciidoc[] -include::get-pipeline.asciidoc[] include::geoip-stats-api.asciidoc[] +include::get-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] diff --git a/docs/reference/ingest/apis/put-pipeline.asciidoc b/docs/reference/ingest/apis/put-pipeline.asciidoc index 4f9cbe702361e..7bab977266c07 100644 --- a/docs/reference/ingest/apis/put-pipeline.asciidoc +++ b/docs/reference/ingest/apis/put-pipeline.asciidoc @@ -48,7 +48,7 @@ PUT _ingest/pipeline/my-pipeline-id [[put-pipeline-api-query-params]] ==== {api-query-parms-title} -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[put-pipeline-api-request-body]] @@ -70,7 +70,7 @@ specified. {es} will not attempt to run the pipeline's remaining processors. `processors`:: (Required, array of <> objects) -Processors used to preform transformations on documents before indexing. +Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. `version`:: @@ -79,4 +79,49 @@ Version number used by external systems to track ingest pipelines. + This parameter is intended for external systems only. {es} does not use or validate pipeline version numbers. + +`_meta`:: +(Optional, object) +Optional metadata about the ingest pipeline. May have any contents. This +map is not automatically generated by {es}. // end::pipeline-object[] + +[[put-pipeline-api-example]] +==== {api-examples-title} + +[[pipeline-metadata]] +===== Pipeline metadata + +You can use the `_meta` parameter to add arbitrary metadata to a pipeline. +This user-defined object is stored in the cluster state, +so keeping it short is preferable. + +The `_meta` parameter is optional and not automatically generated or used by {es}. + +To unset `_meta`, replace the pipeline without specifying one. + +[source,console] +-------------------------------------------------- +PUT /_ingest/pipeline/my-pipeline-id +{ + "description" : "My optional pipeline description", + "processors" : [ + { + "set" : { + "description" : "My optional processor description", + "field": "my-keyword-field", + "value": "foo" + } + } + ], + "_meta": { + "reason": "set my-keyword-field to foo", + "serialization": { + "class": "MyPipeline", + "id": 10 + } + } +} +-------------------------------------------------- + +To check the `_meta`, use the <> API. 
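For instance, to verify the metadata stored by the example above, a quick follow-up check (reusing the same pipeline ID) might be:

[source,console]
----
GET /_ingest/pipeline/my-pipeline-id
----

The response includes the pipeline definition along with the stored `_meta` object.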
diff --git a/docs/reference/ingest/common-log-format-example.asciidoc b/docs/reference/ingest/common-log-format-example.asciidoc index d763d6d5ab13e..05f012ab53448 100644 --- a/docs/reference/ingest/common-log-format-example.asciidoc +++ b/docs/reference/ingest/common-log-format-example.asciidoc @@ -39,7 +39,7 @@ image::images/ingest/ingest-pipeline-list.png[Kibana's Ingest Node Pipelines lis + [source,grok] ---- -%{IPORHOST:source.ip} %{USER:user.id} %{USER:user.name} \[%{HTTPDATE:@timestamp}\] "%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}" %{NUMBER:http.response.status_code:int} (?:-|%{NUMBER:http.response.body.bytes:int}) %{QS:http.request.referrer} %{QS:user_agent} +%{IPORHOST:source.ip} %{USER:user.id} %{USER:user.name} \\[%{HTTPDATE:@timestamp}\\] \"%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}\" %{NUMBER:http.response.status_code:int} (?:-|%{NUMBER:http.response.body.bytes:int}) %{QS:http.request.referrer} %{QS:user_agent} ---- // NOTCONSOLE + diff --git a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc index ff79cf890df5e..9432c9c06dec8 100644 --- a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc @@ -82,13 +82,13 @@ pipeline. In the pipeline, add an <> that includes: * Your enrich policy. -* The `field` of incoming documents used to match the geo_shape of documents +* The `field` of incoming documents used to match the geoshape of documents from the enrich index. * The `target_field` used to store appended enrich data for incoming documents. This field contains the `match_field` and `enrich_fields` specified in your enrich policy. -* The `shape_relation`, which indicates how the processor matches geo_shapes in - incoming documents to geo_shapes in documents from the enrich index. See +* The `shape_relation`, which indicates how the processor matches geoshapes in + incoming documents to geoshapes in documents from the enrich index. See <<_spatial_relations>> for valid options and more information. [source,console] diff --git a/docs/reference/ingest/processors/append.asciidoc b/docs/reference/ingest/processors/append.asciidoc index 919cf92ec2ec6..c2d082c59c85c 100644 --- a/docs/reference/ingest/processors/append.asciidoc +++ b/docs/reference/ingest/processors/append.asciidoc @@ -15,10 +15,13 @@ Accepts a single value or an array of values. [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | The field to be appended to. Supports <>. -| `value` | yes | - | The value to be appended. Supports <>. +| `field` | yes | - | The field to be appended to. Supports <>. +| `value` | yes | - | The value to be appended. Supports <>. | `allow_duplicates` | no | true | If `false`, the processor does not append values already present in the field. +| `media_type` | no | `application/json` | The media type for encoding `value`. Applies only when `value` is a +<>. Must be one of `application/json`, `text/plain`, or +`application/x-www-form-urlencoded`. 
include::common-options.asciidoc[] |====== diff --git a/docs/reference/ingest/processors/dissect.asciidoc b/docs/reference/ingest/processors/dissect.asciidoc index 6dff72af18481..8733f9f99c2b8 100644 --- a/docs/reference/ingest/processors/dissect.asciidoc +++ b/docs/reference/ingest/processors/dissect.asciidoc @@ -194,7 +194,7 @@ Reference key modifier example | *Pattern* | `[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}` | *Input* | [2018-08-10T17:15:42,466] [ERR] ip:1.2.3.4 error:REFUSED | *Result* a| -* ts = 1998-08-10T17:15:42,466 +* ts = 2018-08-10T17:15:42,466 * level = ERR * ip = 1.2.3.4 * error = REFUSED diff --git a/docs/reference/ingest/processors/dot-expand.asciidoc b/docs/reference/ingest/processors/dot-expand.asciidoc index 4d6eb6106cc31..1b45b1ff185d7 100644 --- a/docs/reference/ingest/processors/dot-expand.asciidoc +++ b/docs/reference/ingest/processors/dot-expand.asciidoc @@ -12,9 +12,10 @@ Otherwise these fields can't be accessed by any processor. .Dot Expand Options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The field to expand into an object field -| `path` | no | - | The field that contains the field to expand. Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. +| Name | Required | Default | Description +| `field` | yes | - | The field to expand into an object field. If set to `*`, all top-level fields will be expanded. +| `path` | no | - | The field that contains the field to expand. Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. +| `override`| no | false | Controls the behavior when there is already an existing nested object that conflicts with the expanded field. When `false`, the processor will merge conflicts by combining the old and the new values into an array. When `true`, the value from the expanded field will overwrite the existing value. include::common-options.asciidoc[] |====== @@ -79,6 +80,73 @@ is transformed by the `dot_expander` processor into: -------------------------------------------------- // NOTCONSOLE +Contrast that with when the `override` option is set to `true`. + +[source,js] +-------------------------------------------------- +{ + "dot_expander": { + "field": "foo.bar", + "override": true + } +} +-------------------------------------------------- +// NOTCONSOLE + +In that case, the value of the expanded field overrides the value of the nested object. 
+ +[source,js] +-------------------------------------------------- +{ + "foo" : { + "bar" : "value2" + } +} +-------------------------------------------------- +// NOTCONSOLE + +''' + +The value of `field` can also be set to a `*` to expand all top-level dotted field names: + +[source,js] +-------------------------------------------------- +{ + "dot_expander": { + "field": "*" + } +} +-------------------------------------------------- +// NOTCONSOLE + +The dot expand processor would turn this document: + +[source,js] +-------------------------------------------------- +{ + "foo.bar" : "value", + "baz.qux" : "value" +} +-------------------------------------------------- +// NOTCONSOLE + +into: + +[source,js] +-------------------------------------------------- +{ + "foo" : { + "bar" : "value" + }, + "baz" : { + "qux" : "value" + } +} +-------------------------------------------------- +// NOTCONSOLE + +''' + If any field outside of the leaf field conflicts with a pre-existing field of the same name, then that field needs to be renamed first. @@ -105,7 +173,7 @@ pipeline should be used: { "rename" : { "field" : "foo", - "target_field" : "foo.bar"" + "target_field" : "foo.bar" } }, { diff --git a/docs/reference/ingest/processors/enrich.asciidoc b/docs/reference/ingest/processors/enrich.asciidoc index a7cd21a408663..85c4e25d8d0be 100644 --- a/docs/reference/ingest/processors/enrich.asciidoc +++ b/docs/reference/ingest/processors/enrich.asciidoc @@ -20,7 +20,7 @@ See <> section for more information about how | `ignore_missing` | no | false | If `true` and `field` does not exist, the processor quietly exits without modifying the document | `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. | `max_matches` | no | 1 | The maximum number of matched documents to include under the configured target field. The `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object. In order to avoid documents getting too large, the maximum allowed value is 128. -| `shape_relation` | no | `INTERSECTS` | A spatial relation operator used to match the <> of incoming documents to documents in the enrich index. This option is only used for `geo_match` enrich policy types. The <> mapping parameter determines which spatial relation operators are available. See <<_spatial_relations>> for operators and more information. +| `shape_relation` | no | `INTERSECTS` | A spatial relation operator used to match the <> of incoming documents to documents in the enrich index. This option is only used for `geo_match` enrich policy types. See <<_spatial_relations>> for operators and more information. include::common-options.asciidoc[] |====== diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index c83a44dbcd5e2..023894da48a2d 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -4,24 +4,24 @@ GeoIP ++++ -The `geoip` processor adds information about the geographical location of IP addresses, based on data from the Maxmind databases. -This processor adds this information by default under the `geoip` field. The `geoip` processor can resolve both IPv4 and -IPv6 addresses. +The `geoip` processor adds information about the geographical location of an +IPv4 or IPv6 address. 
-The `ingest-geoip` module ships by default with the GeoLite2 City, GeoLite2 Country and GeoLite2 ASN GeoIP2 databases from Maxmind made available -under the CCA-ShareAlike 4.0 license. For more details see, http://dev.maxmind.com/geoip/geoip2/geolite2/ +[[geoip-automatic-updates]] +By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 +ASN GeoIP2 databases from +http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the +CCA-ShareAlike 4.0 license. {es} automatically downloads updates for +these databases from the Elastic GeoIP endpoint: +https://geoip.elastic.co/v1/database. To get download statistics for these +updates, use the <>. -The `geoip` processor can run with other city, country and ASN GeoIP2 databases -from Maxmind. On {ess} deployments, custom database files must be uploaded using -a {cloud}/ec-custom-bundles.html[custom bundle]. On self-managed deployments, -custom database files must be copied into the `ingest-geoip` config -directory located at `$ES_CONFIG/ingest-geoip`. +If your cluster can't connect to the Elastic GeoIP endpoint or you want to +manage your own updates, see <>. -Custom database files must be -stored uncompressed and the extension must be `-City.mmdb`, `-Country.mmdb`, or -`-ASN.mmdb` to indicate the type of the database. The -`database_file` processor option is used to specify the filename of the custom -database to use for the processor. +If {es} can't connect to the endpoint for 30 days all updated databases will become +invalid. {es} will stop enriching documents with geoip data and will add `tags: ["_geoip_expired_database"]` +field instead. [[using-ingest-geoip]] ==== Using the `geoip` Processor in a Pipeline @@ -32,7 +32,7 @@ database to use for the processor. |====== | Name | Required | Default | Description | `field` | yes | - | The field to get the ip address from for the geographical lookup. -| `target_field` | no | geoip | The field that will hold the geographical information looked up from the Maxmind database. +| `target_field` | no | geoip | The field that will hold the geographical information looked up from the MaxMind database. | `database_file` | no | GeoLite2-City.mmdb | The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the `ingest-geoip` config directory. | `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document @@ -303,6 +303,82 @@ GET /my_ip_locations/_search // TESTRESPONSE[s/"took" : 3/"took" : $body.took/] //// +[[manage-geoip-database-updates]] +==== Manage your own GeoIP2 database updates + +If you can't <> your GeoIP2 +databases from the Elastic endpoint, you have a few other options: + +* <> +* <> +* <> + +[[use-proxy-geoip-endpoint]] +**Use a proxy endpoint** + +If you can't connect directly to the Elastic GeoIP endpoint, consider setting up +a secure proxy. You can then specify the proxy endpoint URL in the +<> setting +of each node’s `elasticsearch.yml` file. + +[[use-custom-geoip-endpoint]] +**Use a custom endpoint** + +You can create a service that mimics the Elastic GeoIP endpoint. You can then +get automatic updates from this service. + +. 
Download your `.mmdb` database files from the +http://dev.maxmind.com/geoip/geoip2/geolite2[MaxMind site]. + +. Copy your database files to a single directory. + +. From your {es} directory, run: ++ +[source,sh] +---- +./bin/elasticsearch-geoip -s my/source/dir [-t target/directory] +---- + +. Serve the static database files from your directory. For example, you can use +Docker to serve the files from an nginx server: ++ +[source,sh] +---- +docker run -v my/source/dir:/usr/share/nginx/html:ro nginx +---- + +. Specify the service's endpoint URL in the +<> setting +of each node’s `elasticsearch.yml` file. ++ +By default, {es} checks the endpoint for updates every three days. To use +another polling interval, use the <> to set +<>. + +[[manually-update-geoip-databases]] +**Manually update your GeoIP2 databases** + +. Use the <> to set +`ingest.geoip.downloader.enabled` to `false`. This disables automatic updates +that may overwrite your database changes. This also deletes all downloaded +databases. + +. Download your `.mmdb` database files from the +http://dev.maxmind.com/geoip/geoip2/geolite2[MaxMind site]. ++ +You can also use custom city, country, and ASN `.mmdb` files. These files must +be uncompressed and use the respective `-City.mmdb`, `-Country.mmdb`, or +`-ASN.mmdb` extensions. + +. On {ess} deployments upload database using +a {cloud}/ec-custom-bundles.html[custom bundle]. + +. On self-managed deployments copy the database files to `$ES_CONFIG/ingest-geoip`. + +. In your `geoip` processors, configure the `database_file` parameter to use a +custom database file. + [[ingest-geoip-settings]] ===== Node Settings @@ -313,3 +389,28 @@ The `geoip` processor supports the following setting: The maximum number of results that should be cached. Defaults to `1000`. Note that these settings are node settings and apply to all `geoip` processors, i.e. there is one cache for all defined `geoip` processors. + +[[geoip-cluster-settings]] +===== Cluster settings + +[[ingest-geoip-downloader-enabled]] +`ingest.geoip.downloader.enabled`:: +(<>, Boolean) +If `true`, {es} automatically downloads and manages updates for GeoIP2 databases +from the `ingest.geoip.downloader.endpoint`. If `false`, {es} does not download +updates and deletes all downloaded databases. Defaults to `true`. + +[[ingest-geoip-downloader-endpoint]] +`ingest.geoip.downloader.endpoint`:: +(<>, string) +Endpoint URL used to download updates for GeoIP2 databases. Defaults to +`https://geoip.elastic.co/v1/database`. {es} stores downloaded database files in +each node's <> at +`$ES_TMPDIR/geoip-databases/`. + +[[ingest-geoip-downloader-poll-interval]] +`ingest.geoip.downloader.poll.interval`:: +(<>, <>) +How often {es} checks for GeoIP2 database updates at the +`ingest.geoip.downloader.endpoint`. Must be greater than `1d` (one day). Defaults +to `3d` (three days). diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc index 5b5cf66444eb0..8b2a234bd9b7a 100644 --- a/docs/reference/ingest/processors/inference.asciidoc +++ b/docs/reference/ingest/processors/inference.asciidoc @@ -125,7 +125,7 @@ classes to the `probabilities` field. Both fields are contained in the `target_field` results object. Refer to the -{ml-docs}/ml-lang-ident.html#ml-lang-ident-example[language identification] +{ml-docs}/ml-dfa-lang-ident.html#ml-lang-ident-example[language identification] trained model documentation for a full example. 
diff --git a/docs/reference/ingest/processors/json.asciidoc b/docs/reference/ingest/processors/json.asciidoc index 2ccefbb0ef17c..faf93f67b854f 100644 --- a/docs/reference/ingest/processors/json.asciidoc +++ b/docs/reference/ingest/processors/json.asciidoc @@ -10,10 +10,12 @@ Converts a JSON string into a structured JSON object. .Json Options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The field to be parsed. -| `target_field` | no | `field` | The field that the converted structured object will be written into. Any existing content in this field will be overwritten. -| `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. +| Name | Required | Default | Description +| `field` | yes | - | The field to be parsed. +| `target_field` | no | `field` | The field that the converted structured object will be written into. Any existing content in this field will be overwritten. +| `add_to_root` | no | false | Flag that forces the parsed JSON to be added at the top level of the document. `target_field` must not be set when this option is chosen. +| `add_to_root_conflict_strategy` | no | `replace` | When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden. When set to `merge`, conflicting fields will be merged. Only applicable if `add_to_root` is set to `true`. +| `allow_duplicate_keys` | no | false | When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys. Instead, the last encountered value for any duplicate key wins. include::common-options.asciidoc[] |====== diff --git a/docs/reference/ingest/processors/kv.asciidoc b/docs/reference/ingest/processors/kv.asciidoc index f8e251925af6e..b128be27cd475 100644 --- a/docs/reference/ingest/processors/kv.asciidoc +++ b/docs/reference/ingest/processors/kv.asciidoc @@ -27,10 +27,10 @@ TIP: Using the KV Processor can result in field names that you cannot control. C [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | The field to be parsed +| `field` | yes | - | The field to be parsed. Supports <>. | `field_split` | yes | - | Regex pattern to use for splitting key-value pairs | `value_split` | yes | - | Regex pattern to use for splitting the key from the value within a key-value pair -| `target_field` | no | `null` | The field to insert the extracted keys into. Defaults to the root of the document +| `target_field` | no | `null` | The field to insert the extracted keys into. Defaults to the root of the document. Supports <>. | `include_keys` | no | `null` | List of keys to filter and insert into document. Defaults to including all keys | `exclude_keys` | no | `null` | List of keys to exclude from document | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index a86ef841756e0..dc069d54d53e8 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -164,7 +164,7 @@ embroidery_ needles. ===== But wait, there’s more Want to automate the analysis of your time series data? 
You can use -{ml-docs}/ml-overview.html[machine learning] features to create accurate +{ml-docs}/ml-ad-overview.html[machine learning] features to create accurate baselines of normal behavior in your data and identify anomalous patterns. With machine learning, you can detect: diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index a53d460e4f36c..75760943e353c 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ b/docs/reference/mapping/fields/routing-field.asciidoc @@ -2,12 +2,17 @@ === `_routing` field A document is routed to a particular shard in an index using the following -formula: +formulas: + + routing_factor = num_routing_shards / num_primary_shards + shard_num = (hash(_routing) % num_routing_shards) / routing_factor - shard_num = hash(_routing) % num_primary_shards - -The default value used for `_routing` is the document's <>. +`num_routing_shards` is the value of the +<> index +setting. `num_primary_shards` is the value of the +<> index setting. +The default `_routing` value is the document's <>. Custom routing patterns can be implemented by specifying a custom `routing` value per document. For instance: @@ -118,9 +123,10 @@ This is done by providing the index level setting <> setting, which -limits the maximum number of <> in a query. +limits the maximum number of clauses in a query. ==== + [TIP] diff --git a/docs/reference/mapping/params/eager-global-ordinals.asciidoc b/docs/reference/mapping/params/eager-global-ordinals.asciidoc index 61ab357e09c0b..27c6f94183134 100644 --- a/docs/reference/mapping/params/eager-global-ordinals.asciidoc +++ b/docs/reference/mapping/params/eager-global-ordinals.asciidoc @@ -27,8 +27,8 @@ ordinal for each segment. Global ordinals are used if a search contains any of the following components: * Certain bucket aggregations on `keyword`, `ip`, and `flattened` fields. This -includes `terms` aggregations as mentioned above, as well as `composite`, -`diversified_sampler`, and `significant_terms`. +includes `terms` aggregations as mentioned above, as well as +`diversified_sampler` and `significant_terms`. * Bucket aggregations on `text` fields that require <> to be enabled. * Operations on parent and child documents from a `join` field, including diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index e18ee3b1823c2..35d448ec3a267 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -58,10 +58,12 @@ GET my-index-000001/_search <2> The `city` field can be used for full text search. <3> The `city.raw` field can be used for sorting and aggregations -NOTE: Multi-fields do not change the original `_source` field. +You can add multi-fields to an existing field using the +<>. -TIP: New multi-fields can be added to existing -fields using the <>. +A multi-field mapping is completely separate from the parent field's mapping. A +multi-field doesn't inherit any mapping options from its parent field. +Multi-fields don't change the original `_source` field. ==== Multi-fields with multiple analyzers diff --git a/docs/reference/mapping/runtime.asciidoc b/docs/reference/mapping/runtime.asciidoc index 5e71e1b447da9..2c084b3501041 100644 --- a/docs/reference/mapping/runtime.asciidoc +++ b/docs/reference/mapping/runtime.asciidoc @@ -14,6 +14,11 @@ sees runtime fields no differently. You can define runtime fields in the <>. 
Your choice, which is part of the inherent flexibility of runtime fields. +Use the <> parameter on the `_search` API to +<>. Runtime +fields won't display in `_source`, but the `fields` API works for all fields, +even those that were not sent as part of the original `_source`. + Runtime fields are useful when working with log data (see <>), especially when you're unsure about the data structure. Your search speed decreases, but your index size is much @@ -42,6 +47,38 @@ front, and can use runtime fields to amend the mapping at any time. Using runtime fields allows for a smaller index and faster ingest time, which combined use less resources and reduce your operating costs. +[discrete] +[[runtime-incentives]] +=== Incentives +Runtime fields can replace many of the ways you can use scripting with the +`_search` API. How you use a runtime field is impacted by the number of +documents that the included script runs against. For example, if you're using +the `fields` parameter on the `_search` API to +<>, the script +runs only against the top hits just like script fields do. + +You can use <> to access values in `_source` and +return calculated values based on a script valuation. Runtime fields have these +same capabilities, but provide greater flexibility because you can query and +aggregate on runtime fields in a search request. Script fields can only fetch +values. + +Similarly, you could write a <> that +filters documents in a search request based on a script. Runtime fields provide +a very similar feature that is more flexible. You write a script to create +field values and they are available everywhere, such as +<>, <>, and +<>. + +You can also use scripts to <>, but +that same script works exactly the same in a runtime field. + +If you move a script from any of these sections in a search request to a +runtime field that is computing values from the same number of documents, the +performance should be about the same. The performance for these features is +largely dependent upon the calculations that the included script is running and +how many documents the script runs against. + [discrete] [[runtime-compromises]] === Compromises @@ -592,6 +629,7 @@ which still returns in the response: [[runtime-retrieving-fields]] === Retrieve a runtime field + Use the <> parameter on the `_search` API to retrieve the values of runtime fields. Runtime fields won't display in `_source`, but the `fields` API works for all fields, even those that were not sent as part of diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 03d9c731d93b8..dd878aa595170 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -1,20 +1,20 @@ [[geo-point]] -=== Geo-point field type +=== Geopoint field type ++++ -Geo-point +Geopoint ++++ Fields of type `geo_point` accept latitude-longitude pairs, which can be used: -* to find geo-points within a <>, +* to find geopoints within a <>, within a certain <> of a central point, - or within a <> or within a <>. + or within a <> or within a <>. * to aggregate documents <> or by <> from a central point. * to integrate distance into a document's <>. * to <> documents by distance. 
-There are five ways that a geo-point may be specified, as demonstrated below: +There are five ways that a geopoint may be specified, as demonstrated below: [source,console] -------------------------------------------------- @@ -31,7 +31,7 @@ PUT my-index-000001 PUT my-index-000001/_doc/1 { - "text": "Geo-point as an object", + "text": "Geopoint as an object", "location": { <1> "lat": 41.12, "lon": -71.34 @@ -40,25 +40,25 @@ PUT my-index-000001/_doc/1 PUT my-index-000001/_doc/2 { - "text": "Geo-point as a string", + "text": "Geopoint as a string", "location": "41.12,-71.34" <2> } PUT my-index-000001/_doc/3 { - "text": "Geo-point as a geohash", + "text": "Geopoint as a geohash", "location": "drm3btev3e86" <3> } PUT my-index-000001/_doc/4 { - "text": "Geo-point as an array", + "text": "Geopoint as an array", "location": [ -71.34, 41.12 ] <4> } PUT my-index-000001/_doc/5 { - "text": "Geo-point as a WKT POINT primitive", + "text": "Geopoint as a WKT POINT primitive", "location" : "POINT (-71.34 41.12)" <5> } @@ -81,20 +81,20 @@ GET my-index-000001/_search } -------------------------------------------------- -<1> Geo-point expressed as an object, with `lat` and `lon` keys. -<2> Geo-point expressed as a string with the format: `"lat,lon"`. -<3> Geo-point expressed as a geohash. -<4> Geo-point expressed as an array with the format: [ `lon`, `lat`] -<5> Geo-point expressed as a https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] +<1> Geopoint expressed as an object, with `lat` and `lon` keys. +<2> Geopoint expressed as a string with the format: `"lat,lon"`. +<3> Geopoint expressed as a geohash. +<4> Geopoint expressed as an array with the format: [ `lon`, `lat`] +<5> Geopoint expressed as a https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] POINT with the format: `"POINT(lon lat)"` -<6> A geo-bounding box query which finds all geo-points that fall inside the box. +<6> A geo-bounding box query which finds all geopoints that fall inside the box. [IMPORTANT] -.Geo-points expressed as an array or string +.Geopoints expressed as an array or string ================================================== -Please note that string geo-points are ordered as `lat,lon`, while array -geo-points are ordered as the reverse: `lon,lat`. +Please note that string geopoints are ordered as `lat,lon`, while array +geopoints are ordered as the reverse: `lon,lat`. Originally, `lat,lon` was used for both array and string, but the array format was changed early on to conform to the format used by GeoJSON. @@ -121,9 +121,9 @@ The following parameters are accepted by `geo_point` fields: <>:: - If `true`, malformed geo-points are ignored. If `false` (default), - malformed geo-points throw an exception and reject the whole document. - A geo-point is considered malformed if its latitude is outside the range + If `true`, malformed geopoints are ignored. If `false` (default), + malformed geopoints throw an exception and reject the whole document. + A geopoint is considered malformed if its latitude is outside the range -90 <= latitude <= 90, or if its longitude is outside the range -180 <= longitude <= 180. Note that this cannot be set if the `script` parameter is used. @@ -131,10 +131,14 @@ The following parameters are accepted by `geo_point` fields: If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is - ignored. 
If `false`, geo-points containing any more than latitude and longitude + ignored. If `false`, geopoints containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. Note that this cannot be set if the `script` parameter is used. +<>:: + + Should the field be searchable? Accepts `true` (default) and `false`. + <>:: Accepts an geopoint value which is substituted for any explicit `null` values. @@ -161,9 +165,9 @@ The following parameters are accepted by `geo_point` fields: <>, and should emit points as a pair of (lat, lon) double values. -==== Using geo-points in scripts +==== Using geopoints in scripts -When accessing the value of a geo-point in a script, the value is returned as +When accessing the value of a geopoint in a script, the value is returned as a `GeoPoint` object, which allows access to the `.lat` and `.lon` values respectively: diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index caea17a6121a6..89fc7dc20f527 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -1,7 +1,7 @@ [[geo-shape]] -=== Geo-shape field type +=== Geoshape field type ++++ -Geo-shape +Geoshape ++++ The `geo_shape` data type facilitates the indexing of and searching @@ -10,75 +10,20 @@ used when either the data being indexed or the queries being executed contain shapes other than just points. You can query documents using this type using -<>. +a <>. [[geo-shape-mapping-options]] [discrete] ==== Mapping Options -The geo_shape mapping maps geo_json geometry objects to the geo_shape -type. To enable it, users must explicitly map fields to the geo_shape +The `geo_shape` mapping maps GeoJSON geometry objects to the `geo_shape` +type. To enable it, users must explicitly map fields to the `geo_shape` type. [cols="<,<,<",options="header",] |======================================================================= |Option |Description| Default -|`tree` |deprecated[6.6, PrefixTrees no longer used] Name of the PrefixTree -implementation to be used: `geohash` for GeohashPrefixTree and `quadtree` -for QuadPrefixTree. Note: This parameter is only relevant for `term` and -`recursive` strategies. -| `quadtree` - -|`precision` |deprecated[6.6, PrefixTrees no longer used] This parameter may -be used instead of `tree_levels` to set an appropriate value for the -`tree_levels` parameter. The value specifies the desired precision and -Elasticsearch will calculate the best tree_levels value to honor this -precision. The value should be a number followed by an optional distance -unit. Valid distance units include: `in`, `inch`, `yd`, `yard`, `mi`, -`miles`, `km`, `kilometers`, `m`,`meters`, `cm`,`centimeters`, `mm`, -`millimeters`. Note: This parameter is only relevant for `term` and -`recursive` strategies. -| `50m` - -|`tree_levels` |deprecated[6.6, PrefixTrees no longer used] Maximum number -of layers to be used by the PrefixTree. This can be used to control the -precision of shape representations andtherefore how many terms are -indexed. Defaults to the default value of the chosen PrefixTree -implementation. Since this parameter requires a certain level of -understanding of the underlying implementation, users may use the -`precision` parameter instead. However, Elasticsearch only uses the -tree_levels parameter internally and this is what is returned via the -mapping API even if you use the precision parameter. 
Note: This parameter -is only relevant for `term` and `recursive` strategies. -| various - -|`strategy` |deprecated[6.6, PrefixTrees no longer used] The strategy -parameter defines the approach for how to represent shapes at indexing -and search time. It also influences the capabilities available so it -is recommended to let Elasticsearch set this parameter automatically. -There are two strategies available: `recursive`, and `term`. -Recursive and Term strategies are deprecated and will be removed in a -future version. While they are still available, the Term strategy -supports point types only (the `points_only` parameter will be -automatically set to true) while Recursive strategy supports all -shape types. (IMPORTANT: see <> for more -detailed information about these strategies) -| `recursive` - -|`distance_error_pct` |deprecated[6.6, PrefixTrees no longer used] Used as a -hint to the PrefixTree about how precise it should be. Defaults to 0.025 (2.5%) -with 0.5 as the maximum supported value. PERFORMANCE NOTE: This value will -default to 0 if a `precision` or `tree_level` definition is explicitly defined. -This guarantees spatial precision at the level defined in the mapping. This can -lead to significant memory usage for high resolution shapes with low error -(e.g., large shapes at 1m with < 0.001 error). To improve indexing performance -(at the cost of query accuracy) explicitly define `tree_level` or `precision` -along with a reasonable `distance_error_pct`, noting that large shapes will have -greater false positives. Note: This parameter is only relevant for `term` and -`recursive` strategies. -| `0.025` - |`orientation` a|Optional. Vertex order for the shape's coordinates list. @@ -106,15 +51,6 @@ ring (hole) vertices in clockwise order. Individual GeoJSON or WKT documents can override this parameter. | `RIGHT` -|`points_only` |deprecated[6.6, PrefixTrees no longer used] Setting this option to -`true` (defaults to `false`) configures the `geo_shape` field type for point -shapes only (NOTE: Multi-Points are not yet supported). This optimizes index and -search performance for the `geohash` and `quadtree` when it is known that only points -will be indexed. At present geo_shape queries can not be executed on `geo_point` -field types. This option bridges the gap by improving point performance on a -`geo_shape` field so that `geo_shape` queries are optimal on a point only field. -| `false` - |`ignore_malformed` |If true, malformed GeoJSON or WKT shapes are ignored. If false (default), malformed GeoJSON and WKT shapes throw an exception and reject the entire document. @@ -122,7 +58,7 @@ entire document. |`ignore_z_value` |If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`, -geo-points containing any more than latitude and longitude (two dimensions) values throw an exception +geopoints containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. | `true` @@ -139,86 +75,8 @@ GeoShape types are indexed by decomposing the shape into a triangular mesh and indexing each triangle as a 7 dimension point in a BKD tree. This provides near perfect spatial resolution (down to 1e-7 decimal degree precision) since all spatial relations are computed using an encoded vector representation of the -original shape instead of a raster-grid representation as used by the -<> indexing approach. 
Performance of the tessellator primarily -depends on the number of vertices that define the polygon/multi-polygon. While -this is the default indexing technique prefix trees can still be used by setting -the `tree` or `strategy` parameters according to the appropriate -<>. Note that these parameters are now deprecated -and will be removed in a future version. - -*IMPORTANT NOTES* - -`CONTAINS` relation query - when using the new default vector indexing strategy, `geo_shape` -queries with `relation` defined as `contains` are supported for indices created with -ElasticSearch 7.5.0 or higher. - - -[[prefix-trees]] -[discrete] -==== Prefix trees - -deprecated[6.6, PrefixTrees no longer used] To efficiently represent shapes in -an inverted index, Shapes are converted into a series of hashes representing -grid squares (commonly referred to as "rasters") using implementations of a -PrefixTree. The tree notion comes from the fact that the PrefixTree uses multiple -grid layers, each with an increasing level of precision to represent the Earth. -This can be thought of as increasing the level of detail of a map or image at higher -zoom levels. Since this approach causes precision issues with indexed shape, it has -been deprecated in favor of a vector indexing approach that indexes the shapes as a -triangular mesh (see <>). - -Multiple PrefixTree implementations are provided: - -* GeohashPrefixTree - Uses -{wikipedia}/Geohash[geohashes] for grid squares. -Geohashes are base32 encoded strings of the bits of the latitude and -longitude interleaved. So the longer the hash, the more precise it is. -Each character added to the geohash represents another tree level and -adds 5 bits of precision to the geohash. A geohash represents a -rectangular area and has 32 sub rectangles. The maximum number of levels -in Elasticsearch is 24; the default is 9. -* QuadPrefixTree - Uses a -{wikipedia}/Quadtree[quadtree] for grid squares. -Similar to geohash, quad trees interleave the bits of the latitude and -longitude the resulting hash is a bit set. A tree level in a quad tree -represents 2 bits in this bit set, one for each coordinate. The maximum -number of levels for the quad trees in Elasticsearch is 29; the default is 21. - -[[spatial-strategy]] -[discrete] -===== Spatial strategies -deprecated[6.6, PrefixTrees no longer used] The indexing implementation -selected relies on a SpatialStrategy for choosing how to decompose the shapes -(either as grid squares or a tessellated triangular mesh). Each strategy -answers the following: - -* What type of Shapes can be indexed? -* What types of Query Operations and Shapes can be used? -* Does it support more than one Shape per field? - -The following Strategy implementations (with corresponding capabilities) -are provided: - -[cols="<,<,<,<",options="header",] -|======================================================================= -|Strategy |Supported Shapes |Supported Queries |Multiple Shapes - -|`recursive` |<> |`INTERSECTS`, `DISJOINT`, `WITHIN`, `CONTAINS` |Yes -|`term` |<> |`INTERSECTS` |Yes - -|======================================================================= - -[discrete] -===== Accuracy - -`Recursive` and `Term` strategies do not provide 100% accuracy and depending on -how they are configured it may return some false positives for `INTERSECTS`, -`WITHIN` and `CONTAINS` queries, and some false negatives for `DISJOINT` queries. 
-To mitigate this, it is important to select an appropriate value for the tree_levels -parameter and to adjust expectations accordingly. For example, a point may be near -the border of a particular grid cell and may thus not match a query that only matches -the cell right next to it -- even though the shape is very close to the point. +original shape. Performance of the tessellator primarily +depends on the number of vertices that define the polygon/multi-polygon. [discrete] ===== Example @@ -238,33 +96,6 @@ PUT /example -------------------------------------------------- // TESTSETUP -This mapping definition maps the location field to the geo_shape -type using the default vector implementation. It provides -approximately 1e-7 decimal degree precision. - -[discrete] -===== Performance considerations with Prefix Trees - -deprecated[6.6, PrefixTrees no longer used] With prefix trees, -Elasticsearch uses the paths in the tree as terms in the inverted index -and in queries. The higher the level (and thus the precision), the more -terms are generated. Of course, calculating the terms, keeping them in -memory, and storing them on disk all have a price. Especially with higher -tree levels, indices can become extremely large even with a modest amount -of data. Additionally, the size of the features also matters. Big, complex -polygons can take up a lot of space at higher tree levels. Which setting -is right depends on the use case. Generally one trades off accuracy against -index size and query performance. - -The defaults in Elasticsearch for both implementations are a compromise -between index size and a reasonable level of precision of 50m at the -equator. This allows for indexing tens of millions of shapes without -overly bloating the resulting index too much relative to the input size. - -[NOTE] -Geo-shape queries on geo-shapes implemented with PrefixTrees will not be executed if -<> is set to false. - [[input-structure]] [discrete] ==== Input Structure @@ -292,8 +123,6 @@ points. and a LineString). |`N/A` |`BBOX` |`envelope` |A bounding rectangle, or envelope, specified by specifying only the top left and bottom right points. -|`N/A` |`N/A` |`circle` |A circle specified by a center point and radius with -units, which default to `METERS`. |======================================================================= [NOTE] @@ -462,8 +291,8 @@ POST /example/_doc -------------------------------------------------- // TEST[catch:/mapper_parsing_exception/] -An `orientation` parameter can be defined when setting the geo_shape mapping (see <>). This will define vertex -order for the coordinate list on the mapped geo_shape field. It can also be overridden on each document. The following is an example for +An `orientation` parameter can be defined when setting the `geo_shape` mapping (see <>). This will define vertex +order for the coordinate list on the mapped `geo_shape` field. It can also be overridden on each document. 
The following is an example for overriding the orientation on a document: [source,console] @@ -484,7 +313,7 @@ POST /example/_doc [[geo-multipoint]] ===== http://geojson.org/geojson-spec.html#id5[MultiPoint] -The following is an example of a list of geojson points: +The following is an example of a list of GeoJSON points: [source,console] -------------------------------------------------- @@ -513,7 +342,7 @@ POST /example/_doc [[geo-multilinestring]] ===== http://geojson.org/geojson-spec.html#id6[MultiLineString] -The following is an example of a list of geojson linestrings: +The following is an example of a list of GeoJSON linestrings: [source,console] -------------------------------------------------- @@ -544,7 +373,7 @@ POST /example/_doc [[geo-multipolygon]] ===== http://geojson.org/geojson-spec.html#id7[MultiPolygon] -The following is an example of a list of geojson polygons (second polygon contains a hole): +The following is an example of a list of GeoJSON polygons (second polygon contains a hole): [source,console] -------------------------------------------------- @@ -575,7 +404,7 @@ POST /example/_doc [[geo-geometry_collection]] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] -The following is an example of a collection of geojson geometry objects: +The following is an example of a collection of GeoJSON geometry objects: [source,console] -------------------------------------------------- @@ -641,20 +470,14 @@ POST /example/_doc [discrete] ===== Circle -Elasticsearch supports a `circle` type, which consists of a center -point with a radius. - -IMPORTANT: You cannot index the `circle` type using the default -<>. Instead, use a +Neither GeoJSON nor WKT supports a point-radius circle type. Instead, use a <> to approximate the circle as a <>. -*NOTE:* Neither GeoJSON or WKT support a point-radius circle type. - [discrete] ==== Sorting and Retrieving index Shapes Due to the complex input structure and index representation of shapes, it is not currently possible to sort shapes or retrieve their fields -directly. The geo_shape value is only retrievable through the `_source` +directly. The `geo_shape` value is only retrievable through the `_source` field. diff --git a/docs/reference/mapping/types/histogram.asciidoc b/docs/reference/mapping/types/histogram.asciidoc index 3fd0f604bbdb8..0e4b56df53e01 100644 --- a/docs/reference/mapping/types/histogram.asciidoc +++ b/docs/reference/mapping/types/histogram.asciidoc @@ -44,6 +44,7 @@ following aggregations and queries: * <> aggregation * <> aggregation * <> aggregation +* <> aggregation * <> query [[mapping-types-histogram-building-histogram]] diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 322c58fad96ff..173f27df08ce5 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -45,6 +45,8 @@ The following parameters are accepted by `ip` fields: [horizontal] +include::keyword.asciidoc[tag=dimension] + <>:: Should the field be stored on disk in a column-stride fashion, so that it diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index b32c04d43ef79..15852f8a7daff 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -56,6 +56,22 @@ The following parameters are accepted by `keyword` fields: [horizontal] +// tag::dimension[] +`dimension`:: +For internal use by Elastic only. 
Marks the field as a time series dimension. +Accepts `true` or `false` (default). ++ +The <> +index setting limits the number of dimensions in an index. ++ +Dimension fields have the following constraints: ++ +* The `doc_values` and `index` mapping parameters must be `true`. +* Field values cannot be an <>. +// end::dimension[] +* Field values cannot be larger than 1024 bytes. +* The field cannot use a <>. + <>:: Should the field be stored on disk in a column-stride fashion, so that it @@ -152,4 +168,3 @@ The following parameters are accepted by `keyword` fields: include::constant-keyword.asciidoc[] include::wildcard.asciidoc[] - diff --git a/docs/reference/mapping/types/match-only-text.asciidoc b/docs/reference/mapping/types/match-only-text.asciidoc index b3afa99ae54a4..0cb52586f41f0 100644 --- a/docs/reference/mapping/types/match-only-text.asciidoc +++ b/docs/reference/mapping/types/match-only-text.asciidoc @@ -20,10 +20,10 @@ Analysis is not configurable: text is always analyzed with the <> field type if you absolutely need span queries. Other than that, `match_only_text` supports the same queries as `text`. And -like `text`, it does not support sorting and has only limited support for aggretations. +like `text`, it does not support sorting and has only limited support for aggregations. [source,console] --------------------------------- +---- PUT logs { "mappings": { @@ -37,7 +37,7 @@ PUT logs } } } --------------------------------- +---- [discrete] [[match-only-text-params]] diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index 0389900ae5114..4b4be55a4947a 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -119,6 +119,11 @@ The following parameters are accepted by numeric types: Accepts `true` (default) and `false`. Not applicable for `unsigned_long`. Note that this cannot be set if the `script` parameter is used. +include::keyword.asciidoc[tag=dimension] ++ +Of the numeric field types, only `byte`, `short`, `integer`, and `long` fields +support this parameter. + <>:: Should the field be stored on disk in a column-stride fashion, so that it diff --git a/docs/reference/mapping/types/object.asciidoc b/docs/reference/mapping/types/object.asciidoc index 7aa276393af8d..ab20f8b22f557 100644 --- a/docs/reference/mapping/types/object.asciidoc +++ b/docs/reference/mapping/types/object.asciidoc @@ -82,7 +82,7 @@ The following parameters are accepted by `object` fields: <>:: Whether or not new `properties` should be added dynamically - to an existing object. Accepts `true` (default), `false` + to an existing object. Accepts `true` (default), `runtime`, `false` and `strict`. <>:: diff --git a/docs/reference/mapping/types/shape.asciidoc b/docs/reference/mapping/types/shape.asciidoc index a2236ffc8b38f..0251c11e0d3d5 100644 --- a/docs/reference/mapping/types/shape.asciidoc +++ b/docs/reference/mapping/types/shape.asciidoc @@ -18,7 +18,7 @@ You can query documents using this type using [discrete] ==== Mapping Options -Like the <> field type, the `shape` field mapping maps +Like the <> field type, the `shape` field mapping maps http://geojson.org[GeoJSON] or https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] (WKT) geometry objects to the shape type. To enable it, users must explicitly map fields to the shape type. @@ -34,8 +34,8 @@ different ways. 1. Right-hand rule: `right`, `ccw`, `counterclockwise`, 2. Left-hand rule: `left`, `cw`, `clockwise`. 
The default orientation (`counterclockwise`) complies with the OGC standard which defines outer ring vertices in counterclockwise order with inner ring(s) vertices (holes) -in clockwise order. Setting this parameter in the geo_shape mapping explicitly -sets vertex order for the coordinate list of a geo_shape field but can be +in clockwise order. Setting this parameter in the `geo_shape` mapping explicitly +sets vertex order for the coordinate list of a `geo_shape` field but can be overridden in each individual GeoJSON or WKT document. | `ccw` @@ -46,7 +46,7 @@ entire document. |`ignore_z_value` |If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`, -geo-points containing any more than latitude and longitude (two dimensions) values throw an exception +geopoints containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. | `true` @@ -279,7 +279,7 @@ POST /example/_doc [[multipoint]] ===== http://geojson.org/geojson-spec.html#id5[MultiPoint] -The following is an example of a list of geojson points: +The following is an example of a list of GeoJSON points: [source,console] -------------------------------------------------- @@ -308,7 +308,7 @@ POST /example/_doc [[multilinestring]] ===== http://geojson.org/geojson-spec.html#id6[MultiLineString] -The following is an example of a list of geojson linestrings: +The following is an example of a list of GeoJSON linestrings: [source,console] -------------------------------------------------- @@ -339,7 +339,7 @@ POST /example/_doc [[multipolygon]] ===== http://geojson.org/geojson-spec.html#id7[MultiPolygon] -The following is an example of a list of geojson polygons (second polygon contains a hole): +The following is an example of a list of GeoJSON polygons (second polygon contains a hole): [source,console] -------------------------------------------------- @@ -370,7 +370,7 @@ POST /example/_doc [[geometry_collection]] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] -The following is an example of a collection of geojson geometry objects: +The following is an example of a collection of GeoJSON geometry objects: [source,console] -------------------------------------------------- diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index 6a368ae903b44..38f8c02cadf11 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -16,6 +16,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> * <> * <> @@ -91,6 +92,7 @@ include::migrate_8_0/allocation.asciidoc[] include::migrate_8_0/analysis.asciidoc[] include::migrate_8_0/breaker.asciidoc[] include::migrate_8_0/cluster.asciidoc[] +include::migrate_8_0/ccr.asciidoc[] include::migrate_8_0/discovery.asciidoc[] include::migrate_8_0/eql.asciidoc[] include::migrate_8_0/http.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/aggregations.asciidoc b/docs/reference/migration/migrate_8_0/aggregations.asciidoc index afcbd6a5bf69c..2daaac9a27076 100644 --- a/docs/reference/migration/migrate_8_0/aggregations.asciidoc +++ b/docs/reference/migration/migrate_8_0/aggregations.asciidoc @@ -20,4 +20,22 @@ Use unique values in the `percents` parameter of the `percentiles` aggregation. Requests containing duplicate values in the `percents` parameter will return an error. 
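For illustration only (the index and field names below are placeholders, not part of this change), a `percentiles` request that satisfies this rule lists each percentile once:

[source,console]
----
GET my-index-000001/_search
{
  "size": 0,
  "aggs": {
    "load_time_percentiles": {
      "percentiles": {
        "field": "load_time",
        "percents": [ 50, 95, 99 ]
      }
    }
  }
}
----

Repeating a value in `percents`, for example `[ 95, 95, 99 ]`, is rejected in 8.0.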
==== +[[date-histogram-interval]] +.The `date_histogram` aggregation's `interval` parameter is no longer valid. +[%collapsible] +==== +*Details* + +It is now an error to specify the `interval` parameter to the +{ref}/search-aggregations-bucket-datehistogram-aggregation.html[`date_histogram` +aggregation] or the +{ref}/search-aggregations-bucket-composite-aggregation.html#_date_histogram[`composite +date_histogram` source]. Instead, please use either `calendar_interval` or +`fixed_interval` as appropriate. + +*Impact* + +Uses of the `interval` parameter in either the `date_histogram` aggregation or +the `date_histogram` composite source will now generate an error. Instead, +please use the more specific `fixed_interval` or `calendar_interval` +parameters. +==== // end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/allocation.asciidoc b/docs/reference/migration/migrate_8_0/allocation.asciidoc index 43b92012833e7..0b0bf42e96597 100644 --- a/docs/reference/migration/migrate_8_0/allocation.asciidoc +++ b/docs/reference/migration/migrate_8_0/allocation.asciidoc @@ -6,9 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - [[breaking_80_allocation_change_flood_stage_block_always_removed]] .The automatic removal of flood-stage blocks is no longer optional. [%collapsible] ==== *Details* + @@ -46,3 +43,4 @@ Discontinue use of the `cluster.routing.allocation.disk.include_relocations` setting. Specifying this setting in `elasticsearch.yml` will result in an error on startup. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/analysis.asciidoc b/docs/reference/migration/migrate_8_0/analysis.asciidoc index d9473aedfe38c..9397dc317d367 100644 --- a/docs/reference/migration/migrate_8_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_8_0/analysis.asciidoc @@ -6,9 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - [[ngram-edgengram-filter-names-removed]] .The `nGram` and `edgeNGram` token filter names have been removed. [%collapsible] ==== *Details* + @@ -37,3 +34,4 @@ emit a deprecation warning. The tokenizer name should be changed to the fully eq Use the `ngram` and `edge_ngram` tokenizers. Requests to create new indices using the `nGram` and `edgeNGram` tokenizer names will return an error. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/api.asciidoc b/docs/reference/migration/migrate_8_0/api.asciidoc index 174d490b98742..5560cd8aba11a 100644 --- a/docs/reference/migration/migrate_8_0/api.asciidoc +++ b/docs/reference/migration/migrate_8_0/api.asciidoc @@ -4,10 +4,8 @@ //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide -//tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] +//tag::notable-breaking-changes[] .The cat node API's `local` query parameter has been removed. [%collapsible] ==== @@ -74,10 +72,10 @@ include this parameter will return an error. [%collapsible] ==== *Details* + -The {ml} <> is deprecated starting in 7.11.0 +The {ml} {ref}/ml-post-data.html[post data to jobs API] is deprecated starting in 7.11.0 and will be removed in a future major version. *Impact* + -Use <> instead. - +Use {ref}/ml-apis.html#ml-api-datafeed-endpoint[{dfeeds}] instead.
==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/breaker.asciidoc b/docs/reference/migration/migrate_8_0/breaker.asciidoc index f72d2b91d1b5e..d3c1be8d0dac2 100644 --- a/docs/reference/migration/migrate_8_0/breaker.asciidoc +++ b/docs/reference/migration/migrate_8_0/breaker.asciidoc @@ -2,6 +2,9 @@ [[breaking_80_breaker_changes]] ==== Circuit breaker changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + //tag::notable-breaking-changes[] .The `in_flight_requests` stat has been renamed `inflight_requests` in logs and diagnostic APIs. [%collapsible] diff --git a/docs/reference/migration/migrate_8_0/ccr.asciidoc b/docs/reference/migration/migrate_8_0/ccr.asciidoc new file mode 100644 index 0000000000000..f0c50afc94560 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/ccr.asciidoc @@ -0,0 +1,20 @@ +[discrete] +[[breaking_80_ccr_changes]] +==== {ccr-cap} ({ccr-init}) changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] +.Remote system indices are not followed automatically if they match an auto-follow pattern. +[%collapsible] +==== +*Details* + +Remote system indices matching an {ref}/ccr-auto-follow.html[auto-follow +pattern] won't be configured as a follower index automatically. + +*Impact* + +Explicitly {ref}/ccr-put-follow.html[create a follower index] to follow a remote +system index if that's the wanted behaviour. +==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/cluster.asciidoc b/docs/reference/migration/migrate_8_0/cluster.asciidoc index d486464f96940..6e25193c6d82e 100644 --- a/docs/reference/migration/migrate_8_0/cluster.asciidoc +++ b/docs/reference/migration/migrate_8_0/cluster.asciidoc @@ -6,9 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - .The voting configuration exclusions API endpoint has changed. [%collapsible] ==== @@ -35,3 +32,4 @@ time out. *Impact* + Do not set `cluster.join.timeout` in your `elasticsearch.yml` file. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/discovery.asciidoc b/docs/reference/migration/migrate_8_0/discovery.asciidoc index a1fc0eaf8b2fe..447e819b8bb01 100644 --- a/docs/reference/migration/migrate_8_0/discovery.asciidoc +++ b/docs/reference/migration/migrate_8_0/discovery.asciidoc @@ -6,10 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - - .`discovery.zen` settings have been removed. [%collapsible] ==== @@ -46,3 +42,4 @@ are no longer supported. In particular, this includes: Discontinue use of the `discovery.zen` settings. Specifying these settings in `elasticsearch.yml` will result in an error on startup. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/eql.asciidoc b/docs/reference/migration/migrate_8_0/eql.asciidoc index 5b38a9610804b..ffd58c960032b 100644 --- a/docs/reference/migration/migrate_8_0/eql.asciidoc +++ b/docs/reference/migration/migrate_8_0/eql.asciidoc @@ -2,6 +2,9 @@ [[breaking_80_eql_changes]] ==== EQL changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + //tag::notable-breaking-changes[] .The `wildcard` function has been removed. 
[%collapsible] @@ -10,7 +13,6 @@ The `wildcard` function was deprecated in {es} 7.13.0 and has been removed. *Impact* + -Use the <> or -<> keyword instead. +Use the `like` or `regex` {ref}/eql-syntax.html#eql-syntax-pattern-comparison-keywords[keywords] instead. ==== -// end::notable-breaking-changes[] \ No newline at end of file +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/http.asciidoc b/docs/reference/migration/migrate_8_0/http.asciidoc index 590133ae7646a..dc009f9e43a08 100644 --- a/docs/reference/migration/migrate_8_0/http.asciidoc +++ b/docs/reference/migration/migrate_8_0/http.asciidoc @@ -4,10 +4,8 @@ //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide -//tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] +//tag::notable-breaking-changes[] .The `http.content_type.required` setting has been removed. [%collapsible] ==== @@ -51,3 +49,4 @@ Update your workflow and applications to assume `+` in a URL is encoded as Specifying this property in `elasticsearch.yml` will result in an error on startup. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/ilm.asciidoc b/docs/reference/migration/migrate_8_0/ilm.asciidoc index 033e8f05b6027..b41947528c4ff 100644 --- a/docs/reference/migration/migrate_8_0/ilm.asciidoc +++ b/docs/reference/migration/migrate_8_0/ilm.asciidoc @@ -1,14 +1,11 @@ [discrete] [[breaking_80_ilm_changes]] -==== {ilm-cap} changes +==== {ilm-cap} ({ilm-init}) changes //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - [[ilm-poll-interval-limit]] .The `indices.lifecycle.poll_interval` setting must be greater than `1s`. [%collapsible] @@ -41,3 +38,4 @@ renamed to `ilm` to match the package rename inside the {es} code. Update your workflow and applications to use the `ilm` package in place of `indexlifecycle`. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/indices.asciidoc b/docs/reference/migration/migrate_8_0/indices.asciidoc index 7ba4155a5f085..2bfa618fbbf77 100644 --- a/docs/reference/migration/migrate_8_0/indices.asciidoc +++ b/docs/reference/migration/migrate_8_0/indices.asciidoc @@ -6,8 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] -//end::notable-breaking-changes[] - .The deprecated `_upgrade` API has been removed. [%collapsible] ==== @@ -110,10 +108,12 @@ include these settings will return an error. When closing an index in earlier versions, by default {es} would not wait for the shards of the closed index to be properly assigned before returning. From version 8.0 onwards the default behaviour is to wait for shards to be assigned -according to the <>. +according to the +{ref}/docs-index_.html#index-wait-for-active-shards[`index.write.wait_for_active_shards` +index setting]. *Impact* + Accept the new behaviour, or specify `?wait_for_active_shards=0` to preserve the old behaviour if needed. 
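As a minimal sketch of the second option (the index name is a placeholder, not part of this change), a close request that keeps the old, non-waiting behaviour:

[source,console]
----
POST /my-index-000001/_close?wait_for_active_shards=0
----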
==== +//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/java.asciidoc b/docs/reference/migration/migrate_8_0/java.asciidoc index 4b436bac0333c..ec5b9f7617542 100644 --- a/docs/reference/migration/migrate_8_0/java.asciidoc +++ b/docs/reference/migration/migrate_8_0/java.asciidoc @@ -6,9 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - .Changes to `Fuzziness`. [%collapsible] ==== @@ -42,3 +39,4 @@ testability. *Impact* + No action needed. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/mappings.asciidoc b/docs/reference/migration/migrate_8_0/mappings.asciidoc index 3d6ac3e24f4a5..3e88cc6f65c72 100644 --- a/docs/reference/migration/migrate_8_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_8_0/mappings.asciidoc @@ -6,10 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - - .The maximum number of completion contexts per field is now 10. [%collapsible] ==== @@ -79,7 +75,6 @@ The `boost` setting should be removed from templates and mappings. Use boosts directly on queries instead. ==== -//tag::notable-breaking-changes[] .Java-time date formats replace joda-time formats. [%collapsible] ==== @@ -94,22 +89,23 @@ mappings with java-time formats. For a detailed migration guide, see the {ref}/migrate-to-java-time.html[Java time migration guide]. ==== -// end::notable-breaking-changes[] [[geo-shape-strategy]] -.The `strategy` parameter on `geo_shape` mappings now rejects unknown strategies. +.Several `geo_shape` mapping parameters have been removed. [%collapsible] ==== *Details* + -The only permissible values for the `strategy` parameter on `geo_shape` mappings -are `term` and `recursive`. In 7.x if a non-permissible value was used as a -parameter here, the mapping would silently fall back to using `recursive`. The -mapping will now be rejected instead. +The following `geo_shape` mapping parameters were deprecated in 6.6: + +* `tree` +* `tree_levels` +* `strategy` +* `distance_error_pct` + +These parameters have been removed in 8.0.0. *Impact* + -This will have no impact on existing mappings created with non-permissible -strategy values, as they will already be serializing themselves as if they -had been configured as `recursive`. New indexes will need to use one of the -permissible strategies, or preferably not define a strategy at all and use -the far more efficient BKD index. +In 8.0, you can no longer create mappings that include these parameters. +However, 7.x indices that use these mapping parameters will continue to work. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/network.asciidoc b/docs/reference/migration/migrate_8_0/network.asciidoc index 72b9ef299884b..158ee70f70a2e 100644 --- a/docs/reference/migration/migrate_8_0/network.asciidoc +++ b/docs/reference/migration/migrate_8_0/network.asciidoc @@ -4,10 +4,8 @@ //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide -//tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] +//tag::notable-breaking-changes[] .The `network.tcp.connect_timeout` setting has been removed. [%collapsible] ==== @@ -22,3 +20,4 @@ timeout for client connections. Discontinue use of the `network.tcp.connect_timeout` setting in `elasticsearch.yml` will result in an error on startup. 
==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc index 0dc5b05d5a0bc..58b730bc28e32 100644 --- a/docs/reference/migration/migrate_8_0/node.asciidoc +++ b/docs/reference/migration/migrate_8_0/node.asciidoc @@ -4,10 +4,8 @@ //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide -//tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] +//tag::notable-breaking-changes[] .The `node.max_local_storage_nodes` setting has been removed. [%collapsible] ==== @@ -158,3 +156,4 @@ open or closed, at startup time. Reindex closed indices created in {es} 6.x or before with {es} 7.x if they need to be carried forward to {es} 8.x. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/packaging.asciidoc b/docs/reference/migration/migrate_8_0/packaging.asciidoc index 8399f8f7ed07a..2e5ef1146cf3b 100644 --- a/docs/reference/migration/migrate_8_0/packaging.asciidoc +++ b/docs/reference/migration/migrate_8_0/packaging.asciidoc @@ -2,6 +2,9 @@ [[breaking_80_packaging_changes]] ==== Packaging changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + //tag::notable-breaking-changes[] .Java 11 is required. [%collapsible] @@ -14,9 +17,7 @@ line tools. Use Java 11 or higher. Attempts to run {es} 8.0 using earlier Java versions will fail. ==== -//end::notable-breaking-changes[] -//tag::notable-breaking-changes[] .JAVA_HOME is no longer supported. [%collapsible] ==== diff --git a/docs/reference/migration/migrate_8_0/reindex.asciidoc b/docs/reference/migration/migrate_8_0/reindex.asciidoc index 8682259e512ab..26d103ff21d8e 100644 --- a/docs/reference/migration/migrate_8_0/reindex.asciidoc +++ b/docs/reference/migration/migrate_8_0/reindex.asciidoc @@ -6,8 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] -//end::notable-breaking-changes[] - .Reindex from remote now re-encodes URL-encoded index names. [%collapsible] ==== @@ -52,3 +50,4 @@ Similarly, the `size` parameter has been renamed to `max_docs` for Use the replacement parameters. Requests containing the `size` parameter will return an error. ==== +//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/rollup.asciidoc b/docs/reference/migration/migrate_8_0/rollup.asciidoc index fc78d5bc8eccd..4cf1d448fe233 100644 --- a/docs/reference/migration/migrate_8_0/rollup.asciidoc +++ b/docs/reference/migration/migrate_8_0/rollup.asciidoc @@ -6,9 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - .The StartRollupJob endpoint now returns a success status if a job has already started. [%collapsible] ==== @@ -26,3 +23,4 @@ attempting to start a rollup job means the job is in an actively started state. The request itself may have started the job, or it was previously running and so the request had no effect. 
==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/search.asciidoc b/docs/reference/migration/migrate_8_0/search.asciidoc index 98d07d9f23d0b..6ddffbb566eb5 100644 --- a/docs/reference/migration/migrate_8_0/search.asciidoc +++ b/docs/reference/migration/migrate_8_0/search.asciidoc @@ -1,6 +1,6 @@ [discrete] [[breaking_80_search_changes]] -==== Search Changes +==== Search changes //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide @@ -20,7 +20,22 @@ Aggregating and sorting on `_id` should be avoided. As an alternative, the `_id` field's contents can be duplicated into another field with docvalues enabled (note that this does not apply to auto-generated IDs). ==== -//end::notable-breaking-changes[] + +[[max_clause_count_change]] +.The `indices.query.bool.max_clause_count` setting now limits all query clauses. +[%collapsible] +==== +*Details* + +Previously, the `indices.query.bool.max_clause_count` setting applied to the number +of clauses of a single `bool` query. It now applies to the total number of +clauses of the rewritten query. To reduce the chance of breaking existing queries, its +default value has been bumped from 1024 to 4096. + +*Impact* + +Queries with many clauses should be avoided whenever possible. If you have already +increased this setting to accommodate heavy queries, you might +need to increase it further so that those queries keep working. +==== .Search-related REST API endpoints containing mapping types have been removed. [%collapsible] ==== @@ -173,5 +188,17 @@ If you query date fields without a specified `format`, check if the values in yo actually meant to be milliseconds-since-epoch and use a numeric value in this case. If not, use a string value which gets parsed by either the date format set on the field in the mappings or by `strict_date_optional_time` by default. +==== +.The `geo_bounding_box` query's `type` parameter has been removed. +[%collapsible] ==== +*Details* + +The `geo_bounding_box` query's `type` parameter was deprecated in 7.14.0 and has +been removed in 8.0.0. This parameter is a no-op and has no effect on the query. + +*Impact* + +Discontinue use of the `type` parameter. `geo_bounding_box` queries that include +this parameter will return an error. +==== +//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index 3f90ada310b7e..e9179430e16e8 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -51,7 +51,6 @@ to use the new names and to possibly account for gzip archives instead of plaint The Docker build of Elasticsearch is not affected since it logs on stdout where rollover is not performed. ==== -// end::notable-breaking-changes[] [[accept-default-password-removed]] .The `accept_default_password` setting has been removed. [%collapsible] ==== @@ -115,7 +114,7 @@ result in an error on startup. [discrete] [[ssl-validation-changes]] -==== SSL/TLS configuration validation +===== SSL/TLS configuration validation .The `xpack.security.transport.ssl.enabled` setting is now required to configure `xpack.security.transport.ssl` settings. [%collapsible] ==== @@ -233,9 +232,35 @@ settings. If a certificate and key are not provided, {es} will return an error on startup.
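For reference, a minimal `elasticsearch.yml` sketch that satisfies both requirements, explicitly enabling transport SSL and providing a certificate and key (the file paths below are placeholders, not part of this change):

[source,yaml]
----
# Explicitly enable TLS on the transport layer; required before other
# xpack.security.transport.ssl.* settings are accepted.
xpack.security.transport.ssl.enabled: true
# A certificate and key (or a keystore) must be provided when TLS is enabled.
xpack.security.transport.ssl.key: certs/node01.key
xpack.security.transport.ssl.certificate: certs/node01.crt
xpack.security.transport.ssl.certificate_authorities: [ "certs/ca.crt" ]
----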
==== +[discrete] +[[ssl-misc-changes]] +===== Other SSL/TLS changes + +.PKCS#11 keystores and truststores cannot be configured in `elasticsearch.yml`. +[%collapsible] +==== +*Details* + +The settings `*.ssl.keystore.type` and `*.ssl.truststore.type` no longer accept "PKCS11" as a valid type. +This applies to all SSL settings in Elasticsearch, including + +- `xpack.security.http.keystore.type` +- `xpack.security.transport.keystore.type` +- `xpack.security.http.truststore.type` +- `xpack.security.transport.truststore.type` + +The same applies to the SSL settings for security realms, watcher, and monitoring. + +Use of a PKCS#11 keystore or truststore as the JRE's default store is not affected. + +*Impact* + +If you have a PKCS#11 keystore configured within your `elasticsearch.yml` file, you must remove that +configuration and switch to a supported keystore type, or configure your PKCS#11 keystore as the +JRE default store. +==== + [discrete] [[builtin-users-changes]] -==== Changes to built-in users +===== Changes to built-in users .The `kibana` user has been replaced by `kibana_system`. [%collapsible] ==== @@ -268,7 +293,7 @@ user password. You must explicitly set a password for the `kibana_system` user. [discrete] [[builtin-roles-changes]] -==== Changes to built-in roles +===== Changes to built-in roles .The `kibana_user` role has been renamed `kibana_admin`. [%collapsible] ==== @@ -282,3 +307,34 @@ renamed to better reflect its intended use. Assign users with the `kibana_user` role to the `kibana_admin` role. Discontinue use of the `kibana_user` role. ==== +// end::notable-breaking-changes[] + +// These are non-notable changes + +[discrete] +// This change is not notable because it should not have any impact on upgrades +// However we document it here out of an abundance of caution +[[fips-default-hash-changed]] +===== Changes to FIPS 140 mode +.When FIPS mode is enabled, the default password hash is now PBKDF2_STRETCH +[%collapsible] +==== +*Details* + +If `xpack.security.fips_mode.enabled` is true (see <>), +the value of `xpack.security.authc.password_hashing.algorithm` now defaults to +`pbkdf2_stretch`. + +In earlier versions, this setting would always default to `bcrypt` and a runtime +check would prevent a node from starting unless the value was explicitly set to +a "pbkdf2" variant. + +There is no change for clusters that do not enable FIPS 140 mode. + +*Impact* + +This change should not have any impact on upgraded nodes. +Any node with an explicitly configured value for the password hashing algorithm +will continue to use that configured value. +Any node that did not have an explicitly configured password hashing algorithm in +{es} 6.x or {es} 7.x would have failed to start. +==== + diff --git a/docs/reference/migration/migrate_8_0/settings.asciidoc b/docs/reference/migration/migrate_8_0/settings.asciidoc index 9202ef52b16cf..f3787d274edba 100644 --- a/docs/reference/migration/migrate_8_0/settings.asciidoc +++ b/docs/reference/migration/migrate_8_0/settings.asciidoc @@ -6,8 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] -//end::notable-breaking-changes[] - [[search-remote-settings-removed]] .The `search.remote.*` settings have been removed. [%collapsible] ==== @@ -197,8 +195,8 @@ operation explicitly name the indices it intends to modify. *Impact* + If you would like to use wildcard patterns for destructive actions, set -`action.destructive_requires_name` to `false` using the <> API.
+`action.destructive_requires_name` to `false` using the +{ref}/cluster-update-settings.html[cluster update settings API]. ==== .Legacy role settings have been removed. [%collapsible] ==== @@ -226,7 +224,7 @@ Discontinue use of the removed settings. Specifying these settings in ==== [[system-call-filter-setting]] -.System call filter setting removed +.The system call filter setting has been removed. [%collapsible] ==== *Details* + @@ -244,14 +242,15 @@ configuration will result in an error on startup. ==== [[tier-filter-setting]] -.Tier filtering settings removed +.Tier filtering settings have been removed. [%collapsible] ==== *Details* + The cluster and index level settings ending in `._tier` used for filtering the allocation of a shard -to a particular set of nodes have been removed. Instead, the <>, `index.routing.allocation.include._tier_preference` should be used. The -removed settings are: +to a particular set of nodes have been removed. Instead, the +{ref}/data-tier-shard-filtering.html#tier-preference-allocation-filter[tier +preference setting], `index.routing.allocation.include._tier_preference` should +be used. The removed settings are: Cluster level settings: - `cluster.routing.allocation.include._tier` @@ -266,9 +265,10 @@ Index settings: Discontinue use of the removed settings. Specifying any of these cluster settings in Elasticsearch configuration will result in an error on startup. Any indices using these settings will have the settings archived (and they will have no effect) when the index metadata is loaded. +==== [[shared-data-path-setting]] -.Shared data path and per index data path settings deprecated +.Shared data path and per index data path settings are deprecated. [%collapsible] ==== *Details* + @@ -280,9 +280,10 @@ per index data path settings. *Impact* + Discontinue use of the deprecated settings. +==== [[single-data-node-watermark-setting]] -.Single data node watermark setting only accept true and is deprecated +.The single data node watermark setting is deprecated and now only accepts `true`. [%collapsible] ==== *Details* + @@ -298,4 +299,4 @@ allocation can be disabled by setting *Impact* + Discontinue use of the deprecated setting. ==== - +//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/snapshots.asciidoc b/docs/reference/migration/migrate_8_0/snapshots.asciidoc index c4d91aaaafc3b..3716a7d9b7d4e 100644 --- a/docs/reference/migration/migrate_8_0/snapshots.asciidoc +++ b/docs/reference/migration/migrate_8_0/snapshots.asciidoc @@ -6,67 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] - -// end::notable-breaking-changes[] - -.The get snapshot API's response format has changed. -[%collapsible] -==== -*Details* + -It's possible to get snapshots from multiple repositories in one go. The response format has changed -and now contains separate response for each repository.
- -For example, requesting one snapshot from particular repository - -[source,console] ------------------------------------ -GET _snapshot/repo1/snap1 ------------------------------------ -// TEST[skip:no repo and snapshots are created] - -produces the following response - -[source,console-result] ------------------------------------ -{ - "responses": [ - { - "repository": "repo1", - "snapshots": [ - { - "snapshot": "snap1", - "uuid": "cEzdqUKxQ5G6MyrJAcYwmA", - "version_id": 8000099, - "version": "8.0.0", - "indices": [], - "include_global_state": true, - "state": "SUCCESS", - "start_time": "2019-05-10T17:01:57.868Z", - "start_time_in_millis": 1557507717868, - "end_time": "2019-05-10T17:01:57.909Z", - "end_time_in_millis": 1557507717909, - "duration_in_millis": 41, - "failures": [], - "shards": { - "total": 0, - "failed": 0, - "successful": 0 - } - } - ] - } - ] -} ------------------------------------ -// TESTRESPONSE[skip:no repo and snapshots are created] - -See <> for more information. - -*Impact* + -Update your workflow and applications to use the get snapshot API's new response -format. -==== - .The `repositories.fs.compress` node-level setting has been removed. [%collapsible] ==== @@ -77,7 +16,8 @@ The `repositories.fs.compress` setting has been removed. *Impact* + Use the repository specific `compress` setting to enable compression. See -<> for information on the `compress` setting. +{ref}/snapshots-register-repository.html[Register a snapshot repository] for +information on the `compress` setting. Discontinue use of the `repositories.fs.compress` node-level setting. ==== @@ -91,7 +31,8 @@ Previously, the default value for `compress` was `false`. The default has been c This change will affect both newly created repositories and existing repositories where `compress=false` has not been explicitly specified. -For more information on the compress option, see <> +For more information on the compress option, see +{ref}/snapshots-register-repository.html[Register a snapshot repository]. *Impact* + Update your workflow and applications to assume a default value of `true` for @@ -139,5 +80,7 @@ The repository stats API has been removed. We deprecated this experimental API in 7.10.0. *Impact* + -Use the <> instead. +Use the {ref}/repositories-metering-apis.html[repositories metering APIs] +instead. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/threadpool.asciidoc b/docs/reference/migration/migrate_8_0/threadpool.asciidoc index 4ee3be59c3513..148787398e247 100644 --- a/docs/reference/migration/migrate_8_0/threadpool.asciidoc +++ b/docs/reference/migration/migrate_8_0/threadpool.asciidoc @@ -6,8 +6,6 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] -//end::notable-breaking-changes[] - .The `fixed_auto_queue_size` thread pool type has been removed. [%collapsible] ==== @@ -19,3 +17,4 @@ The `search` and `search_throttled` thread pools have the `fixed` type now. *Impact* + No action needed. 
==== +//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/transport.asciidoc b/docs/reference/migration/migrate_8_0/transport.asciidoc index 6c3fca83cb0d2..5005ee97bc6ab 100644 --- a/docs/reference/migration/migrate_8_0/transport.asciidoc +++ b/docs/reference/migration/migrate_8_0/transport.asciidoc @@ -2,6 +2,9 @@ [[breaking_80_transport_changes]] ==== Transport changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + //tag::notable-breaking-changes[] .Several `transport` settings have been replaced. [%collapsible] @@ -26,8 +29,6 @@ Specifying the removed settings in `elasticsearch.yml` will result in an error on startup. ==== -// end::notable-breaking-changes[] - .The `es.unsafely_permit_handshake_from_incompatible_builds` system property has been removed. [%collapsible] ==== @@ -45,3 +46,4 @@ system property, and ensure that all nodes of the same version are running exactly the same build. Setting this system property will result in an error on startup. ==== +// end::notable-breaking-changes[] diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index 40f4f285c6404..d10a83a5972aa 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -26,8 +26,6 @@ operations, but you can still explore and navigate results. * Requires the `manage_ml` cluster privilege. This privilege is included in the `machine_learning_admin` built-in role. -* Before you can close an {anomaly-job}, you must stop its {dfeed}. See -<>. [[ml-close-job-desc]] == {api-description-title} @@ -36,6 +34,10 @@ You can close multiple {anomaly-jobs} in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the ``. +If you close an {anomaly-job} whose {dfeed} is running, the request will first +attempt to stop the {dfeed}, as though <> was called with +the same `timeout` and `force` parameters as the close request. + When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and @@ -46,7 +48,7 @@ maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. When a {dfeed} that has a specified end date stops, it automatically closes -the job. +its associated job. NOTE: If you use the `force` query parameter, the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots. diff --git a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc index 630f7db4c2450..163a53d3a8d2b 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc @@ -22,9 +22,10 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the [[ml-delete-filter-desc]] == {api-description-title} -This API deletes a {ml-docs}/ml-rules.html[filter]. -If a {ml} job references the filter, you cannot delete the filter. You must -update or delete the job before you can delete the filter. +This API deletes a filter. 
If an {anomaly-job} references the filter, you cannot +delete the filter. You must update or delete the job before you can delete the +filter. For more information, see +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-rules[Custom rules]. [[ml-delete-filter-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc index 2d6216fc2f80d..5336a8f96c4be 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc @@ -34,7 +34,7 @@ one or more forecasts before they expire. NOTE: When you delete a job, its associated forecasts are deleted. For more information, see -{ml-docs}/ml-overview.html#ml-forecasting[Forecasting the future]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-forecast[Forecasting the future]. [[ml-delete-forecast-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index 82b20e58c78f4..316bbd287a9d9 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -18,8 +18,6 @@ Deletes an existing {anomaly-job}. * Requires the `manage_ml` cluster privilege. This privilege is included in the `machine_learning_admin` built-in role. -* Before you can delete a job, you must delete the {dfeeds} that are associated -with it. See <>. * Before you can delete a job, you must close it (unless you specify the `force` parameter). See <>. @@ -36,6 +34,10 @@ are granted to anyone over the `.ml-*` indices. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. +If you delete a job that has a {dfeed}, the request will first attempt to +delete the {dfeed}, as though <> was called with the same +`timeout` and `force` parameters as this delete request. + [[ml-delete-job-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc index 5ed0f4e0584d2..ee08c3b53f155 100644 --- a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc @@ -24,8 +24,9 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the You can create a forecast job based on an {anomaly-job} to extrapolate future behavior. Refer to -{ml-docs}/ml-overview.html#ml-forecasting[Forecasting the future] and -{ml-docs}/ml-limitations.html#ml-forecast-limitations[forecast limitations] to +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-forecast[Forecasting the future] +and +{ml-docs}/ml-limitations.html#ml-forecast-limitations[Forecast limitations] to learn more. You can delete a forecast by using the diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index 470bb64c1a43c..a349f3c018c77 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -30,7 +30,7 @@ You can get scheduled event information for all calendars by using `_all`, by specifying `*` as the ``, or by omitting the ``. For more information, see -{ml-docs}/ml-calendars.html[Calendars and scheduled events]. 
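As an illustrative sketch, the scheduled events for one calendar can be retrieved with a request along these lines (the calendar ID `planned-outages` is hypothetical):

[source,console]
----
GET _ml/calendars/planned-outages/events
----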
+{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. [[ml-get-calendar-event-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index 4f865b1b1c953..5b6ca1030f688 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -30,7 +30,7 @@ information for all calendars by using `_all`, by specifying `*` as the ``, or by omitting the ``. For more information, see -{ml-docs}/ml-calendars.html[Calendars and scheduled events]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. [[ml-get-calendar-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc index 79bf82378d74a..46567ac8bb134 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc @@ -25,7 +25,7 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the == {api-description-title} You can get a single filter or all filters. For more information, see -{ml-docs}/ml-rules.html[Machine learning custom rules]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-rules[Custom rules]. [[ml-get-filter-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index f2e6b2f5dbdbb..b45af60b965a1 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -22,7 +22,7 @@ Retrieves configuration information for {anomaly-jobs}. [[ml-get-job-prereqs]] == {api-prereq-title} -Requires the `monitor_ml` cluster privilege. This privilege is included in the +Requires the `monitor_ml` cluster privilege. This privilege is included in the `machine_learning_user` built-in role. [[ml-get-job-desc]] @@ -81,6 +81,93 @@ monitor progress. (string) The time the job was created. For example, `1491007356077`. This property is informational; you cannot change its value. +`datafeed_config`:: +(object) The {dfeed} configured for the current {anomaly-job}. 
++ +.Properties of `datafeed_config` +[%collapsible%open] +==== +`datafeed_id`::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=datafeed-id] + +`aggregations`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=aggregations] + +`chunking_config`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=chunking-config] ++ +.Properties of `chunking_config` +[%collapsible%open] +===== +`mode`::: +(string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=mode] + +`time_span`::: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=time-span] +===== + +`delayed_data_check_config`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config] ++ +.Properties of `delayed_data_check_config` +[%collapsible%open] +===== +`check_window`:: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-check-window] + +`enabled`:: +(Boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-enabled] +===== + +`frequency`::: +(Optional, <>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=frequency] + +`indices`::: +(Required, array) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=indices] + +`indices_options`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=indices-options] + +`job_id`::: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] + +`max_empty_searches`::: +(Optional,integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=max-empty-searches] + +`query`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=query] + +`query_delay`::: +(Optional, <>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=query-delay] + +`runtime_mappings`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=runtime-mappings] + +`script_fields`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=script-fields] + +`scroll_size`::: +(Optional, unsigned integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=scroll-size] +==== + `finished_time`:: (string) If the job closed or failed, this is the time the job finished, otherwise it is `null`. This property is informational; you cannot change its diff --git a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index 10d1bc92f802e..14d5562105706 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -97,6 +97,10 @@ properties: .Properties of `model_size_stats` [%collapsible%open] ==== +`assignment_memory_basis`::: +(string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=assignment-memory-basis] + `bucket_allocation_failures_count`::: (long) The number of buckets for which entities were not processed due to memory limit constraints. @@ -164,6 +168,10 @@ reclaim space. `model_bytes_memory_limit`::: (long) The upper limit for memory usage, checked on increasing values. +`peak_model_bytes`::: +(long) The highest recorded value for the model memory usage. + + `rare_category_count`::: (long) The number of categories that match just one categorized document. 
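A hedged example of retrieving these statistics for the most recent snapshot (the job ID `high_sum_total_sales` is hypothetical):

[source,console]
----
GET _ml/anomaly_detectors/high_sum_total_sales/model_snapshots?size=1&desc=true
----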
diff --git a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc index 76a5f5ab452f8..c0a7c485dd944 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc @@ -22,7 +22,8 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the [[ml-post-calendar-event-desc]] == {api-description-title} -This API accepts a list of {ml-docs}/ml-calendars.html[scheduled events], each +This API accepts a list of +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[scheduled events], each of which must have a start time, end time, and description. [[ml-post-calendar-event-path-parms]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc index 1802c6e8cb0c5..01f951a816fed 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc @@ -23,7 +23,7 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the == {api-description-title} For more information, see -{ml-docs}/ml-calendars.html[Calendars and scheduled events]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. [[ml-put-calendar-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 1e61cf95c3106..4786dfcef2300 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -65,10 +65,34 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=aggregations] `chunking_config`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=chunking-config] ++ +.Properties of `chunking_config` +[%collapsible%open] +==== +`mode`::: +(string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=mode] + +`time_span`::: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=time-span] +==== `delayed_data_check_config`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config] ++ +.Properties of `delayed_data_check_config` +[%collapsible%open] +==== +`check_window`:: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-check-window] + +`enabled`:: +(Boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-enabled] +==== `frequency`:: (Optional, <>) diff --git a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc index 537832828e9aa..3123b06e9a224 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc @@ -22,9 +22,10 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the [[ml-put-filter-desc]] == {api-description-title} -A {ml-docs}/ml-rules.html[filter] contains a list of strings. -It can be used by one or more jobs. Specifically, filters are referenced in -the `custom_rules` property of detector configuration objects. +A filter contains a list of strings. It can be used by one or more jobs. +Specifically, filters are referenced in the `custom_rules` property of detector +configuration objects. 
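For example, a minimal sketch of creating such a filter (the filter ID and items are hypothetical):

[source,console]
----
PUT _ml/filters/safe_domains
{
  "description": "A list of domains regarded as safe",
  "items": ["*.elastic.co", "wikipedia.org"]
}
----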
For more information, see +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-rules[Custom rules]. [[ml-put-filter-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index c2a437199340b..87a1cc6f8f36f 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -16,7 +16,7 @@ Instantiates an {anomaly-job}. [[ml-put-job-prereqs]] == {api-prereq-title} -Requires the `manage_ml` cluster privilege. This privilege is included in the +Requires the `manage_ml` cluster privilege. This privilege is included in the `machine_learning_admin` built-in role. [[ml-put-job-desc]] @@ -70,7 +70,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=categorization-filters] `detectors`::: (array) An array of detector configuration objects. Detector configuration objects specify which data fields a job analyzes. They also specify which -analytical functions are used. You can specify multiple detectors for a job. +analytical functions are used. You can specify multiple detectors for a job. + NOTE: If the `detectors` array does not contain at least one detector, no analysis can occur and an error is returned. @@ -181,6 +181,10 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=influencers] (time units) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=latency] +`model_prune_window`::: +(Optional, <>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-prune-window] + `multivariate_by_fields`::: (Boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=multivariate-by-fields] @@ -222,7 +226,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=analysis-limits] include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=categorization-examples-limit] `model_memory_limit`::: -(long or string) +(long or string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-memory-limit-ad] ==== //End analysis_limits @@ -235,15 +239,100 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=background-persist-interval] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=custom-settings] +`daily_model_snapshot_retention_after_days`:: +(Optional, long) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=daily-model-snapshot-retention-after-days] + //Begin data_description [[put-datadescription]]`data_description`:: (Required, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=data-description] //End data_description -`daily_model_snapshot_retention_after_days`:: -(Optional, long) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=daily-model-snapshot-retention-after-days] +`datafeed_config`:: +(object) The {dfeed} configured for the current {anomaly-job}. ++ +.Properties of `datafeed` +[%collapsible%open] +==== +`aggregations`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=aggregations] + +`chunking_config`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=chunking-config] ++ +.Properties of `chunking_config` +[%collapsible%open] +===== +`mode`::: +(string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=mode] + +`time_span`::: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=time-span] +===== + +`datafeed_id`::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=datafeed-id] ++ +Defaults to the same ID as the {anomaly-job}. 
+ +`delayed_data_check_config`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config] ++ +.Properties of `delayed_data_check_config` +[%collapsible%open] +===== +`check_window`:: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-check-window] + +`enabled`:: +(Boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-enabled] +===== + +`frequency`::: +(Optional, <>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=frequency] + +`indices`::: +(Required, array) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=indices] + +`indices_options`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=indices-options] + +`max_empty_searches`::: +(Optional,integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=max-empty-searches] + +`query`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=query] + +`query_delay`::: +(Optional, <>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=query-delay] + +`runtime_mappings`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=runtime-mappings] + +`script_fields`::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=script-fields] + +`scroll_size`::: +(Optional, unsigned integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=scroll-size] +==== `description`:: (Optional, string) A description of the job. diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index f476721ec8a18..25810784aefb1 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -55,10 +55,34 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=aggregations] `chunking_config`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=chunking-config] ++ +.Properties of `chunking_config` +[%collapsible%open] +==== +`mode`::: +(string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=mode] + +`time_span`::: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=time-span] +==== `delayed_data_check_config`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config] ++ +.Properties of `delayed_data_check_config` +[%collapsible%open] +==== +`check_window`:: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-check-window] + +`enabled`:: +(Boolean) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=delayed-data-check-config-enabled] +==== `frequency`:: (Optional, <>) diff --git a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc index 5fda34d639282..baced48466c05 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc @@ -46,6 +46,8 @@ close the job, then reopen the job and restart the {dfeed} for the changes to ta (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=analysis-limits] + +You can update the `analysis_limits` only while the job is closed. ++ .Properties of `analysis_limits` [%collapsible%open] ==== @@ -54,14 +56,16 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=analysis-limits] include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-memory-limit-ad] + -- -NOTE: You can update the `analysis_limits` only while the job is closed. 
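A hedged sketch of such an update on a closed job (the job ID is hypothetical; the new limit must not be lower than the current usage):

[source,console]
----
POST _ml/anomaly_detectors/my_job/_update
{
  "analysis_limits": {
    "model_memory_limit": "1024mb"
  }
}
----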
The -`model_memory_limit` property value cannot be decreased below the current usage. - -TIP: If the `memory_status` property in the -<> has a value of +[NOTE] +======= +* You cannot decrease the `model_memory_limit` value below the current usage. To +determine the current usage, refer to the `model_bytes` value in +the <> API. +* If the `memory_status` property in the +<> has a value of `hard_limit`, this means that it was unable to process some data. You might want to re-run the job with an increased `model_memory_limit`. - +======= -- ==== //End analysis_limits @@ -192,6 +196,10 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-plot-config-terms] ==== //End model_plot_config +`model_prune_window`:: +(<>) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-prune-window] + `model_snapshot_retention_days`:: (long) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-snapshot-retention-days] diff --git a/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc index f06eb9146695b..b4ac6dc1c14f7 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc @@ -1,5 +1,4 @@ -[role="xpack"] -[[ml-count-functions]] +["appendix",role="exclude",id="ml-count-functions"] = Count functions Count functions detect anomalies when the number of events in a bucket is diff --git a/docs/reference/ml/anomaly-detection/functions/ml-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-functions.asciidoc index 058d3335b2737..ec5e429bfc584 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-functions.asciidoc @@ -35,10 +35,10 @@ functions are strongly affected by empty buckets. For this reason, there are `non_null_sum` and `non_zero_count` functions, which are tolerant to sparse data. These functions effectively ignore empty buckets. -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> diff --git a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc index 31ba8121302c6..2dba8a32f75b3 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc @@ -1,5 +1,4 @@ -[role="xpack"] -[[ml-geo-functions]] +["appendix",role="exclude",id="ml-geo-functions"] = Geographic functions The geographic functions detect anomalies in the geographic location of the @@ -71,7 +70,7 @@ For example, JSON data might contain the following transaction coordinates: // NOTCONSOLE In {es}, location data is likely to be stored in `geo_point` fields. For more -information, see {ref}/geo-point.html[Geo-point data type]. This data type is +information, see {ref}/geo-point.html[`geo_point` data type]. This data type is supported natively in {ml-features}. Specifically, {dfeed} when pulling data from a `geo_point` field, will transform the data into the appropriate `lat,lon` string format before sending to the {anomaly-job}. 
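For instance, a minimal mapping sketch for an index with a `geo_point` field that a {dfeed} could pull from (index and field names are hypothetical):

[source,console]
----
PUT transactions
{
  "mappings": {
    "properties": {
      "transaction_coordinates": { "type": "geo_point" }
    }
  }
}
----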
diff --git a/docs/reference/ml/anomaly-detection/functions/ml-info-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-info-functions.asciidoc index ea10142885552..7197e535e55e3 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-info-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-info-functions.asciidoc @@ -1,5 +1,5 @@ -[[ml-info-functions]] -= Information Content Functions +["appendix",role="exclude",id="ml-info-functions"] += Information content functions The information content functions detect anomalies in the amount of information that is contained in strings within a bucket. These functions can be used as diff --git a/docs/reference/ml/anomaly-detection/functions/ml-metric-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-metric-functions.asciidoc index 5091db15173e9..31ce07b01570f 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-metric-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-metric-functions.asciidoc @@ -1,5 +1,4 @@ -[role="xpack"] -[[ml-metric-functions]] +["appendix",role="exclude",id="ml-metric-functions"] = Metric functions The metric functions include functions such as mean, min and max. These values diff --git a/docs/reference/ml/anomaly-detection/functions/ml-rare-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-rare-functions.asciidoc index f0a788698a3ef..c993800a9f65b 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-rare-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-rare-functions.asciidoc @@ -1,5 +1,4 @@ -[role="xpack"] -[[ml-rare-functions]] +["appendix",role="exclude",id="ml-rare-functions"] = Rare functions The rare functions detect values that occur rarely in time or rarely for a diff --git a/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc index ec0d30365d660..398bdba30075c 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-sum-functions.asciidoc @@ -1,5 +1,4 @@ -[role="xpack"] -[[ml-sum-functions]] +["appendix",role="exclude",id="ml-sum-functions"] = Sum functions The sum functions detect anomalies when the sum of a field in a bucket is diff --git a/docs/reference/ml/anomaly-detection/functions/ml-time-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-time-functions.asciidoc index 997566e4856d1..a7178e06aac38 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-time-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-time-functions.asciidoc @@ -1,5 +1,4 @@ -[role="xpack"] -[[ml-time-functions]] +["appendix",role="exclude",id="ml-time-functions"] = Time functions The time functions detect events that happen at unusual times, either of the day diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index 7970eacff6407..85de5d1254133 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -425,10 +425,11 @@ When using a `composite` aggregation: // NOTCONSOLE The top level aggregation must be exclusively one of the following: + * A {ref}/search-aggregations-bucket.html[bucket aggregation] containing a single sub-aggregation that is a 
`date_histogram` * A top level aggregation that is a `date_histogram` -* A top level aggregation is a `composite` aggregation. +* A top level aggregation is a `composite` aggregation There must be exactly one `date_histogram`, `composite` aggregation. For more information, see {ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date histogram aggregation] and diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-detector-custom-rules.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-detector-custom-rules.asciidoc index 2e6249fc5a199..150a5367fffcf 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-detector-custom-rules.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-detector-custom-rules.asciidoc @@ -2,7 +2,7 @@ [[ml-configuring-detector-custom-rules]] = Customizing detectors with custom rules -<> – or _job rules_ as {kib} refers to them – enable you +<> – or _job rules_ as {kib} refers to them – enable you to change the behavior of anomaly detectors based on domain-specific knowledge. Custom rules describe _when_ a detector should take a certain _action_ instead diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc index 9808b98cf1d16..9d02c3d011eac 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc @@ -421,7 +421,7 @@ The preview {dfeed} API returns the following results, which show that [[ml-configuring-transform8]] -.Example 8: Transforming geo_point data +.Example 8: Transforming geopoint data [source,console] -------------------------------------------------- diff --git a/docs/reference/ml/anomaly-detection/ml-delayed-data-detection.asciidoc b/docs/reference/ml/anomaly-detection/ml-delayed-data-detection.asciidoc index e1de295e43eff..60b3b5b163667 100644 --- a/docs/reference/ml/anomaly-detection/ml-delayed-data-detection.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-delayed-data-detection.asciidoc @@ -18,15 +18,15 @@ cluster. == Why worry about delayed data? -This is a particularly prescient question. If data are delayed randomly (and -consequently are missing from analysis), the results of certain types of -functions are not really affected. In these situations, it all comes out okay in -the end as the delayed data is distributed randomly. An example would be a `mean` -metric for a field in a large collection of data. In this case, checking for -delayed data may not provide much benefit. If data are consistently delayed, -however, {anomaly-jobs} with a `low_count` function may provide false positives. -In this situation, it would be useful to see if data comes in after an anomaly is -recorded so that you can determine a next course of action. +If data are delayed randomly (and consequently are missing from analysis), the +results of certain types of functions are not really affected. In these +situations, it all comes out okay in the end as the delayed data is distributed +randomly. An example would be a `mean` metric for a field in a large collection +of data. In this case, checking for delayed data may not provide much benefit. +If data are consistently delayed, however, {anomaly-jobs} with a `low_count` +function may provide false positives. In this situation, it would be useful to +see if data comes in after an anomaly is recorded so that you can determine a +next course of action. 
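The periodic check described in the next section can be tuned per {dfeed}; a hedged sketch of enabling it with an explicit window (the {dfeed} ID is hypothetical):

[source,console]
----
POST _ml/datafeeds/datafeed-low-request-rate/_update
{
  "delayed_data_check_config": {
    "enabled": true,
    "check_window": "12h"
  }
}
----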
== How do we detect delayed data? @@ -40,7 +40,16 @@ of the associated {anomaly-job}. The `doc_count` of those buckets are then compared with the job's finalized analysis buckets to see whether any data has arrived since the analysis. If there is indeed missing data due to their ingest delay, the end user is notified. For example, you can see annotations in {kib} -for the periods where these delays occur. +for the periods where these delays occur: + +[role="screenshot"] +image::images/ml-annotations.png["Delayed data annotations in the Single Metric Viewer"] + +There is another tool for visualizing the delayed data on the *Annotations* tab +in the {anomaly-detect} job management page: + +[role="screenshot"] +image::images/ml-datafeed-chart.png["Delayed data in the {dfeed} chart"] == What to do about delayed data? @@ -50,4 +59,4 @@ delayed data is too great or the situation calls for it, the next course of action to consider is to increase the `query_delay` of the datafeed. This increased delay allows more time for data to be indexed. If you have real-time constraints, however, an increased delay might not be desirable. In which case, -you would have to {ref}/tune-for-indexing-speed.html[tune for better indexing speed]. +you would have to {ref}/tune-for-indexing-speed.html[tune for better indexing speed]. diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index b912c9f564435..73732f115ddd1 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -528,11 +528,11 @@ values: `failed`, `started`, `starting`,`stopping`, `stopped`. == {api-examples-title} The following API retrieves usage information for the -{ml-docs}/ecommerce-outliers.html[{oldetection} {dfanalytics-job} example]: +{ml-docs}/ml-dfa-finding-outliers.html#weblogs-outliers[{oldetection} {dfanalytics-job} example]: [source,console] -------------------------------------------------- -GET _ml/data_frame/analytics/ecommerce/_stats +GET _ml/data_frame/analytics/weblog-outliers/_stats -------------------------------------------------- // TEST[skip:Kibana sample data] @@ -542,7 +542,7 @@ GET _ml/data_frame/analytics/ecommerce/_stats "count" : 1, "data_frame_analytics" : [ { - "id" : "ecommerce", + "id" : "weblog-outliers", "state" : "stopped", "progress" : [ { @@ -554,7 +554,7 @@ GET _ml/data_frame/analytics/ecommerce/_stats "progress_percent" : 100 }, { - "phase" : "analyzing", + "phase" : "computing_outliers", "progress_percent" : 100 }, { @@ -563,17 +563,18 @@ GET _ml/data_frame/analytics/ecommerce/_stats } ], "data_counts" : { - "training_docs_count" : 3321, + "training_docs_count" : 1001, "test_docs_count" : 0, "skipped_docs_count" : 0 }, "memory_usage" : { - "timestamp" : 1586905058000, - "peak_usage_bytes" : 279484 + "timestamp" : 1626264770206, + "peak_usage_bytes" : 328011, + "status" : "ok" }, "analysis_stats" : { "outlier_detection_stats" : { - "timestamp" : 1586905058000, + "timestamp" : 1626264770206, "parameters" : { "n_neighbors" : 0, "method" : "ensemble", @@ -583,7 +584,7 @@ GET _ml/data_frame/analytics/ecommerce/_stats "standardization_enabled" : true }, "timing_stats" : { - "elapsed_time" : 245 + "elapsed_time" : 32 } } } diff --git a/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc b/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc index 3064d325fcbe3..439d8b16a791c 100644 --- 
a/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc @@ -225,7 +225,8 @@ List of the available hyperparameters optimized during the `absolute_importance`:::: (double) A positive number showing how much the parameter influences the variation of the -{ml-docs}/dfa-regression.html#dfa-regression-lossfunction[loss function]. For +// {ml-docs}/dfa-regression-lossfunction.html[loss function]. +loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. diff --git a/docs/reference/ml/df-analytics/apis/index.asciidoc b/docs/reference/ml/df-analytics/apis/index.asciidoc index 8c0f8713c90ab..5e7a0791775f4 100644 --- a/docs/reference/ml/df-analytics/apis/index.asciidoc +++ b/docs/reference/ml/df-analytics/apis/index.asciidoc @@ -18,8 +18,12 @@ include::get-dfanalytics.asciidoc[leveloffset=+2] include::get-dfanalytics-stats.asciidoc[leveloffset=+2] include::get-trained-models.asciidoc[leveloffset=+2] include::get-trained-models-stats.asciidoc[leveloffset=+2] +//INFER +include::infer-trained-model-deployment.asciidoc[leveloffset=+2] //PREVIEW include::preview-dfanalytics.asciidoc[leveloffset=+2] //SET/START/STOP include::start-dfanalytics.asciidoc[leveloffset=+2] +include::start-trained-model-deployment.asciidoc[leveloffset=+2] include::stop-dfanalytics.asciidoc[leveloffset=+2] +include::stop-trained-model-deployment.asciidoc[leveloffset=+2] diff --git a/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc new file mode 100644 index 0000000000000..9e1ce728d553d --- /dev/null +++ b/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc @@ -0,0 +1,116 @@ +[role="xpack"] +[testenv="basic"] +[[infer-trained-model-deployment]] += Infer trained model deployment API +[subs="attributes"] +++++ +Infer trained model deployment +++++ + +Evaluates a trained model. + +[[infer-trained-model-deployment-request]] +== {api-request-title} + +`POST _ml/trained_models//deployment/_infer` + +//// +[[infer-trained-model-deployment-prereq]] +== {api-prereq-title} + +//// +//// +[[infer-trained-model-deployment-desc]] +== {api-description-title} + +//// + +[[infer-trained-model-deployment-path-params]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] + +[[infer-trained-model-deployment-query-params]] +== {api-query-parms-title} + +`timeout`:: +(Optional, time) +Controls the amount of time to wait for {infer} results. Defaults to 10 seconds. + +[[infer-trained-model-request-body]] +== {api-request-body-title} + +`input`:: +(Required,string) +The input text for evaluation. + +//// +[[infer-trained-model-deployment-results]] +== {api-response-body-title} +//// +//// +[[ml-get-trained-models-response-codes]] +== {api-response-codes-title} + +//// + +[[infer-trained-model-deployment-example]] +== {api-examples-title} + +The response depends on the task the model is trained for. If it is a +sentiment analysis task, the response is the score. For example: + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/model2/deployment/_infer +{ + "input": "The movie was awesome!!" 
+} +-------------------------------------------------- +// TEST[skip:TBD] + +The API returns scores in this case, for example: + +[source,console-result] +---- +{ + "positive" : 0.9998062667902223, + "negative" : 1.9373320977752957E-4 +} +---- +// NOTCONSOLE + +For named entity recognition (NER) tasks, the response contains the recognized +entities and their type. For example: + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/model2/deployment/_infer +{ + "input": "Hi my name is Josh and I live in Berlin" +} +-------------------------------------------------- +// TEST[skip:TBD] + +The API returns scores in this case, for example: + +[source,console-result] +---- +{ + "entities" : [ + { + "label" : "person", + "score" : 0.9988716330253505, + "word" : "Josh" + }, + { + "label" : "location", + "score" : 0.9980872542990656, + "word" : "Berlin" + } + ] +} +---- +// NOTCONSOLE diff --git a/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc b/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc index f2fc3f35016e7..e23533b959373 100644 --- a/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc +++ b/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc @@ -3,34 +3,36 @@ [[ml-df-analytics-apis]] = {ml-cap} {dfanalytics} APIs -You can use the following APIs to perform {ml} {dfanalytics} activities. +You can use the following APIs to perform {ml} {dfanalytics} activities: -* <> * <> -* <> * <> * <> * <> -* <> -* <> * <> * <> +* <> +* <> +* <> +* <> - -You can use the following APIs to perform {infer} operations. +You can use the following APIs to perform {infer} operations: * <> -* <> -* <> -* <> * <> +* <> * <> +* <> +* <> You can deploy a trained model to make predictions in an ingest pipeline or in -an aggregation. Refer to the following documentation to learn more. +an aggregation. Refer to the following documentation to learn more: -* <> * <> +* <> +* <> +* <> +* <> See also <>. diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 64aa29376b5b7..29a1ff5fc8ef8 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -82,7 +82,7 @@ one of the following types of analysis: {classification}, {oldetection}, or `classification`::: (Required^*^, object) The configuration information necessary to perform -{ml-docs}/dfa-classification.html[{classification}]. +{ml-docs}/ml-dfa-classification.html[{classification}]. + TIP: Advanced parameters are for fine-tuning {classanalysis}. They are set automatically by hyperparameter optimization to give the minimum validation @@ -263,9 +263,9 @@ a large number of categories, there could be a significant effect on the size of + -- NOTE: To use the -{ml-docs}/ml-dfanalytics-evaluate.html#ml-dfanalytics-class-aucroc[AUC ROC evaluation method], -`num_top_classes` must be set to `-1` or a value greater than or equal to the -total number of categories. +{ml-docs}/ml-dfa-classification.html#ml-dfanalytics-class-aucroc[AUC ROC] +evaluation method, `num_top_classes` must be set to `-1` or a value greater than +or equal to the total number of categories. 
-- @@ -300,7 +300,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=training-percent] `outlier_detection`::: (Required^*^, object) The configuration information necessary to perform -{ml-docs}/dfa-outlier-detection.html[{oldetection}]: +{ml-docs}/ml-dfa-finding-outliers.html[{oldetection}]: + .Properties of `outlier_detection` [%collapsible%open] @@ -334,7 +334,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=standardization-enabled] `regression`::: (Required^*^, object) The configuration information necessary to perform -{ml-docs}/dfa-regression.html[{regression}]. +{ml-docs}/ml-dfa-regression.html[{regression}]. + TIP: Advanced parameters are for fine-tuning {reganalysis}. They are set automatically by hyperparameter optimization to give the minimum validation @@ -391,9 +391,10 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=lambda] (Optional, string) The loss function used during {regression}. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber -loss). Defaults to `mse`. Refer to -{ml-docs}/dfa-regression.html#dfa-regression-lossfunction[Loss functions for {regression} analyses] -to learn more. +loss). Defaults to `mse`. +// Refer to +// {ml-docs}/dfa-regression-lossfunction.html[Loss functions for {regression} analyses] +// to learn more. `loss_function_parameter`:::: (Optional, double) diff --git a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc new file mode 100644 index 0000000000000..957cbc09f1ec1 --- /dev/null +++ b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc @@ -0,0 +1,54 @@ +[role="xpack"] +[testenv="basic"] +[[start-trained-model-deployment]] += Start trained model deployment API +[subs="attributes"] +++++ +Start trained model deployment +++++ + +[[start-trained-model-deployment-request]] +== {api-request-title} + +`POST _ml/trained_models//deployent/_start` +//// +[[start-trained-model-deployment-prereq]] +== {api-prereq-title} + +//// +//// +[[start-trained-model-deployment-desc]] +== {api-description-title} + +//// + +[[start-trained-model-deployment-path-params]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] + +[[start-trained-model-deployment-query-params]] +== {api-query-parms-title} + +`timeout`:: +(Optional, time) +Controls the amount of time to wait for the model to deploy. Defaults +to 20 seconds. 
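As a hedged sketch, a deployment could be started with an explicit timeout (the model ID `my_nlp_model` is hypothetical):

[source,console]
----
POST _ml/trained_models/my_nlp_model/deployment/_start?timeout=30s
----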
+//// +[role="child_attributes"] +[[start-trained-model-deployment-results]] +== {api-response-body-title} + +//// +//// +[[ml-get-trained-models-response-codes]] +== {api-response-codes-title} + +//// +//// +[[start-trained-model-deployment-example]] +== {api-examples-title} + +//// \ No newline at end of file diff --git a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc new file mode 100644 index 0000000000000..6302fa8257697 --- /dev/null +++ b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc @@ -0,0 +1,49 @@ +[role="xpack"] +[testenv="basic"] +[[stop-trained-model-deployment]] += Stop trained model deployment API +[subs="attributes"] +++++ +Stop trained model deployment +++++ + +[[stop-trained-model-deployment-request]] +== {api-request-title} + +`POST _ml/trained_models//deployment/_stop` + +//// +[[stop-trained-model-deployment-prereq]] +== {api-prereq-title} + +//// +//// +[[stop-trained-model-deployment-desc]] +== {api-description-title} + +//// +[[stop-trained-model-deployment-path-params]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] + +//// +[[stop-trained-model-deployment-query-params]] +== {api-query-parms-title} +//// +//// +[role="child_attributes"] +[[stop-trained-model-deployment-results]] +== {api-response-body-title} +//// +//// +[[ml-get-trained-models-response-codes]] +== {api-response-codes-title} +//// + +//// +[[stop-trained-model-deployment-example]] +== {api-examples-title} +//// \ No newline at end of file diff --git a/docs/reference/ml/images/ml-annotations.png b/docs/reference/ml/images/ml-annotations.png new file mode 100644 index 0000000000000..c4a33bb06d269 Binary files /dev/null and b/docs/reference/ml/images/ml-annotations.png differ diff --git a/docs/reference/ml/images/ml-datafeed-chart.png b/docs/reference/ml/images/ml-datafeed-chart.png new file mode 100644 index 0000000000000..9093d811fbdb5 Binary files /dev/null and b/docs/reference/ml/images/ml-datafeed-chart.png differ diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index 6ddbc53595af2..1a84e2cf6cdb0 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -157,7 +157,7 @@ The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. The default value is `5m`. If the {anomaly-job} uses a {dfeed} with {ml-docs}/ml-configuring-aggregation.html[aggregations], this value must be divisible by the interval of the date histogram aggregation. For more -information, see {ml-docs}/ml-buckets.html[Buckets]. +information, see {ml-docs}/ml-ad-finding-anomalies.html#ml-ad-bucket-span[Bucket span]. end::bucket-span[] tag::bucket-span-results[] @@ -296,18 +296,6 @@ tag::chunking-config[] months or years. This search is split into time chunks in order to ensure the load on {es} is managed. Chunking configuration controls how the size of these time chunks are calculated and is an advanced configuration option. -+ -.Properties of `chunking_config` -[%collapsible%open] -==== -`mode`::: -(string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=mode] - -`time_span`::: -(<>) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=time-span] -==== end::chunking-config[] tag::class-assignment-objective[] @@ -411,7 +399,7 @@ of the most recent snapshot for this job. 
Valid values range from `0` to `model_snapshot_retention_days`. For new jobs, the default value is `1`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. For more information, refer to -{ml-docs}/ml-model-snapshots.html[Model snapshots]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-model-snapshots[Model snapshots]. end::daily-model-snapshot-retention-after-days[] tag::data-description[] @@ -468,23 +456,21 @@ that moment in time. See {ml-docs}/ml-delayed-data-detection.html[Working with delayed data]. + This check runs only on real-time {dfeeds}. -+ -.Properties of `delayed_data_check_config` -[%collapsible%open] -==== -`check_window`:: -(<>) The window of time that is searched for late data. -This window of time ends with the latest finalized bucket. It defaults to -`null`, which causes an appropriate `check_window` to be calculated when the -real-time {dfeed} runs. In particular, the default `check_window` span -calculation is based on the maximum of `2h` or `8 * bucket_span`. - -`enabled`:: -(Boolean) Specifies whether the {dfeed} periodically checks for delayed data. -Defaults to `true`. -==== end::delayed-data-check-config[] +tag::delayed-data-check-config-check-window[] +The window of time that is searched for late data. This window of time ends with +the latest finalized bucket. It defaults to `null`, which causes an appropriate +`check_window` to be calculated when the real-time {dfeed} runs. In particular, +the default `check_window` span calculation is based on the maximum of `2h` or +`8 * bucket_span`. +end::delayed-data-check-config-check-window[] + +tag::delayed-data-check-config-enabled[] +Specifies whether the {dfeed} periodically checks for delayed data. Defaults to +`true`. +end::delayed-data-check-config-enabled[] + tag::dependent-variable[] Defines which field of the document is to be predicted. This parameter is supplied by field name and must match one of the fields in @@ -1243,6 +1229,16 @@ applied. For example, "CPU,NetworkIn,DiskWrites". Wildcards are not supported. Only the specified `terms` can be viewed when using the Single Metric Viewer. end::model-plot-config-terms[] +tag::model-prune-window[] +Advanced configuration option. +Affects the pruning of models that have not been updated for the given time +duration. The value must be set to a multiple of the `bucket_span`. If set too +low, important information may be removed from the model. Typically, set to +`30d` or longer. If not set, model pruning only occurs if the model memory +status reaches the soft limit (`model_memory_limit`) or the hard limit +(`xpack.ml.max_model_memory_limit`). +end::model-prune-window[] + tag::model-snapshot-id[] A numerical character string that uniquely identifies the model snapshot. For example, `1575402236000 `. @@ -1254,7 +1250,7 @@ snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. For more information, refer -to {ml-docs}/ml-model-snapshots.html[Model snapshots]. +to {ml-docs}/ml-ad-finding-anomalies.html#ml-ad-model-snapshots[Model snapshots]. end::model-snapshot-retention-days[] tag::model-timestamp[] @@ -1438,7 +1434,9 @@ retained. Age is calculated relative to the timestamp of the latest bucket result. 
If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from {es}. The default value is null, which means all -results are retained. +results are retained. Annotations generated by the system also count as results +for retention purposes; they are deleted after the same number of days as +results. Annotations added by users are retained forever. end::results-retention-days[] tag::retain[] diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index e2ea9dffa10d8..cc5a7e78c21dd 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -131,3 +131,41 @@ documentation for more information. Limit for the number of unique dynamic scripts within a certain interval that are allowed to be compiled for a given context. Defaults to `75/5m`, meaning 75 every 5 minutes. + +[[regex-circuit-breaker]] +[discrete] +==== Regex circuit breaker + +Poorly written regular expressions can degrade cluster stability and +performance. The regex circuit breaker limits the use and complexity of +{painless}/painless-regexes.html[regex in Painless scripts]. + +[[script-painless-regex-enabled]] +`script.painless.regex.enabled`:: +(<>) Enables regex in Painless scripts. Accepts: + +`limit` (Default)::: +Enables regex but limits complexity using the +<> +cluster setting. + +`true`::: +Enables regex with no complexity limits. Disables the regex circuit breaker. + +`false`::: +Disables regex. Any Painless script containing a regular expression returns an +error. + +[[script-painless-regex-limit-factor]] +`script.painless.regex.limit-factor`:: +(<>) Limits the number of characters a regular +expression in a Painless script can consider. {es} calculates this limit by +multiplying the setting value by the script input's character length. ++ +For example, the input `foobarbaz` has a character length of `9`. If +`script.painless.regex.limit-factor` is `6`, a regular expression on `foobarbaz` +can consider up to 54 (9 * 6) characters. If the expression exceeds this limit, +it triggers the regex circuit breaker and returns an error. ++ +{es} only applies this limit if +<> is `limit`. diff --git a/docs/reference/modules/indices/recovery.asciidoc b/docs/reference/modules/indices/recovery.asciidoc index 0ac99b41299ca..1993d3da5db37 100644 --- a/docs/reference/modules/indices/recovery.asciidoc +++ b/docs/reference/modules/indices/recovery.asciidoc @@ -73,3 +73,30 @@ and may interfere with indexing, search, and other activities in your cluster. Do not increase this setting without carefully verifying that your cluster has the resources available to handle the extra load that will result. +`indices.recovery.use_snapshots`:: +(<>, Expert) Enables snapshot-based peer recoveries. ++ +{es} recovers replicas and relocates primary shards using the _peer recovery_ +process, which involves constructing a new copy of a shard on the target node. +When `indices.recovery.use_snapshots` is `false` {es} will construct this new +copy by transferring the index data from the current primary. When this setting +is `true` {es} will attempt to copy the index data from a recent snapshot +first, and will only copy data from the primary if it cannot identify a +suitable snapshot. Defaults to `true`. 
++ +Setting this option to `true` reduces your operating costs if your cluster runs +in an environment where the node-to-node data transfer costs are higher than +the costs of recovering data from a snapshot. It also reduces the amount of +work that the primary must do during a recovery. ++ +Additionally, repositories having the setting `use_for_peer_recovery=true` +will be consulted to find a good snapshot when recovering a shard. If none +of the registered repositories have this setting defined, index files will +be recovered from the source node. + +`indices.recovery.max_concurrent_snapshot_file_downloads`:: +(<>, Expert) Number of snapshot file download requests +sent in parallel to the target node for each recovery. Defaults to `5`. ++ +Do not increase this setting without carefully verifying that your cluster has +the resources available to handle the extra load that will result. diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index 7a0609b04ca68..329b27aa273fe 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -7,22 +7,28 @@ limits. [[indices-query-bool-max-clause-count]] `indices.query.bool.max_clause_count`:: (<>, integer) -Maximum number of clauses a Lucene BooleanQuery can contain. Defaults to `1024`. +Maximum number of clauses a query can contain. Defaults to `4096`. + -This setting limits the number of clauses a Lucene BooleanQuery can have. The -default of 1024 is quite high and should normally be sufficient. This limit does -not only affect Elasticsearchs `bool` query, but many other queries are rewritten to Lucene's -BooleanQuery internally. The limit is in place to prevent searches from becoming too large -and taking up too much CPU and memory. In case you're considering increasing this setting, -make sure you've exhausted all other options to avoid having to do this. Higher values can lead -to performance degradations and memory issues, especially in clusters with a high load or -few resources. +This setting limits the total number of clauses that a query tree can have. The default of 4096 +is quite high and should normally be sufficient. This limit applies to the rewritten query, so +not only `bool` queries can contribute high numbers of clauses, but also all queries that rewrite +to `bool` queries internally, such as `fuzzy` queries. The limit is in place to prevent searches +from becoming too large and taking up too much CPU and memory. In case you're considering +increasing this setting, make sure you've exhausted all other options to avoid having to do this. +Higher values can lead to performance degradations and memory issues, especially in clusters with +a high load or few resources. + +Elasticsearch offers some tools to avoid running into issues with regard to the maximum number of +clauses, such as the <> query, which allows querying many distinct +values while still counting as a single clause, or the <> option +of <> fields, which allows executing prefix queries that expand to a high +number of terms as a single term query. [[search-settings-max-buckets]] `search.max_buckets`:: (<>, integer) Maximum number of <> allowed in -a single response. Defaults to 65,535. +a single response. Defaults to 65,536. + Requests that attempt to return more than this limit will return an error.
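To make the `terms` query suggestion above concrete, here is a minimal, illustrative sketch; the index name `my-index-000001` and field `user.id` are placeholders and are not part of this change. The single `terms` query below counts as one clause against `indices.query.bool.max_clause_count`, whereas an equivalent `bool` query with one `term` clause per value would count once per value.

[source,console]
----
GET /my-index-000001/_search
{
  "query": {
    "terms": {
      "user.id": [ "kimchy", "elkbee", "alice", "carol" ]
    }
  }
}
----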
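Relatedly, for the snapshot-based peer recoveries described earlier in this change (`indices.recovery.use_snapshots`), a repository is only consulted when it is registered with `use_for_peer_recovery` set to `true`. The following is a minimal sketch, not part of this change; the repository name `my_repository` and the `location` value are hypothetical and assume `path.repo` is already configured on every node.

[source,console]
----
PUT _snapshot/my_repository
{
  "type": "fs",
  "settings": {
    "location": "my_backup_location",
    "use_for_peer_recovery": true
  }
}
----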
diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 64a7d70666e81..e3e28dbe95eb6 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -87,6 +87,11 @@ corresponding settings for the HTTP and transport interfaces. `0.0.0.0`:: The addresses of all available network interfaces. +NOTE: In some systems these special values resolve to multiple addresses. If +so, {es} will select one of them as its publish address and may change its +selection on each node restart. Ensure your node is accessible at every possible +address. + NOTE: Any values containing a `:` (e.g. an IPv6 address or some of the <>) must be quoted because `:` is a special character in YAML. @@ -181,11 +186,12 @@ if binding to multiple addresses or using different addresses for publishing and binding. NOTE: You can specify a list of addresses for `network.host` and -`network.publish_host`. You can also specify a single hostname which resolves -to multiple addresses. If you do this then {es} chooses one of the addresses -for its publish address. This choice uses heuristics based on IPv4/IPv6 stack -preference and reachability and may change when the node restarts. You must -make sure that each node is accessible at all possible publish addresses. +`network.publish_host`. You can also specify one or more hostnames or +<> that resolve to multiple addresses. +If you do this then {es} chooses one of the addresses for its publish address. +This choice uses heuristics based on IPv4/IPv6 stack preference and +reachability and may change when the node restarts. Ensure +each node is accessible at all possible publish addresses. [[tcp-settings]] ===== Advanced TCP settings diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index f2a6fdda5c0b5..0b1dfb3d21e7c 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -36,7 +36,18 @@ set `node.roles`, the node is assigned the following roles: [IMPORTANT] ==== If you set `node.roles`, ensure you specify every node role your cluster needs. -Some {stack} features require specific node roles: +Every cluster requires the following node roles: + +* `master` +* {blank} ++ +-- +`data_content` and `data_hot` + +OR + +`data` +-- + +Some {stack} features also require specific node roles: - {ccs-cap} and {ccr} require the `remote_cluster_client` role. - {stack-monitor-app} and ingest pipelines require the `ingest` role. @@ -75,10 +86,9 @@ as a remote client. <>:: -A node that has `xpack.ml.enabled` and the `ml` role. If you want to use -{ml-features}, there must be at least one {ml} node in your cluster. For more -information about {ml-features}, see {ml-docs}/index.html[Machine learning in -the {stack}]. +A node that has the `ml` role. If you want to use {ml-features}, there must be +at least one {ml} node in your cluster. For more information, see +<> and {ml-docs}/index.html[Machine learning in the {stack}]. <>:: @@ -224,8 +234,9 @@ assign data nodes to specific tiers: `data_content`,`data_hot`, `data_warm`, `data_cold`, or `data_frozen`. A node can belong to multiple tiers, but a node that has one of the specialized data roles cannot have the generic `data` role. +[role="xpack"] [[data-content-node]] -==== [x-pack]#Content data node# +==== Content data node Content data nodes accommodate user-created content. They enable operations like CRUD, search and aggregations. 
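As an illustrative sketch of the minimum role requirements listed above (not part of this change), a small cluster's only node could satisfy them with the following `elasticsearch.yml` setting:

[source,yaml]
----
# Covers the required roles: master, plus the content and hot data tiers.
# A node with the generic `data` role would satisfy the data requirement too.
node.roles: [ master, data_content, data_hot ]
----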
@@ -236,8 +247,9 @@ To create a dedicated content node, set: node.roles: [ data_content ] ---- +[role="xpack"] [[data-hot-node]] -==== [x-pack]#Hot data node# +==== Hot data node Hot data nodes store time series data as it enters {es}. The hot tier must be fast for both reads and writes, and requires more hardware resources (such as SSD drives). @@ -248,8 +260,9 @@ To create a dedicated hot node, set: node.roles: [ data_hot ] ---- +[role="xpack"] [[data-warm-node]] -==== [x-pack]#Warm data node# +==== Warm data node Warm data nodes store indices that are no longer being regularly updated, but are still being queried. Query volume is usually at a lower frequency than it was while the index was in the hot tier. @@ -261,8 +274,9 @@ To create a dedicated warm node, set: node.roles: [ data_warm ] ---- +[role="xpack"] [[data-cold-node]] -==== [x-pack]#Cold data node# +==== Cold data node Cold data nodes store read-only indices that are accessed less frequently. This tier uses less performant hardware and may leverage searchable snapshot indices to minimize the resources required. @@ -272,8 +286,9 @@ To create a dedicated cold node, set: node.roles: [ data_cold ] ---- +[role="xpack"] [[data-frozen-node]] -==== [x-pack]#Frozen data node# +==== Frozen data node The frozen tier stores <> exclusively. We recommend you use dedicated nodes in the frozen tier. @@ -343,31 +358,20 @@ node.roles: [ remote_cluster_client ] [[ml-node]] ==== [xpack]#Machine learning node# -The {ml-features} provide {ml} nodes, which run jobs and handle {ml} API -requests. If `xpack.ml.enabled` is set to `true` and the node does not have the -`ml` role, the node can service API requests but it cannot run jobs. - -If you want to use {ml-features} in your cluster, you must enable {ml} -(set `xpack.ml.enabled` to `true`) on all master-eligible nodes. If you want to -use {ml-features} in clients (including {kib}), it must also be enabled on all -coordinating nodes. - -For more information about these settings, see <>. +{ml-cap} nodes run jobs and handle {ml} API requests. For more information, see +<>. To create a dedicated {ml} node, set: [source,yaml] ---- -node.roles: [ ml, remote_cluster_client] <1> -xpack.ml.enabled: true <2> +node.roles: [ ml, remote_cluster_client] ---- -<1> The `remote_cluster_client` role is optional but strongly recommended. -Otherwise, {ccs} fails when used in {ml} jobs or {dfeeds}. See <>. -<2> The `xpack.ml.enabled` setting is enabled by default. -NOTE: If you use {ccs} in your {anomaly-jobs}, the `remote_cluster_client` role -is also required on all master-eligible nodes. Otherwise, the {dfeed} cannot -start. +The `remote_cluster_client` role is optional but strongly recommended. +Otherwise, {ccs} fails when used in {ml} jobs or {dfeeds}. If you use {ccs} in +your {anomaly-jobs}, the `remote_cluster_client` role is also required on all +master-eligible nodes. Otherwise, the {dfeed} cannot start. See <>. [[transform-node]] ==== [xpack]#{transform-cap} node# @@ -379,9 +383,10 @@ To create a dedicated {transform} node, set: [source,yaml] ---- -node.roles: [ transform, remote_cluster_client ] <1> +node.roles: [ transform, remote_cluster_client ] ---- -<1> The `remote_cluster_client` role is optional but strongly recommended. + +The `remote_cluster_client` role is optional but strongly recommended. Otherwise, {ccs} fails when used in {transforms}. See <>. 
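To make the {ccs} requirement from the {ml} node section above concrete, here is an illustrative `elasticsearch.yml` sketch (not part of this change) for a master-eligible node in a cluster whose {anomaly-jobs} read data from remote clusters:

[source,yaml]
----
# Master-eligible node that can also start datafeeds which use
# cross-cluster search. Without `remote_cluster_client` here, those
# datafeeds cannot start.
node.roles: [ master, remote_cluster_client ]
----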
[[change-node-role]] diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 923580133a76d..fa22171775fdc 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -112,8 +112,7 @@ PUT _cluster/settings "cluster_one": { "seeds": [ "127.0.0.1:9300" - ], - "transport.ping_schedule": "30s" + ] }, "cluster_two": { "mode": "sniff", @@ -136,7 +135,7 @@ PUT _cluster/settings // TEST[s/127.0.0.1:9300/\${transport_host}/] You can dynamically update the compression and ping schedule settings. However, -you must re-include seeds or `proxy_address` in the settings update request. +you must include the `seeds` or `proxy_address` in the settings update request. For example: [source,console] @@ -149,8 +148,7 @@ PUT _cluster/settings "cluster_one": { "seeds": [ "127.0.0.1:9300" - ], - "transport.ping_schedule": "60s" + ] }, "cluster_two": { "mode": "sniff", @@ -162,7 +160,8 @@ PUT _cluster/settings "cluster_three": { "mode": "proxy", "proxy_address": "127.0.0.1:9302", - "transport.compress": true + "transport.compress": true, + "transport.ping_schedule": "60s" } } } @@ -189,9 +188,7 @@ PUT _cluster/settings "mode": null, "seeds": null, "skip_unavailable": null, - "transport": { - "compress": null - } + "transport.compress": null } } } @@ -215,15 +212,14 @@ cluster: remote: cluster_one: <1> seeds: 127.0.0.1:9300 <2> - transport.ping_schedule: 30s <3> cluster_two: <1> - mode: sniff <4> + mode: sniff <3> seeds: 127.0.0.1:9301 <2> - transport.compress: true <5> - skip_unavailable: true <6> + transport.compress: true <4> + skip_unavailable: true <5> cluster_three: <1> - mode: proxy <4> - proxy_address: 127.0.0.1:9302 <7> + mode: proxy <3> + proxy_address: 127.0.0.1:9302 <6> -------------------------------- <1> `cluster_one`, `cluster_two`, and `cluster_three` are arbitrary _cluster aliases_ @@ -231,14 +227,13 @@ representing the connection to each cluster. These names are subsequently used t distinguish between local and remote indices. <2> The hostname and <> (default: 9300) of a seed node in the remote cluster. -<3> A keep-alive ping is configured for `cluster_one`. -<4> The configured connection mode. By default, this is <>, so +<3> The configured connection mode. By default, this is <>, so the mode is implicit for `cluster_one`. However, it can be explicitly configured as demonstrated by `cluster_two` and must be explicitly configured for <> as demonstrated by `cluster_three`. -<5> Compression is explicitly enabled for requests to `cluster_two`. -<6> Disconnected remote clusters are optional for `cluster_two`. -<7> The address for the proxy endpoint used to connect to `cluster_three`. +<4> Compression is explicitly enabled for requests to `cluster_two`. +<5> Disconnected remote clusters are optional for `cluster_two`. +<6> The address for the proxy endpoint used to connect to `cluster_three`. [discrete] [[remote-cluster-settings]] @@ -280,19 +275,34 @@ separately. `cluster.remote..transport.ping_schedule`:: Sets the time interval between regular application-level ping messages that - are sent to ensure that transport connections to nodes belonging to remote - clusters are kept alive. If set to `-1`, application-level ping messages to - this remote cluster are not sent. If unset, application-level ping messages - are sent according to the global `transport.ping_schedule` setting, which - defaults to `-1` meaning that pings are not sent. 
+ are sent to try to keep remote cluster connections alive. If set to `-1`, + application-level ping messages to this remote cluster are not sent. If + unset, application-level ping messages are sent according to the global + `transport.ping_schedule` setting, which defaults to `-1` meaning that pings + are not sent. It is preferable to correctly configure TCP keep-alives instead + of configuring a `ping_schedule`, because TCP keep-alives are handled by the + operating system and not by {es}. By default, {es} enables TCP keep-alives on + remote cluster connections. Remote cluster connections are transport + connections, so the `transport.tcp.*` <> + regarding TCP keep-alives apply to them. `cluster.remote..transport.compress`:: - Per cluster boolean setting that enables you to configure compression for - requests to a specific remote cluster. This setting impacts only requests + Per cluster setting that enables you to configure compression for requests + to a specific remote cluster. This setting impacts only requests sent to the remote cluster. If the inbound request is compressed, - Elasticsearch compresses the response. If unset, the global - `transport.compress` is used as the fallback setting. + Elasticsearch compresses the response. The setting options are `true`, + `indexing_data`, and `false`. If unset, the global `transport.compress` is + used as the fallback setting. + +`cluster.remote..transport.compression_scheme`:: + + Per cluster setting that enables you to configure the compression scheme for + requests to a specific remote cluster. This setting impacts only requests + sent to the remote cluster. If an inbound request is compressed, {es} + compresses the response using the same compression scheme. The setting options + are `deflate` and `lz4`. If unset, the global `transport.compression_scheme` + is used as the fallback setting. [discrete] [[remote-cluster-sniff-settings]] diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 58727ca9f2a32..91e58228fd97a 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -49,8 +49,18 @@ time setting format). Defaults to `30s`. `transport.compress`:: (<>) -Set to `true` to enable compression (`DEFLATE`) between -all nodes. Defaults to `false`. +Set to `true`, `indexing_data`, or `false` to configure transport compression +between nodes. The option `true` will compress all data. The option +`indexing_data` will compress only the raw index data sent between nodes during +ingest, {ccr} following (excluding bootstrap), and operation-based shard recovery +(excluding transferring Lucene files). Defaults to `false`. + +`transport.compression_scheme`:: +(<>) +Configures the compression scheme for `transport.compress`. The options are +`deflate` or `lz4`. If `lz4` is configured and the remote node has not been +upgraded to a version supporting `lz4`, the traffic will be sent uncompressed. +Defaults to `deflate`. `transport.ping_schedule`:: (<>) @@ -172,6 +182,11 @@ normally makes sense for local cluster communication as compression has a noticeable CPU cost and local clusters tend to be set up with fast network connections between nodes. +The `transport.compress` configuration option `indexing_data` will only +compress requests that relate to the transport of raw indexing source data +between nodes. This option primarily compresses data sent during ingest, +{ccr}, and shard recovery.
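Bringing the new compression settings above together, the following `elasticsearch.yml` sketch is illustrative only and not part of this change; the remote cluster alias `cluster_two` is reused from the examples earlier in this change.

[source,yaml]
----
# Compress only raw indexing data between nodes in the local cluster,
# using the LZ4 scheme.
transport.compress: indexing_data
transport.compression_scheme: lz4

# Override both settings for one remote cluster: compress every request
# sent to it and use DEFLATE instead of the global LZ4 scheme.
cluster.remote.cluster_two.transport.compress: true
cluster.remote.cluster_two.transport.compression_scheme: deflate
----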
+ The `transport.compress` setting always configures local cluster request compression and is the fallback setting for remote cluster request compression. If you want to configure remote request compression differently than local @@ -185,4 +200,6 @@ request compression, you can set it on a per-remote cluster basis using the The compression settings do not configure compression for responses. {es} will compress a response if the inbound request was compressed--even when compression is not enabled. Similarly, {es} will not compress a response if the inbound -request was uncompressed--even when compression is enabled. +request was uncompressed--even when compression is enabled. The compression +scheme used to compress a response will be the same scheme the remote node used +to compress the request. diff --git a/docs/reference/monitoring/collecting-monitoring-data.asciidoc b/docs/reference/monitoring/collecting-monitoring-data.asciidoc index b9ce381a5b7f6..b43f4b82182ec 100644 --- a/docs/reference/monitoring/collecting-monitoring-data.asciidoc +++ b/docs/reference/monitoring/collecting-monitoring-data.asciidoc @@ -125,7 +125,7 @@ HTTP exporter in the `xpack.monitoring.exporters` settings in the xpack.monitoring.exporters: id1: type: http - host: ["http://es-mon-1:9200", "http://es-mon2:9200"] + host: ["http://es-mon-1:9200", "http://es-mon-2:9200"] -------------------------------------------------- -- @@ -148,7 +148,7 @@ For example: xpack.monitoring.exporters: id1: type: http - host: ["http://es-mon-1:9200", "http://es-mon2:9200"] + host: ["http://es-mon-1:9200", "http://es-mon-2:9200"] auth.username: remote_monitoring_user # "xpack.monitoring.exporters.id1.auth.secure_password" must be set in the keystore -------------------------------------------------- @@ -169,7 +169,7 @@ specify the location of the PEM encoded certificate with the xpack.monitoring.exporters: id1: type: http - host: ["https://es-mon1:9200", "https://es-mon2:9200"] + host: ["https://es-mon1:9200", "https://es-mon-2:9200"] auth: username: remote_monitoring_user # "xpack.monitoring.exporters.id1.auth.secure_password" must be set in the keystore @@ -187,7 +187,7 @@ xpack.monitoring.exporters: xpack.monitoring.exporters: id1: type: http - host: ["https://es-mon1:9200", "https://es-mon2:9200"] + host: ["https://es-mon1:9200", "https://es-mon-2:9200"] auth: username: remote_monitoring_user # "xpack.monitoring.exporters.id1.auth.secure_password" must be set in the keystore diff --git a/docs/reference/monitoring/configuring-filebeat.asciidoc b/docs/reference/monitoring/configuring-filebeat.asciidoc index 0331d4eab9457..43286fb9a75fc 100644 --- a/docs/reference/monitoring/configuring-filebeat.asciidoc +++ b/docs/reference/monitoring/configuring-filebeat.asciidoc @@ -66,7 +66,7 @@ the {filebeat} configuration file (`filebeat.yml`): ---------------------------------- output.elasticsearch: # Array of hosts to connect to. - hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1> + hosts: ["http://es-mon-1:9200", "http://es-mon-2:9200"] <1> # Optional protocol and basic auth credentials. 
#protocol: "https" diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index 0fa1e26546e99..d8a5ba1b042f4 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -148,7 +148,7 @@ configuration file (`metricbeat.yml`): ---------------------------------- output.elasticsearch: # Array of hosts to connect to. - hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1> + hosts: ["http://es-mon-1:9200", "http://es-mon-2:9200"] <1> # Optional protocol and basic auth credentials. #protocol: "https" diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc index 35af3104d0594..c2d8718f06bb3 100644 --- a/docs/reference/query-dsl.asciidoc +++ b/docs/reference/query-dsl.asciidoc @@ -49,8 +49,6 @@ the stability of the cluster. Those queries can be categorised as follows: * <> -* Queries on <> - * Queries that may have a high per-document cost: ** <> ** <> diff --git a/docs/reference/query-dsl/combined-fields-query.asciidoc b/docs/reference/query-dsl/combined-fields-query.asciidoc index 390d71276cf3d..42d1f45b0368b 100644 --- a/docs/reference/query-dsl/combined-fields-query.asciidoc +++ b/docs/reference/query-dsl/combined-fields-query.asciidoc @@ -37,9 +37,9 @@ model perfectly.) [WARNING] .Field number limit =================================================== -There is a limit on the number of fields that can be queried at once. It is -defined by the `indices.query.bool.max_clause_count` <> -which defaults to 1024. +There is a limit on the number of fields times terms that can be queried at +once. It is defined by the `indices.query.bool.max_clause_count` +<> which defaults to 4096. =================================================== ==== Per-field boosting diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index e377bb74b0149..5d898f6925aa6 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -368,7 +368,7 @@ decay function is specified as -------------------------------------------------- // NOTCONSOLE <1> The `DECAY_FUNCTION` should be one of `linear`, `exp`, or `gauss`. -<2> The specified field must be a numeric, date, or geo-point field. +<2> The specified field must be a numeric, date, or geopoint field. In the above example, the field is a <> and origin can be provided in geo format. `scale` and `offset` must be given with a unit in @@ -656,7 +656,7 @@ image::https://f.cloud.github.com/assets/4320215/768165/19d8b1aa-e899-11e2-91bc- ==== Supported fields for decay functions -Only numeric, date, and geo-point fields are supported. +Only numeric, date, and geopoint fields are supported. ==== What if a field is missing? diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index afa1175055719..ec7e103ce0d49 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -169,16 +169,13 @@ GET my_locations,my_geoshapes/_search accept geo points with invalid latitude or longitude, set to `COERCE` to also try to infer correct latitude or longitude. (default is `STRICT`). -|`type` |Set to one of `indexed` or `memory` to defines whether this filter will -be executed in memory or indexed. 
See <> below for further details -Default is `memory`. |======================================================================= [[query-dsl-geo-bounding-box-query-accepted-formats]] [discrete] ==== Accepted Formats -In much the same way the geo_point type can accept different +In much the same way the `geo_point` type can accept different representations of the geo point, the filter can accept it as well: [discrete] @@ -388,46 +385,6 @@ The filter can work with multiple locations / points per document. Once a single location / point matches the filter, the document will be included in the filter -[discrete] -[[geo-bbox-type]] -==== Type - -The type of the bounding box execution by default is set to `memory`, -which means in memory checks if the doc falls within the bounding box -range. In some cases, an `indexed` option will perform faster (but note -that the `geo_point` type must have lat and lon indexed in this case). -Note, when using the indexed option, multi locations per document field -are not supported. Here is an example: - -[source,console] --------------------------------------------------- -GET my_locations/_search -{ - "query": { - "bool": { - "must": { - "match_all": {} - }, - "filter": { - "geo_bounding_box": { - "pin.location": { - "top_left": { - "lat": 40.73, - "lon": -74.1 - }, - "bottom_right": { - "lat": 40.10, - "lon": -71.12 - } - }, - "type": "indexed" - } - } - } - } -} --------------------------------------------------- - [discrete] ==== Ignore Unmapped diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index dbe9adb0e2026..723e91e3bc6fb 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -145,7 +145,7 @@ GET /_search // TEST[warning:Deprecated field [geo_polygon] used, replaced by [[geo_shape] query where polygons are defined in geojson or wkt]] [discrete] -==== geo_point Type +==== `geo_point` type The query *requires* the <> type to be set on the relevant field. diff --git a/docs/reference/query-dsl/geo-queries.asciidoc b/docs/reference/query-dsl/geo-queries.asciidoc index b4eb86763e702..363c0ef8a0fad 100644 --- a/docs/reference/query-dsl/geo-queries.asciidoc +++ b/docs/reference/query-dsl/geo-queries.asciidoc @@ -9,20 +9,19 @@ lines, circles, polygons, multi-polygons, etc. The queries in this group are: <> query:: -Finds documents with geo-points that fall into the specified rectangle. +Finds documents with geopoints that fall into the specified rectangle. <> query:: -Finds documents with geo-points within the specified distance of a central point. +Finds documents with geopoints within the specified distance of a central point. <> query:: -Find documents with geo-points within the specified polygon. +Find documents with geopoints within the specified polygon. 
<> query:: Finds documents with: -* `geo-shapes` which either intersect, are contained by, or do not intersect -with the specified geo-shape -* `geo-points` which intersect the specified -geo-shape +* Geoshapes which either intersect, are contained by, or do not intersect +with the specified geoshape +* Geopoints which intersect the specified geoshape include::geo-bounding-box-query.asciidoc[] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index bc2bc63b83dde..a1e052f98eb4e 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -1,13 +1,13 @@ [[query-dsl-geo-shape-query]] -=== Geo-shape query +=== Geoshape query ++++ -Geo-shape +Geoshape ++++ Filter documents indexed using the `geo_shape` or `geo_point` type. -Requires the <> or the -<>. +Requires the <> or the +<>. The `geo_shape` query uses the same grid square representation as the `geo_shape` mapping to find documents that have a shape that intersects @@ -230,9 +230,6 @@ GET /example/_search ==== Spatial Relations -The <> mapping parameter determines which -spatial relation operators may be used at search time. - The following is a complete list of spatial relation operators available when searching a geo field: @@ -257,12 +254,6 @@ is not mapped. [[geo-shape-query-notes]] ==== Notes -* Geo-shape queries on geo-shapes implemented with - <> will not be executed if - <> is set - to false. - - * When data is indexed in a `geo_shape` field as an array of shapes, the arrays are treated as one shape. For this reason, the following requests are equivalent. diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index d155e5a05c1a9..0b38b25ad80e1 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -67,9 +67,9 @@ index settings, which in turn defaults to `*`. `*` extracts all fields in the ma are eligible to term queries and filters the metadata fields. All extracted fields are then combined to build a query. -WARNING: There is a limit on the number of fields that can be queried +WARNING: There is a limit on the number of fields times terms that can be queried at once. It is defined by the `indices.query.bool.max_clause_count` <> -which defaults to 1024. +which defaults to 4096. [[multi-match-types]] [discrete] diff --git a/docs/reference/query-dsl/pinned-query.asciidoc b/docs/reference/query-dsl/pinned-query.asciidoc index 729855e16de64..e0c314da541ad 100644 --- a/docs/reference/query-dsl/pinned-query.asciidoc +++ b/docs/reference/query-dsl/pinned-query.asciidoc @@ -4,7 +4,7 @@ === Pinned Query Promotes selected documents to rank higher than those matching a given query. This feature is typically used to guide searchers to curated documents that are -promoted over and above any "organic" matches for a search. +promoted over and above any "organic" matches for a search. The promoted or "pinned" documents are identified using the document IDs stored in the <> field. @@ -31,6 +31,53 @@ GET /_search ==== Top-level parameters for `pinned` `ids`:: -An array of <> listed in the order they are to appear in results. +(Optional, array) <> listed in the order they are to appear in results. +Required if `docs` is not specified. +`docs`:: +(Optional, array) Documents listed in the order they are to appear in results. +Required if `ids` is not specified. 
+You can specify the following attributes for each document: ++ +-- +`_id`:: +(Required, string) The unique <>. + +`_index`:: +(Required, string) The index that contains the document. +-- `organic`:: -Any choice of query used to rank documents which will be ranked below the "pinned" document ids. \ No newline at end of file +Any choice of query used to rank documents which will be ranked below the "pinned" documents. + +==== Pin documents in a specific index + +If you're searching over multiple indices, you can pin a document within a specific index using `docs`: + +[source,console] +-------------------------------------------------- +GET /_search +{ + "query": { + "pinned": { + "docs": [ + { + "_index": "my-index-000001", + "_id": "1" + }, + { + "_index": "my-index-000001", + "_id": "4" + }, + { + "_index": "my-index-000002", + "_id": "100" + } + ], + "organic": { + "match": { + "description": "iphone" + } + } + } + } +} +-------------------------------------------------- diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index a36289722a723..e0f707a3f5869 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -77,9 +77,9 @@ documents. For mappings with a large number of fields, searching across all eligible fields could be expensive. -There is a limit on the number of fields that can be queried at once. +There is a limit on the number of fields times terms that can be queried at once. It is defined by the `indices.query.bool.max_clause_count` -<>, which defaults to 1024. +<>, which defaults to 4096. ==== -- @@ -142,8 +142,8 @@ You can use this parameter query to search across multiple fields. See -- `fuzziness`:: -(Optional, string) Maximum edit distance allowed for matching. See <> -for valid values and more information. +(Optional, string) Maximum edit distance allowed for fuzzy matching. For fuzzy +syntax, see <>. `fuzzy_max_expansions`:: (Optional, integer) Maximum number of terms to which the query expands for fuzzy diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index 17d53365e31e2..6be4538dafd91 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -116,13 +116,15 @@ Use with caution! [[query-string-fuzziness]] ====== Fuzziness -We can search for terms that are -similar to, but not exactly like our search terms, using the ``fuzzy'' -operator: +You can run <> using the `~` operator: quikc~ brwn~ foks~ -This uses the +For these queries, the query string is <>. If +present, only certain filters from the analyzer are applied. For a list of +applicable filters, see <>. 
+ +The query uses the {wikipedia}/Damerau-Levenshtein_distance[Damerau-Levenshtein distance] to find all terms with a maximum of two changes, where a change is the insertion, deletion diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc index a101c8afc478a..3a869f64b45f3 100644 --- a/docs/reference/query-dsl/span-field-masking-query.asciidoc +++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc @@ -25,7 +25,7 @@ GET /_search } }, { - "field_masking_span": { + "span_field_masking": { "query": { "span_term": { "text.stems": "fox" @@ -42,4 +42,4 @@ GET /_search } -------------------------------------------------- -Note: as span field masking query returns the masked field, scoring will be done using the norms of the field name supplied. This may lead to unexpected scoring behaviour. \ No newline at end of file +Note: as span field masking query returns the masked field, scoring will be done using the norms of the field name supplied. This may lead to unexpected scoring behaviour. diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index 8a78c2ba19705..aefb3e4b75eb5 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -39,7 +39,7 @@ GET /_search -------------------------------------------------- WARNING: `span_multi` queries will hit too many clauses failure if the number of terms that match the query exceeds the -boolean query limit (defaults to 1024).To avoid an unbounded expansion you can set the <> of the multi term query to `top_terms_*` rewrite. Or, if you use `span_multi` on `prefix` query only, you can activate the <> field option of the `text` field instead. This will rewrite any prefix query on the field to a single term query that matches the indexed prefix. diff --git a/docs/reference/query-dsl/span-queries.asciidoc b/docs/reference/query-dsl/span-queries.asciidoc index cc14b0ee4935a..fbe80fa85d64c 100644 --- a/docs/reference/query-dsl/span-queries.asciidoc +++ b/docs/reference/query-dsl/span-queries.asciidoc @@ -18,7 +18,7 @@ The queries in this group are: <>:: Accepts a list of span queries, but only returns those spans which also match a second span query. -<>:: +<>:: Allows queries like `span-near` or `span-or` across different fields. <>:: @@ -66,4 +66,4 @@ include::span-or-query.asciidoc[] include::span-term-query.asciidoc[] -include::span-within-query.asciidoc[] \ No newline at end of file +include::span-within-query.asciidoc[] diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index cad9b28cbfdbc..ca3afd94092d7 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -6,7 +6,7 @@ This group contains queries which do not fit into the other groups: <>:: A query that computes scores based on the dynamically computed distances -between the origin and documents' date, date_nanos and geo_point fields. +between the origin and documents' `date`, `date_nanos`, and `geo_point` fields. It is able to efficiently skip non-competitive hits. 
<>:: diff --git a/docs/reference/query-dsl/terms-query.asciidoc b/docs/reference/query-dsl/terms-query.asciidoc index 90854bcf3205c..59a7825f8a4ea 100644 --- a/docs/reference/query-dsl/terms-query.asciidoc +++ b/docs/reference/query-dsl/terms-query.asciidoc @@ -79,9 +79,8 @@ Terms lookup fetches the field values of an existing document. {es} then uses those values as search terms. This can be helpful when searching for a large set of terms. -Because terms lookup fetches values from a document, the <> mapping field must be enabled to use terms lookup. The `_source` -field is enabled by default. +To run a terms lookup, the field's <> must be +enabled. You cannot use {ccs} to run a terms lookup on a remote index. [NOTE] By default, {es} limits the `terms` query to a maximum of 65,536 diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 00a5ae4ae6b50..5e253d39e1ef1 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -220,19 +220,19 @@ See <>. === Calendar resources See <> and -{ml-docs}/ml-calendars.html[Calendars and scheduled events]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. [role="exclude",id="ml-filter-resource"] === Filter resources See <> and -{ml-docs}/ml-rules.html[Machine learning custom rules]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-rules[Machine learning custom rules]. [role="exclude",id="ml-event-resource"] === Scheduled event resources See <> and -{ml-docs}/ml-calendars.html[Calendars and scheduled events]. +{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. [role="exclude",id="index-apis"] === Index APIs @@ -826,7 +826,7 @@ See <> and <>. [role="exclude",id="indices-component-templates"] === Component template APIs -coming::[7.x] +See <>. [role="exclude",id="modules-indices"] === Indices module @@ -1593,3 +1593,49 @@ include::redirects.asciidoc[tag=frozen-index-redirect] === Monitoring frozen indices include::redirects.asciidoc[tag=frozen-index-redirect] + +[role="exclude",id="glossary"] +=== Glossary + +See the {glossary}/terms.html[Elastic glossary]. +[role="exclude",id="multi-index"] +=== Multi-target syntax + +See <>. + +[float] +[[hidden]] +==== Hidden data streams and indices + +See <>. + +[role="exclude",id="date-math-index-names"] +=== Date math support in system and index alias names + +See <>. + +[role="exclude",id="cron-expressions"] +=== Cron expressions + +See <>. + +[role="exclude",id="url-access-control"] +=== URL-based access control + +See <>. + +[role="exclude",id="indices-field-usage-stats"] +=== Field usage stats API + +See <>. + +[role="exclude",id="security-api-enroll-kibana"] +=== Enroll {kib} API + +See <>. + +[role="exclude",id="sql-rest-fields"] +=== Supported REST parameters for SQL search API + +See the <> for the +<>. diff --git a/docs/reference/release-notes/8.0.0-alpha1.asciidoc b/docs/reference/release-notes/8.0.0-alpha1.asciidoc index 3a2823b222132..a2c57cd3639f1 100644 --- a/docs/reference/release-notes/8.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/8.0.0-alpha1.asciidoc @@ -1,34 +1,473 @@ [[release-notes-8.0.0-alpha1]] == {es} version 8.0.0-alpha1 -coming[8.0.0] - -The changes listed below have been released for the first time in {es} -8.0.0-alpha1. +Also see <>. 
[[breaking-8.0.0-alpha1]] -[discrete] +[float] === Breaking changes Aggregations:: -* Disallow specifying the same percentile multiple times in percentiles aggregation {es-pull}52257[#52257] +* Percentiles aggregation: disallow specifying same percentile values twice {es-pull}52257[#52257] (issue: {es-issue}51871[#51871]) +* Remove Adjacency_matrix setting {es-pull}46327[#46327] (issues: {es-issue}46257[#46257], {es-issue}46324[#46324]) +* Remove `MovingAverage` pipeline aggregation {es-pull}39328[#39328] +* Remove deprecated `_time` and `_term` sort orders {es-pull}39450[#39450] +* Remove deprecated date histo interval {es-pull}75000[#75000] + +Allocation:: +* Breaking change for single data node setting {es-pull}73737[#73737] (issues: {es-issue}55805[#55805], {es-issue}73733[#73733]) +* Remove `include_relocations` setting {es-pull}47717[#47717] (issues: {es-issue}46079[#46079], {es-issue}47443[#47443]) + +Analysis:: +* Cleanup versioned deprecations in analysis {es-pull}41560[#41560] (issue: {es-issue}41164[#41164]) +* Remove preconfigured `delimited_payload_filter` {es-pull}43686[#43686] (issues: {es-issue}41560[#41560], {es-issue}43684[#43684]) + +Authentication:: +* Always add file and native realms unless explicitly disabled {es-pull}69096[#69096] (issue: {es-issue}50892[#50892]) +* Do not set a NameID format in Policy by default {es-pull}44090[#44090] (issue: {es-issue}40353[#40353]) +* Make order setting mandatory for Realm config {es-pull}51195[#51195] (issue: {es-issue}37614[#37614]) + +CCR:: +* Avoid auto following leader system indices in CCR {es-pull}72815[#72815] (issue: {es-issue}67686[#67686]) + +Cluster Coordination:: +* Remove join timeout {es-pull}60873[#60873] (issue: {es-issue}60872[#60872]) +* Remove node filters for voting config exclusions {es-pull}55673[#55673] (issues: {es-issue}47990[#47990], {es-issue}50836[#50836]) +* Remove support for delaying state recovery pending master {es-pull}53845[#53845] (issue: {es-issue}51806[#51806]) + +Distributed:: +* Remove synced flush {es-pull}50882[#50882] (issues: {es-issue}50776[#50776], {es-issue}50835[#50835]) +* Remove the `cluster.remote.connect` setting {es-pull}54175[#54175] (issue: {es-issue}53924[#53924]) + +Engine:: +* Force merge should reject requests with `only_expunge_deletes` and `max_num_segments` set {es-pull}44761[#44761] (issue: {es-issue}43102[#43102]) +* Remove per-type indexing stats {es-pull}47203[#47203] (issue: {es-issue}41059[#41059]) +* Remove translog retention settings {es-pull}51697[#51697] (issue: {es-issue}50775[#50775]) + +Features/CAT APIs:: +* Remove the deprecated `local` parameter for `_cat/indices` {es-pull}64868[#64868] (issue: {es-issue}62198[#62198]) +* Remove the deprecated `local` parameter for `_cat/shards` {es-pull}64867[#64867] (issue: {es-issue}62197[#62197]) + +Features/Features:: +* Remove deprecated ._tier allocation filtering settings {es-pull}73074[#73074] (issue: {es-issue}72835[#72835]) + +Features/ILM+SLM:: +* Add lower bound on `poll_interval` {es-pull}39593[#39593] (issue: {es-issue}39163[#39163]) + +Features/Indices APIs:: +* Change prefer_v2_templates parameter to default to true {es-pull}55489[#55489] (issues: {es-issue}53101[#53101], {es-issue}55411[#55411]) +* Remove deprecated `_upgrade` API {es-pull}64732[#64732] (issue: {es-issue}21337[#21337]) +* Remove local parameter for get field mapping request {es-pull}55100[#55100] (issue: {es-issue}55099[#55099]) +* Remove `include_type_name` parameter from REST layer {es-pull}48632[#48632] (issue: 
{es-issue}41059[#41059]) +* Remove the `template` field in index templates {es-pull}49460[#49460] (issue: {es-issue}21009[#21009]) + +Features/Watcher:: +* Move watcher history to data stream {es-pull}64252[#64252] + +Geo:: +* Disallow creating `geo_shape` mappings with deprecated parameters {es-pull}70850[#70850] (issue: {es-issue}32039[#32039]) +* Remove bounding box query `type` parameter {es-pull}74536[#74536] + +Infra/Circuit Breakers:: +* Fixed synchronizing inflight breaker with internal variable {es-pull}40878[#40878] + +Infra/Core:: +* Fail when using multiple data paths {es-pull}72184[#72184] (issue: {es-issue}71205[#71205]) +* Limit processors by available processors {es-pull}44894[#44894] (issue: {es-issue}44889[#44889]) +* Remove `nodes/0` folder prefix from data path {es-pull}42489[#42489] +* Remove `bootstrap.system_call_filter` setting {es-pull}72848[#72848] +* Remove `fixed_auto_queue_size` threadpool type {es-pull}52280[#52280] +* Remove `node.max_local_storage_nodes` {es-pull}42428[#42428] (issue: {es-issue}42426[#42426]) +* Remove camel case named formats {es-pull}60044[#60044] +* Remove legacy role settings {es-pull}71163[#71163] (issues: {es-issue}54998[#54998], {es-issue}66409[#66409], {es-issue}71143[#71143]) +* Remove `processors` setting {es-pull}45905[#45905] (issue: {es-issue}45855[#45855]) +* Remove the `local` parameter of `/_cat/nodes` {es-pull}50594[#50594] (issues: {es-issue}50088[#50088], {es-issue}50499[#50499]) +* Remove the listener thread pool {es-pull}53314[#53314] (issue: {es-issue}53049[#53049]) +* Remove the node local storage setting {es-pull}54381[#54381] (issue: {es-issue}54374[#54374]) +* Remove the `pidfile` setting {es-pull}45940[#45940] (issue: {es-issue}45938[#45938]) +* Removes `week_year` date format {es-pull}63384[#63384] (issue: {es-issue}60707[#60707]) + +Infra/Logging:: +* Remove slowlog level {es-pull}57591[#57591] (issue: {es-issue}56171[#56171]) + +Infra/Plugins:: +* Remove deprecated basic license feature enablement settings from 8.0 {es-pull}56211[#56211] (issue: {es-issue}54745[#54745]) + +Infra/REST API:: +* Remove content type required setting {es-pull}61043[#61043] +* Remove deprecated endpoints containing `_xpack` {es-pull}48170[#48170] (issue: {es-issue}35958[#35958]) +* Remove deprecated endpoints of hot threads API {es-pull}55109[#55109] (issue: {es-issue}52640[#52640]) +* Allow parsing Content-Type and Accept headers with version {es-pull}61427[#61427] + +Infra/Resiliency:: +* Fail node containing ancient closed index {es-pull}44264[#44264] (issues: {es-issue}21830[#21830], {es-issue}41731[#41731], {es-issue}44230[#44230]) + +Infra/Scripting:: +* Consolidate script parsing from object {es-pull}59507[#59507] (issue: {es-issue}59391[#59391]) +* Scripting: Move `script_cache` into _nodes/stats {es-pull}59265[#59265] (issues: {es-issue}50152[#50152], {es-issue}59262[#59262]) +* Scripting: Remove general cache settings {es-pull}59262[#59262] (issue: {es-issue}50152[#50152]) + +Infra/Settings:: +* Change default value of `action.destructive_requires_name` to `true` {es-pull}66908[#66908] (issue: {es-issue}61074[#61074]) +* Forbid settings without a namespace {es-pull}45947[#45947] (issues: {es-issue}45905[#45905], {es-issue}45940[#45940]) + +Machine Learning:: +* Remove deprecated `_xpack` endpoints {es-pull}59870[#59870] (issues: {es-issue}35958[#35958], {es-issue}48170[#48170]) +* Remove the ability to update datafeed's `job_id` {es-pull}44752[#44752] (issue: {es-issue}44616[#44616]) Mapping:: -* Dynamic mappings in indices 
created on 8.0 and later have stricter validation at mapping update time. - (e.g. incorrect analyzer settings or unknown field types). {es-pull}51233[#51233] +* Remove `boost` mapping parameter {es-pull}62639[#62639] (issue: {es-issue}62623[#62623]) +* Remove support for chained multi-fields {es-pull}42333[#42333] (issues: {es-issue}41267[#41267], {es-issue}41926[#41926]) +* Remove support for string in `unmapped_type` {es-pull}45675[#45675] +* Removes typed URLs from mapping APIs {es-pull}41676[#41676] + +Network:: +* Remove client feature tracking {es-pull}44929[#44929] (issues: {es-issue}31020[#31020], {es-issue}42538[#42538], {es-issue}44667[#44667]) +* Remove escape hatch permitting incompatible builds {es-pull}65753[#65753] (issues: {es-issue}65249[#65249], {es-issue}65601[#65601]) + +Packaging:: +* Remove SysV init support {es-pull}51716[#51716] (issue: {es-issue}51480[#51480]) +* Remove support for `JAVA_HOME` {es-pull}69149[#69149] (issue: {es-issue}55820[#55820]) + +Recovery:: +* Remove dangling index auto import functionality {es-pull}59698[#59698] (issue: {es-issue}48366[#48366]) -Update by query:: -* Unsupported fields provided as part of the script section of the update by query API are now rejected. +Reindex:: +* Reindex from Remote encoding {es-pull}41007[#41007] (issue: {es-issue}40303[#40303]) +* Reindex remove outer level size {es-pull}43373[#43373] (issues: {es-issue}24344[#24344], {es-issue}41894[#41894]) -Deprecations:: -* Remove undocumented endpoints of hot threads API {es-pull}55109[#55109] +Rollup:: +* `RollupStart` endpoint should return OK if job already started {es-pull}41502[#41502] (issues: {es-issue}35928[#35928], {es-issue}39845[#39845]) -Slow loggers:: -* `index.indexing.slowlog.level` and `index.search.slowlog.level` are removed. These settings can be worked around -by using appropriate thresholds. 
If for instance we want to simulate `index.indexing.slowlog.level` = `INFO` then -all we need to do is to set `index.indexing.slowlog.threshold.index.debug` and -`index.indexing.slowlog.threshold.index.trace` to `-1` {es-pull}57591[#57591] +Search:: +* Decouple shard allocation awareness from search and get requests {es-pull}45735[#45735] (issue: {es-issue}43453[#43453]) +* Fix range query on date fields for number inputs {es-pull}63692[#63692] (issue: {es-issue}63680[#63680]) +* Make fuzziness reject illegal values earlier {es-pull}33511[#33511] +* Make remote cluster resolution stricter {es-pull}40419[#40419] (issue: {es-issue}37863[#37863]) +* Parse empty first line in msearch request body as action metadata {es-pull}41011[#41011] (issue: {es-issue}39841[#39841]) +* Remove `CommonTermsQuery` and `cutoff_frequency` param {es-pull}42654[#42654] (issue: {es-issue}37096[#37096]) +* Remove `type` query {es-pull}47207[#47207] (issue: {es-issue}41059[#41059]) +* Remove `use_field_mapping` format option for docvalue fields {es-pull}55622[#55622] +* Remove deprecated `SimpleQueryStringBuilder` parameters {es-pull}57200[#57200] +* Remove deprecated `search.remote` settings {es-pull}42381[#42381] (issues: {es-issue}33413[#33413], {es-issue}38556[#38556]) +* Remove deprecated sort options: `nested_path` and `nested_filter` {es-pull}42809[#42809] (issue: {es-issue}27098[#27098]) +* Remove deprecated vector functions {es-pull}48725[#48725] (issue: {es-issue}48604[#48604]) +* Remove support for `_type` in searches {es-pull}68564[#68564] (issues: {es-issue}41059[#41059], {es-issue}68311[#68311]) +* Remove support for sparse vectors {es-pull}48781[#48781] (issue: {es-issue}48368[#48368]) +* Remove the object format for `indices_boost` {es-pull}55078[#55078] +* Removes type from `TermVectors` APIs {es-pull}42198[#42198] (issue: {es-issue}41059[#41059]) +* Removes typed endpoint from search and related APIs {es-pull}41640[#41640] +* Set max allowed size for stored async response {es-pull}74455[#74455] (issue: {es-issue}67594[#67594]) +* `indices.query.bool.max_clause_count` now limits all query clauses {es-pull}75297[#75297] + +Security:: +* Remove obsolete security settings {es-pull}40496[#40496] +* Remove support of creating CA on the fly when generating certificates {es-pull}65590[#65590] (issue: {es-issue}61884[#61884]) +* Remove the `id` field from the `InvalidateApiKey` API {es-pull}66671[#66671] (issue: {es-issue}66317[#66317]) +* Remove the migrate tool {es-pull}42174[#42174] +* Compress audit logs {es-pull}64472[#64472] (issue: {es-issue}63843[#63843]) +* Remove insecure settings {es-pull}46147[#46147] (issue: {es-issue}45947[#45947]) + +Snapshot/Restore:: +* Blob store compress default to `true` {es-pull}40033[#40033] +* Get snapshots support for multiple repositories {es-pull}42090[#42090] (issue: {es-issue}41210[#41210]) +* Remove repository stats API {es-pull}62309[#62309] (issue: {es-issue}62297[#62297]) +* Remove frozen cache setting leniency {es-pull}71013[#71013] (issue: {es-issue}70341[#70341]) + +TLS:: +* Reject misconfigured/ambiguous SSL server config {es-pull}45892[#45892] +* Remove support for configurable PKCS#11 keystores {es-pull}75404[#75404] +* Remove the client transport profile filter {es-pull}43236[#43236] + + + +[[breaking-java-8.0.0-alpha1]] +[float] +=== Breaking Java changes + +Authentication:: +* Mandate x-pack REST handler installed {es-pull}71061[#71061] (issue: {es-issue}70523[#70523]) + +CCR:: +* Remove the `CcrClient` {es-pull}42816[#42816] + +CRUD:: +* Remove 
types from `BulkRequest` {es-pull}46983[#46983] (issue: {es-issue}41059[#41059]) +* Remove `Client.prepareIndex(index, type, id)` method {es-pull}48443[#48443] +* Remove deprecated `include-type` methods from HLRC indices client {es-pull}48471[#48471] + + +Client:: +* Remove `SecurityClient` from x-pack {es-pull}42471[#42471] + +Features/ILM+SLM:: +* Remove the `ILMClient` {es-pull}42817[#42817] +* Rename HLRC `indexlifecycle` components to `ilm` {es-pull}44982[#44982] (issues: {es-issue}44725[#44725], {es-issue}44917[#44917]) + +Features/Monitoring:: +* Remove `MonitoringClient` from x-pack {es-pull}42770[#42770] + +Features/Watcher:: +* Remove `WatcherClient` from x-pack {es-pull}42815[#42815] + +Infra/Core:: +* Remove `XPackClient` from x-pack {es-pull}42729[#42729] +* Remove the transport client {es-pull}42538[#42538] +* Remove transport client from x-pack {es-pull}42202[#42202] + +Infra/REST API:: +* Copy HTTP headers to `ThreadContext` strictly {es-pull}45945[#45945] + +Machine Learning:: +* Remove the `MachineLearningClient` {es-pull}43108[#43108] + +Mapping:: +* Remove type filter from `GetMappings` API {es-pull}47364[#47364] (issue: {es-issue}41059[#41059]) +* Remove `type` parameter from `PutMappingRequest.buildFromSimplifiedDef()` {es-pull}50844[#50844] (issue: {es-issue}41059[#41059]) +* Remove unused parameter from `MetadataFieldMapper.TypeParser#getDefault()` {es-pull}51219[#51219] +* Remove `type` parameter from `CIR.mapping(type, object...)` {es-pull}50739[#50739] (issue: {es-issue}41059[#41059]) Search:: -* Consistent treatment of numeric values for range query on date fields without `format` {es-pull}[#63692] +* Removes types from `SearchRequest` and `QueryShardContext` {es-pull}42112[#42112] + +Snapshot/Restore:: +* Remove deprecated repository methods {es-pull}42359[#42359] (issue: {es-issue}42213[#42213]) + + + +[[enhancement-8.0.0-alpha1]] +[float] +=== Enhancements + +Analysis:: +* Move `reload_analyzers` endpoint to x-pack {es-pull}43559[#43559] + +Authentication:: +* Reset elastic password CLI tool {es-pull}74892[#74892] (issues: {es-issue}70113[#70113], {es-issue}74890[#74890]) + +EQL:: +* Add option for returning results from the tail of the stream {es-pull}64869[#64869] (issue: {es-issue}58646[#58646]) +* Introduce case insensitive variant `in~` {es-pull}68176[#68176] (issue: {es-issue}68172[#68172]) +* Optimize redundant `toString` {es-pull}71070[#71070] (issue: {es-issue}70681[#70681]) + +Engine:: +* Always use soft-deletes in `InternalEngine` {es-pull}50415[#50415] +* Remove translog retention policy {es-pull}51417[#51417] (issue: {es-issue}50775[#50775]) + +Features/CAT APIs:: +* Remove `size` and add `time` params to `_cat/threadpool` {es-pull}55736[#55736] (issue: {es-issue}54478[#54478]) + +Features/Stats:: +* Add bulk stats track the bulk per shard {es-pull}52208[#52208] (issues: {es-issue}47345[#47345], {es-issue}50536[#50536]) + + +Features/Watcher:: +* Remove Watcher history clean up from monitoring {es-pull}67154[#67154] + +Infra/Core:: +* Remove aliases exist action {es-pull}43430[#43430] +* Remove indices exists action {es-pull}43164[#43164] +* Remove types exists action {es-pull}43344[#43344] + +Infra/Logging:: +* Make Elasticsearch JSON logs ECS compliant {es-pull}47105[#47105] (issue: {es-issue}46119[#46119]) + +Infra/REST API:: +* Allow for field declaration for future compatible versions {es-pull}69774[#69774] (issue: {es-issue}51816[#51816]) +* Introduce stability description to the REST API specification {es-pull}38413[#38413] +* Parsing: 
Validate that fields are not registered twice {es-pull}70243[#70243] +* Support response content-type with versioned media type {es-pull}65500[#65500] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Typed endpoints for index and get APIs {es-pull}69131[#69131] (issue: {es-issue}54160[#54160]) +* [REST API Compatibility] Typed endpoints for put and get mapping and get field mappings {es-pull}71721[#71721] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Allow `copy_settings` flag for resize operations {es-pull}75184[#75184] (issues: {es-issue}38514[#38514], {es-issue}51816[#51816]) +* [REST API Compatibility] Allow for type in geo shape query {es-pull}74553[#74553] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Always return `adjust_pure_negative` value {es-pull}75182[#75182] (issues: {es-issue}49543[#49543], {es-issue}51816[#51816]) +* [REST API Compatibility] Clean up x-pack/plugin rest compat tests {es-pull}74701[#74701] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Do not return `_doc` for empty mappings in template {es-pull}75448[#75448] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160], {es-issue}70966[#70966], {es-issue}74544[#74544]) +* [REST API Compatibility] Dummy REST action for `indices.upgrade` API {es-pull}75136[#75136] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] REST Terms vector typed response {es-pull}73117[#73117] +* [REST API Compatibility] Rename `BulkItemResponse.Failure` type field {es-pull}74937[#74937] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Type metadata for docs used in simulate request {es-pull}74222[#74222] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Typed `TermLookups` {es-pull}74544[#74544] (issues: {es-issue}46943[#46943], {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Typed and x-pack graph explore API {es-pull}74185[#74185] (issues: {es-issue}46935[#46935], {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Typed endpoint for bulk API {es-pull}73571[#73571] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Typed endpoint for multi-get API {es-pull}73878[#73878] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Typed endpoints for `RestUpdateAction` and `RestDeleteAction` {es-pull}73115[#73115] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Typed endpoints for `get_source` API {es-pull}73957[#73957] (issues: {es-issue}46587[#46587], {es-issue}46931[#46931], {es-issue}51816[#51816]) +* [REST API Compatibility] Typed endpoints for explain API {es-pull}73901[#73901] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Typed endpoints for search `_count` API {es-pull}73958[#73958] (issues: {es-issue}42112[#42112], {es-issue}51816[#51816]) +* [REST API Compatibility] Typed indexing stats {es-pull}74181[#74181] (issues: {es-issue}47203[#47203], {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Types for percolate query API {es-pull}74698[#74698] (issues: {es-issue}46985[#46985], {es-issue}51816[#51816], {es-issue}54160[#54160], {es-issue}74689[#74689]) +* [REST API Compatibility] Validate query typed API {es-pull}74171[#74171] (issues: {es-issue}46927[#46927], {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Voting config exclusion exception message {es-pull}75406[#75406] (issues: 
{es-issue}51816[#51816], {es-issue}55291[#55291]) +* [REST API Compatibility] `MoreLikeThisQuery` with types {es-pull}75123[#75123] (issues: {es-issue}42198[#42198], {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Update and delete by query using size field {es-pull}69606[#69606] +* [REST API Compatibility] Indicies boost in object format {es-pull}74422[#74422] (issues: {es-issue}51816[#51816], {es-issue}55078[#55078]) +* [REST API Compatibility] Typed endpoints for search and related endpoints {es-pull}72155[#72155] (issues: {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Allow to use size `-1` {es-pull}75342[#75342] (issues: {es-issue}51816[#51816], {es-issue}69548[#69548], {es-issue}70209[#70209]) +* [REST API Compatibility] Ignore `use_field_mapping` option for docvalue {es-pull}74435[#74435] (issue: {es-issue}55622[#55622]) +* [REST API Compatibility] `_time` and `_term` sort orders {es-pull}74919[#74919] (issues: {es-issue}39450[#39450], {es-issue}51816[#51816]) +* [REST API Compatability] `template` parameter and field on PUT index template {es-pull}71238[#71238] (issues: {es-issue}49460[#49460], {es-issue}51816[#51816], {es-issue}68905[#68905]) +* [REST API Compatibility] Make query registration easier {es-pull}75722[#75722] (issue: {es-issue}51816[#51816]) +* [REST API Compatibility] Typed query {es-pull}75453[#75453] (issues: {es-issue}47207[#47207], {es-issue}51816[#51816], {es-issue}54160[#54160]) +* [REST API Compatibility] Deprecate the use of synced flush {es-pull}75372[#75372] (issues: {es-issue}50882[#50882], {es-issue}51816[#51816]) +* [REST API Compatibility] Licence `accept_enterprise` and response changes {es-pull}75479[#75479] (issues: {es-issue}50067[#50067], {es-issue}50735[#50735], {es-issue}51816[#51816], {es-issue}58217[#58217]) + +Infra/Scripting:: +* Update `DeprecationMap` to `DynamicMap` {es-pull}56149[#56149] (issue: {es-issue}52103[#52103]) + +Infra/Settings:: +* Fixed inconsistent `Setting.exist()` {es-pull}46603[#46603] (issue: {es-issue}41830[#41830]) +* Remove `index.optimize_auto_generated_id` setting (#27583) {es-pull}27600[#27600] (issue: {es-issue}27583[#27583]) + +License:: +* Add deprecated `accept_enterprise` param to `/_xpack` {es-pull}58220[#58220] (issue: {es-issue}58217[#58217]) +* Support `accept_enterprise` param in get license API {es-pull}50067[#50067] (issue: {es-issue}49474[#49474]) + +Machine Learning:: +* The Windows build platform for the {ml} C++ code now uses Visual Studio 2019 {ml-pull}1352[#1352] +* The macOS build platform for the {ml} C++ code is now Mojave running Xcode 11.3.1, + or Ubuntu 20.04 running clang 8 for cross compilation {ml-pull}1429[#1429] +* The Linux build platform for the {ml} C++ code is now CentOS 7 running gcc 9.3 {ml-pull}1170[#1170] +* Add a new application for evaluating PyTorch models. 
The app depends on LibTorch - the C++ front end to PyTorch - and performs inference on models stored in the TorchScript format {ml-pull}1902[#1902] + +Mapping:: +* Sparse vector to throw exception consistently {es-pull}62646[#62646] + +Packaging:: +* Make the Docker build more re-usable in Cloud {es-pull}50277[#50277] (issues: {es-issue}46166[#46166], {es-issue}49926[#49926]) +* Update docker-compose.yml to fix bootstrap check error {es-pull}47650[#47650] + +Recovery:: +* Use Lucene index in peer recovery and resync {es-pull}51189[#51189] (issue: {es-issue}50775[#50775]) + +Reindex:: +* Make reindexing managed by a persistent task {es-pull}43382[#43382] (issue: {es-issue}42612[#42612]) +* Reindex restart from checkpoint {es-pull}46055[#46055] (issue: {es-issue}42612[#42612]) +* Reindex search resiliency {es-pull}45497[#45497] (issues: {es-issue}42612[#42612], {es-issue}43187[#43187]) +* Reindex v2 rethrottle sliced fix {es-pull}46967[#46967] (issues: {es-issue}42612[#42612], {es-issue}46763[#46763]) + +Rollup:: +* Adds support for `date_nanos` in Rollup Metric and `DateHistogram` Configs {es-pull}59349[#59349] (issue: {es-issue}44505[#44505]) + +SQL:: +* Add text formatting support for multivalue {es-pull}68606[#68606] +* Add xDBC and CLI support. QA CSV specs {es-pull}68966[#68966] +* Export array values through result sets {es-pull}69512[#69512] +* Improve alias resolution in sub-queries {es-pull}67216[#67216] (issue: {es-issue}56713[#56713]) +* Improve the optimization of null conditionals {es-pull}71192[#71192] +* Push `WHERE` clause inside subqueries {es-pull}71362[#71362] +* Use Java `String` methods for `LTRIM/RTRIM` {es-pull}57594[#57594] +* QL: Make canonical form take into account children {es-pull}71266[#71266] +* QL: Polish optimizer expression rule declaration {es-pull}71396[#71396] +* QL: Propagate nullability constraints across conjunctions {es-pull}71187[#71187] (issue: {es-issue}70683[#70683]) + +Search:: +* Completely disallow setting negative size in search {es-pull}70209[#70209] (issue: {es-issue}69548[#69548]) +* Make `0` as invalid value for `min_children` in `has_child` query {es-pull}41347[#41347] +* Return error when remote indices are locally resolved {es-pull}74556[#74556] (issue: {es-issue}26247[#26247]) + +Security:: +* Add a tool for creating enrollment tokens {es-pull}74890[#74890] +* Add the Enroll Kibana API {es-pull}72207[#72207] +* Change default hashing algorithm for FIPS 140 {es-pull}55544[#55544] +* Create enrollment token {es-pull}73573[#73573] (issues: {es-issue}71438[#71438], {es-issue}72129[#72129]) +* Enroll node API {es-pull}72129[#72129] +* Not encoding the Api Key in Enrollment token {es-pull}74510[#74510] (issue: {es-issue}73573[#73573]) +* Configure security for the initial node CLI {es-pull}74868[#74868] + +Snapshot/Restore:: +* Introduce searchable snapshots index setting for cascade deletion of snapshots {es-pull}74977[#74977] +* Unify blob store compress setting {es-pull}39346[#39346] (issue: {es-issue}39073[#39073]) +* Add recovery state tracking for searchable snapshots {es-pull}60505[#60505] + +TLS:: +* Add `ChaCha20` TLS ciphers on Java 12+ {es-pull}42155[#42155] +* Add support for `KeyStore` filters to `ssl-config` {es-pull}75407[#75407] +* Update TLS ciphers and protocols for JDK 11 {es-pull}41808[#41808] (issues: {es-issue}38646[#38646], {es-issue}41385[#41385]) + + + +[[bug-8.0.0-alpha1]] +[float] +=== Bug fixes + +Aggregations:: +* Fix BWC issues for `x_pack/usage` {es-pull}55181[#55181] (issue: {es-issue}54847[#54847]) +* Fix 
`DoubleBounds` null serialization {es-pull}59475[#59475] +* Fix `TopHitsAggregationBuilder` adding duplicate `_score` sort clauses {es-pull}42179[#42179] (issue: {es-issue}42154[#42154]) +* Fix `t_test` usage stats {es-pull}54753[#54753] (issue: {es-issue}54744[#54744]) +* Throw exception if legacy interval cannot be parsed in `DateIntervalWrapper` {es-pull}41972[#41972] (issue: {es-issue}41970[#41970]) + +CCR:: +* Fix `AutoFollow` version checks {es-pull}73776[#73776] (issue: {es-issue}72935[#72935]) + +Cluster Coordination:: +* Apply cluster states in system context {es-pull}53785[#53785] (issue: {es-issue}53751[#53751]) + +Distributed:: +* Introduce `?wait_for_active_shards=index-setting` {es-pull}67158[#67158] (issue: {es-issue}66419[#66419]) +* Respect `CloseIndexRequest#waitForActiveShards` in HLRC {es-pull}67374[#67374] (issues: {es-issue}67158[#67158], {es-issue}67246[#67246]) +* Fixes to task result index mapping {es-pull}50359[#50359] (issue: {es-issue}50248[#50248]) + +Features/CAT APIs:: +* Fix cat recovery display of bytes fields {es-pull}40379[#40379] (issue: {es-issue}40335[#40335]) + +Features/Java High Level REST Client:: +* Fix HLRC compatibility with Java 8 {es-pull}74290[#74290] (issues: {es-issue}73910[#73910], {es-issue}74272[#74272], {es-issue}74289[#74289]) +* Avoid `StackOverflowError` due to regex alternate paths {es-pull}61259[#61259] (issue: {es-issue}60889[#60889]) + +Geo:: +* Preprocess polygon rings before processing it for decomposition {es-pull}59501[#59501] (issues: {es-issue}54441[#54441], {es-issue}59386[#59386]) + +Infra/Core:: +* Add searchable snapshot cache folder to `NodeEnvironment` {es-pull}66297[#66297] (issue: {es-issue}65725[#65725]) +* CLI tools: Write errors to stderr instead of stdout {es-pull}45586[#45586] (issue: {es-issue}43260[#43260]) +* Precompute `ParsedMediaType` for XContentType {es-pull}67409[#67409] + +Infra/Logging:: +* Fix NPE when logging null values in JSON {es-pull}53715[#53715] (issue: {es-issue}46702[#46702]) +* Fix stats in slow logs to be a escaped JSON {es-pull}44642[#44642] +* Populate data stream fields when `xOpaqueId` not provided {es-pull}62156[#62156] + +Infra/REST API:: +* Do not allow spaces within `MediaType's` parameters {es-pull}64650[#64650] (issue: {es-issue}51816[#51816]) +* Handle incorrect header values {es-pull}64708[#64708] (issues: {es-issue}51816[#51816], {es-issue}64689[#64689]) +* Ignore media ranges when parsing {es-pull}64721[#64721] (issues: {es-issue}51816[#51816], {es-issue}64689[#64689]) +* `RestController` should not consume request content {es-pull}44902[#44902] (issue: {es-issue}37504[#37504]) + +Infra/Scripting:: +* Change compound assignment structure to support string concatenation {es-pull}61825[#61825] +* Fixes casting in constant folding {es-pull}61508[#61508] +* Several minor Painless fixes {es-pull}61594[#61594] + +Machine Learning:: +* Handle null value of `FieldCapabilitiesResponse` {es-pull}64327[#64327] + +Mapping:: +* Remove assertions that mappings have one top-level key {es-pull}58779[#58779] (issue: {es-issue}58521[#58521]) + +Packaging:: +* Suppress illegal access in plugin install {es-pull}41620[#41620] (issue: {es-issue}41478[#41478]) + +SQL:: +* Introduce dedicated node for `HAVING` declaration {es-pull}71279[#71279] (issue: {es-issue}69758[#69758]) +* Make `RestSqlQueryAction` thread-safe {es-pull}69901[#69901] + +Search:: +* Check for negative `from` values in search request body {es-pull}54953[#54953] (issue: {es-issue}54897[#54897]) +* Fix `VectorsFeatureSetUsage` 
serialization in BWC mode {es-pull}55399[#55399] (issue: {es-issue}55378[#55378]) +* Handle total hits equal to `track_total_hits` {es-pull}37907[#37907] (issue: {es-issue}37897[#37897]) +* Improve error msg for CCS request on node without remote cluster role {es-pull}60351[#60351] (issue: {es-issue}59683[#59683]) + +Snapshot/Restore:: +* Fix `GET /_snapshot/_all/_all` if there are no repos {es-pull}43558[#43558] (issue: {es-issue}43547[#43547]) + + +[[upgrade-8.0.0-alpha1]] +[float] +=== Upgrades + +Lucene:: +* Upgrade to Lucene 8.9.0 {es-pull}74729[#74729] diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 4b2afd2b6ded6..7a5f4fefcda1d 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,8 +1,6 @@ [[release-highlights]] == What's new in {minor-version} -coming::[{minor-version}] - Here are the highlights of what's new and improved in {es} {minor-version}! For detailed information about this release, see the <> and diff --git a/docs/reference/rest-api/common-options.asciidoc b/docs/reference/rest-api/common-options.asciidoc new file mode 100644 index 0000000000000..e4d7408e34f03 --- /dev/null +++ b/docs/reference/rest-api/common-options.asciidoc @@ -0,0 +1,406 @@ +[[common-options]] +== Common options + +All {es} REST APIs support the following options. + +[discrete] +=== Pretty Results + +When appending `?pretty=true` to any request made, the JSON returned +will be pretty formatted (use it for debugging only!). Another option is +to set `?format=yaml` which will cause the result to be returned in the +(sometimes) more readable yaml format. + + +[discrete] +=== Human readable output + +Statistics are returned in a format suitable for humans +(e.g. `"exists_time": "1h"` or `"size": "1kb"`) and for computers +(e.g. `"exists_time_in_millis": 3600000` or `"size_in_bytes": 1024`). +The human readable values can be turned off by adding `?human=false` +to the query string. This makes sense when the stats results are +being consumed by a monitoring tool, rather than intended for human +consumption. The default for the `human` flag is +`false`. + +[[date-math]] +[discrete] +=== Date Math + +Most parameters which accept a formatted date value -- such as `gt` and `lt` +in <>, or `from` and `to` +in <> -- understand date maths. + +The expression starts with an anchor date, which can either be `now`, or a +date string ending with `||`. This anchor date can optionally be followed by +one or more maths expressions: + +* `+1h`: Add one hour +* `-1d`: Subtract one day +* `/d`: Round down to the nearest day + +The supported time units differ from those supported by <> for durations. +The supported units are: + +[horizontal] +`y`:: Years +`M`:: Months +`w`:: Weeks +`d`:: Days +`h`:: Hours +`H`:: Hours +`m`:: Minutes +`s`:: Seconds + +Assuming `now` is `2001-01-01 12:00:00`, some examples are: + +[horizontal] +`now+1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 13:00:00` +`now-1h`:: `now` in milliseconds minus one hour. Resolves to: `2001-01-01 11:00:00` +`now-1h/d`:: `now` in milliseconds minus one hour, rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00` + `2001.02.01\|\|+1M/d`:: `2001-02-01` in milliseconds plus one month. 
Resolves to: `2001-03-01 00:00:00` + +[discrete] +[[common-options-response-filtering]] +=== Response Filtering + +All REST APIs accept a `filter_path` parameter that can be used to reduce +the response returned by Elasticsearch. This parameter takes a comma +separated list of filters expressed with the dot notation: + +[source,console] +-------------------------------------------------- +GET /_search?q=kimchy&filter_path=took,hits.hits._id,hits.hits._score +-------------------------------------------------- +// TEST[setup:my_index] + +Responds: + +[source,console-result] +-------------------------------------------------- +{ + "took" : 3, + "hits" : { + "hits" : [ + { + "_id" : "0", + "_score" : 1.6375021 + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took" : 3/"took" : $body.took/] +// TESTRESPONSE[s/1.6375021/$body.hits.hits.0._score/] + +It also supports the `*` wildcard character to match any field or part +of a field's name: + +[source,console] +-------------------------------------------------- +GET /_cluster/state?filter_path=metadata.indices.*.stat* +-------------------------------------------------- +// TEST[s/^/PUT my-index-000001\n/] + +Responds: + +[source,console-result] +-------------------------------------------------- +{ + "metadata" : { + "indices" : { + "my-index-000001": {"state": "open"} + } + } +} +-------------------------------------------------- + +And the `**` wildcard can be used to include fields without knowing the +exact path of the field. For example, we can return the Lucene version +of every segment with this request: + +[source,console] +-------------------------------------------------- +GET /_cluster/state?filter_path=routing_table.indices.**.state +-------------------------------------------------- +// TEST[s/^/PUT my-index-000001\n/] + +Responds: + +[source,console-result] +-------------------------------------------------- +{ + "routing_table": { + "indices": { + "my-index-000001": { + "shards": { + "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] + } + } + } + } +} +-------------------------------------------------- + +It is also possible to exclude one or more fields by prefixing the filter with the char `-`: + +[source,console] +-------------------------------------------------- +GET /_count?filter_path=-_shards +-------------------------------------------------- +// TEST[setup:my_index] + +Responds: + +[source,console-result] +-------------------------------------------------- +{ + "count" : 5 +} +-------------------------------------------------- + +And for more control, both inclusive and exclusive filters can be combined in the same expression. In +this case, the exclusive filters will be applied first and the result will be filtered again using the +inclusive filters: + +[source,console] +-------------------------------------------------- +GET /_cluster/state?filter_path=metadata.indices.*.state,-metadata.indices.logstash-* +-------------------------------------------------- +// TEST[s/^/PUT my-index-000001\nPUT my-index-000002\nPUT my-index-000003\nPUT logstash-2016.01\n/] + +Responds: + +[source,console-result] +-------------------------------------------------- +{ + "metadata" : { + "indices" : { + "my-index-000001" : {"state" : "open"}, + "my-index-000002" : {"state" : "open"}, + "my-index-000003" : {"state" : "open"} + } + } +} +-------------------------------------------------- + +Note that Elasticsearch sometimes returns directly the raw value of a field, +like the `_source` field. 
If you want to filter `_source` fields, you should +consider combining the already existing `_source` parameter (see +<> for more details) with the `filter_path` +parameter like this: + +[source,console] +-------------------------------------------------- +POST /library/_doc?refresh +{"title": "Book #1", "rating": 200.1} +POST /library/_doc?refresh +{"title": "Book #2", "rating": 1.7} +POST /library/_doc?refresh +{"title": "Book #3", "rating": 0.1} +GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc +-------------------------------------------------- + +[source,console-result] +-------------------------------------------------- +{ + "hits" : { + "hits" : [ { + "_source":{"title":"Book #1"} + }, { + "_source":{"title":"Book #2"} + }, { + "_source":{"title":"Book #3"} + } ] + } +} +-------------------------------------------------- + + +[discrete] +=== Flat Settings + +The `flat_settings` flag affects rendering of the lists of settings. When the +`flat_settings` flag is `true`, settings are returned in a flat format: + +[source,console] +-------------------------------------------------- +GET my-index-000001/_settings?flat_settings=true +-------------------------------------------------- +// TEST[setup:my_index] + +Returns: + +[source,console-result] +-------------------------------------------------- +{ + "my-index-000001" : { + "settings": { + "index.number_of_replicas": "1", + "index.number_of_shards": "1", + "index.creation_date": "1474389951325", + "index.uuid": "n6gzFZTgS664GUfx0Xrpjw", + "index.version.created": ..., + "index.routing.allocation.include._tier_preference" : "data_content", + "index.provided_name" : "my-index-000001" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/1474389951325/$body.my-index-000001.settings.index\\\\.creation_date/] +// TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.my-index-000001.settings.index\\\\.uuid/] +// TESTRESPONSE[s/"index.version.created": \.\.\./"index.version.created": $body.my-index-000001.settings.index\\\\.version\\\\.created/] + +When the `flat_settings` flag is `false`, settings are returned in a more +human readable structured format: + +[source,console] +-------------------------------------------------- +GET my-index-000001/_settings?flat_settings=false +-------------------------------------------------- +// TEST[setup:my_index] + +Returns: + +[source,console-result] +-------------------------------------------------- +{ + "my-index-000001" : { + "settings" : { + "index" : { + "number_of_replicas": "1", + "number_of_shards": "1", + "creation_date": "1474389951325", + "uuid": "n6gzFZTgS664GUfx0Xrpjw", + "version": { + "created": ... + }, + "routing": { + "allocation": { + "include": { + "_tier_preference": "data_content" + } + } + }, + "provided_name" : "my-index-000001" + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/1474389951325/$body.my-index-000001.settings.index.creation_date/] +// TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.my-index-000001.settings.index.uuid/] +// TESTRESPONSE[s/"created": \.\.\./"created": $body.my-index-000001.settings.index.version.created/] + +By default `flat_settings` is set to `false`. + +[[fuzziness]] +[discrete] +=== Fuzziness + +Some queries and APIs support parameters to allow inexact _fuzzy_ matching, +using the `fuzziness` parameter. 
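For example (a minimal, hypothetical sketch: the index name `my-index-000001` and the `message` field are placeholders, not part of this change), a typo-tolerant `match` query can set `fuzziness` directly; the accepted values are described below:

[source,console]
----
GET /my-index-000001/_search
{
  "query": {
    "match": {
      "message": {
        "query": "quikc brwn fox",
        "fuzziness": "AUTO" <1>
      }
    }
  }
}
----
<1> `AUTO` picks the allowed edit distance from the length of each term, as explained below.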
+ +When querying `text` or `keyword` fields, `fuzziness` is interpreted as a +{wikipedia}/Levenshtein_distance[Levenshtein Edit Distance] +-- the number of one character changes that need to be made to one string to +make it the same as another string. + +The `fuzziness` parameter can be specified as: + +[horizontal] +`0`, `1`, `2`:: + +The maximum allowed Levenshtein Edit Distance (or number of edits) + +`AUTO`:: ++ +-- +Generates an edit distance based on the length of the term. +Low and high distance arguments may be optionally provided `AUTO:[low],[high]`. If not specified, +the default values are 3 and 6, equivalent to `AUTO:3,6` that make for lengths: + +`0..2`:: Must match exactly +`3..5`:: One edit allowed +`>5`:: Two edits allowed + +`AUTO` should generally be the preferred value for `fuzziness`. +-- + +[discrete] +[[common-options-error-options]] +=== Enabling stack traces + +By default when a request returns an error Elasticsearch doesn't include the +stack trace of the error. You can enable that behavior by setting the +`error_trace` url parameter to `true`. For example, by default when you send an +invalid `size` parameter to the `_search` API: + +[source,console] +---------------------------------------------------------------------- +POST /my-index-000001/_search?size=surprise_me +---------------------------------------------------------------------- +// TEST[s/surprise_me/surprise_me&error_trace=false/ catch:bad_request] +// Since the test system sends error_trace=true by default we have to override + +The response looks like: + +[source,console-result] +---------------------------------------------------------------------- +{ + "error" : { + "root_cause" : [ + { + "type" : "illegal_argument_exception", + "reason" : "Failed to parse int parameter [size] with value [surprise_me]" + } + ], + "type" : "illegal_argument_exception", + "reason" : "Failed to parse int parameter [size] with value [surprise_me]", + "caused_by" : { + "type" : "number_format_exception", + "reason" : "For input string: \"surprise_me\"" + } + }, + "status" : 400 +} +---------------------------------------------------------------------- + +But if you set `error_trace=true`: + +[source,console] +---------------------------------------------------------------------- +POST /my-index-000001/_search?size=surprise_me&error_trace=true +---------------------------------------------------------------------- +// TEST[catch:bad_request] + +The response looks like: + +[source,console-result] +---------------------------------------------------------------------- +{ + "error": { + "root_cause": [ + { + "type": "illegal_argument_exception", + "reason": "Failed to parse int parameter [size] with value [surprise_me]", + "stack_trace": "Failed to parse int parameter [size] with value [surprise_me]]; nested: IllegalArgumentException..." + } + ], + "type": "illegal_argument_exception", + "reason": "Failed to parse int parameter [size] with value [surprise_me]", + "stack_trace": "java.lang.IllegalArgumentException: Failed to parse int parameter [size] with value [surprise_me]\n at org.elasticsearch.rest.RestRequest.paramAsInt(RestRequest.java:175)...", + "caused_by": { + "type": "number_format_exception", + "reason": "For input string: \"surprise_me\"", + "stack_trace": "java.lang.NumberFormatException: For input string: \"surprise_me\"\n at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)..." 
+ } + }, + "status": 400 +} +---------------------------------------------------------------------- +// TESTRESPONSE[s/"stack_trace": "Failed to parse int parameter.+\.\.\."/"stack_trace": $body.error.root_cause.0.stack_trace/] +// TESTRESPONSE[s/"stack_trace": "java.lang.IllegalArgum.+\.\.\."/"stack_trace": $body.error.stack_trace/] +// TESTRESPONSE[s/"stack_trace": "java.lang.Number.+\.\.\."/"stack_trace": $body.error.caused_by.stack_trace/] diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 8752c26a20f33..f2a80f40f054e 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -602,8 +602,8 @@ tag::bulk-dynamic-templates[] `dynamic_templates`:: (Optional, map) A map from the full name of fields to the name of <. -Defaults to an empty map. If a name matches a dynamic template, then that template will be -applied regardless of other match predicates defined in the template. And if a field is +Defaults to an empty map. If a name matches a dynamic template, then that template will be +applied regardless of other match predicates defined in the template. And if a field is already defined in the mapping, then this parameter won't be used. end::bulk-dynamic-templates[] @@ -896,11 +896,11 @@ end::source-transforms[] tag::source-index-transforms[] The _source indices_ for the {transform}. It can be a single index, an index pattern (for example, `"my-index-*"`), an array of indices (for example, -`["my-index-000001", "my-index-000002"]`), or an array of index patterns (for -example, `["my-index-*", "my-other-index-*"]`. For remote indices use the syntax +`["my-index-000001", "my-index-000002"]`), or an array of index patterns (for +example, `["my-index-*", "my-other-index-*"]`. For remote indices use the syntax `"remote_name:index_name"`. -NOTE: If any indices are in remote clusters then the master node and at least +NOTE: If any indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role. end::source-index-transforms[] @@ -910,8 +910,8 @@ A query clause that retrieves a subset of data from the source index. See end::source-query-transforms[] tag::source-runtime-mappings-transforms[] -Definitions of search-time runtime fields that can be used by the transform. For -search runtime fields all data nodes, including remote nodes, must be 7.12 or +Definitions of search-time runtime fields that can be used by the transform. For +search runtime fields all data nodes, including remote nodes, must be 7.12 or later. end::source-runtime-mappings-transforms[] @@ -971,7 +971,7 @@ unique key. end::transform-latest[] tag::transform-retention[] -Defines a retention policy for the {transform}. Data that meets the defined +Defines a retention policy for the {transform}. Data that meets the defined criteria is deleted from the destination index. end::transform-retention[] @@ -984,7 +984,7 @@ The date field that is used to calculate the age of the document. end::transform-retention-time-field[] tag::transform-retention-time-max-age[] -Specifies the maximum age of a document in the destination index. Documents that +Specifies the maximum age of a document in the destination index. Documents that are older than the configured value are removed from the destination index. 
end::transform-retention-time-max-age[] @@ -1008,7 +1008,7 @@ end::transform-settings-docs-per-second[] tag::transform-settings-max-page-search-size[] Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker exceptions occur, the page size is dynamically -adjusted to a lower value. The minimum value is `10` and the maximum is `10,000`. +adjusted to a lower value. The minimum value is `10` and the maximum is `65,536`. The default value is `500`. end::transform-settings-max-page-search-size[] diff --git a/docs/reference/rest-api/cron-expressions.asciidoc b/docs/reference/rest-api/cron-expressions.asciidoc index d817974e154c6..4e89365acd3e3 100644 --- a/docs/reference/rest-api/cron-expressions.asciidoc +++ b/docs/reference/rest-api/cron-expressions.asciidoc @@ -1,4 +1,5 @@ -[[cron-expressions]] +[discrete] +[[api-cron-expressions]] === Cron expressions A cron expression is a string of the following form: @@ -16,7 +17,7 @@ All schedule times are in coordinated universal time (UTC); other timezones are TIP: You can use the <> command line tool to validate your cron expressions. - +[discrete] [[cron-elements]] ==== Cron expression elements @@ -51,6 +52,7 @@ Valid values: `1`-`7`, `SUN`-`SAT`, `sun`-`sat`, and the special characters `,` (Optional) Valid values: `1970`-`2099` and the special characters `,` `-` `*` `/` +[discrete] [[cron-special-characters]] ==== Cron special characters @@ -122,9 +124,11 @@ Friday of the month. Note that if you specify `3#5` and there are not 5 Tuesdays in a particular month, the schedule won't trigger that month. +[discrete] [[cron-expression-examples]] ==== Examples +[discrete] [[cron-example-daily]] ===== Setting daily triggers @@ -134,6 +138,7 @@ Trigger at 9:05 a.m. UTC every day. `0 5 9 * * ? 2020`:: Trigger at 9:05 a.m. UTC every day during the year 2020. +[discrete] [[cron-example-range]] ===== Restricting triggers to a range of days or times @@ -143,6 +148,7 @@ Trigger at 9:05 a.m. UTC Monday through Friday. `0 0-5 9 * * ?`:: Trigger every minute starting at 9:00 a.m. UTC and ending at 9:05 a.m. UTC every day. +[discrete] [[cron-example-interval]] ===== Setting interval triggers @@ -152,6 +158,7 @@ Trigger every 15 minutes starting at 9:00 a.m. UTC and ending at 9:45 a.m. UTC e `0 5 9 1/3 * ?`:: Trigger at 9:05 a.m. UTC every 3 days every month, starting on the first day of the month. +[discrete] [[cron-example-day]] ===== Setting schedules that trigger on a particular day @@ -169,6 +176,7 @@ Trigger at 9:05 a.m. UTC on the nearest weekday to the 15th of every month. `0 5 9 ? * 6#1`:: Trigger at 9:05 a.m. UTC on the first Friday of every month. +[discrete] [[cron-example-last]] ===== Setting triggers using last diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 87888b45d5b40..bd0d4ae6e1ee3 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -11,6 +11,7 @@ We are working on including more {es} APIs in this section. Some content might not be included yet. * <> +* <> * <> * <> * <> @@ -19,9 +20,10 @@ not be included yet. * <> * <> * <> -* <> +* <> * <> * <> +* <> * <> * <> * <> @@ -40,12 +42,14 @@ not be included yet. 
* <> * <> * <> +* <> * <> * <> * <> -- include::{es-repo-dir}/api-conventions.asciidoc[] +include::{es-repo-dir}/rest-api/common-options.asciidoc[] include::{es-repo-dir}/autoscaling/apis/autoscaling-apis.asciidoc[] include::{es-repo-dir}/cat.asciidoc[] include::{es-repo-dir}/cluster.asciidoc[] @@ -53,6 +57,7 @@ include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] include::{es-repo-dir}/data-streams/data-stream-apis.asciidoc[] include::{es-repo-dir}/docs.asciidoc[] include::{es-repo-dir}/ingest/apis/enrich/index.asciidoc[] +include::{es-repo-dir}/eql/eql-apis.asciidoc[] include::{es-repo-dir}/features/apis/features-apis.asciidoc[] include::{es-repo-dir}/fleet/index.asciidoc[] include::{es-repo-dir}/text-structure/apis/find-structure.asciidoc[leveloffset=+1] @@ -75,6 +80,7 @@ include::{es-repo-dir}/searchable-snapshots/apis/searchable-snapshots-apis.ascii include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{es-repo-dir}/snapshot-restore/apis/snapshot-restore-apis.asciidoc[] include::{es-repo-dir}/slm/apis/slm-api.asciidoc[] +include::{es-repo-dir}/sql/apis/sql-apis.asciidoc[] include::{es-repo-dir}/transform/apis/index.asciidoc[] include::usage.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 3ab37721128d9..6884dca24499a 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -142,10 +142,6 @@ Example response: "available" : true, "enabled" : true }, - "vectors" : { - "available" : true, - "enabled" : true - }, "voting_only" : { "available" : true, "enabled" : true diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index 1f98b2ffa0c95..6ce1725a49c1e 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -272,12 +272,6 @@ GET /_xpack/usage "available" : true, "enabled" : true }, - "vectors" : { - "available" : true, - "enabled" : true, - "dense_vector_fields_count" : 0, - "dense_vector_dims_avg_count" : 0 - }, "voting_only" : { "available" : true, "enabled" : true diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index 2027c971928d6..98b945b47950f 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -19,8 +19,9 @@ experimental[] [[rollup-get-rollup-index-caps-prereqs]] ==== {api-prereq-title} -* If the {es} {security-features} are enabled, you must have the `read` index -privilege on the index that stores the rollup results. For more information, see +* If the {es} {security-features} are enabled, you must have any of the `read`, +`view_index_metadata`, or `manage` <> +on the index that stores the rollup results. For more information, see <>. [[rollup-get-rollup-index-caps-desc]] @@ -46,7 +47,7 @@ Wildcard (`*`) expressions are supported. ==== {api-examples-title} Imagine we have an index named `sensor-1` full of raw data. We know that the -data will grow over time, so there will be a `sensor-2`, `sensor-3`, etc. +data will grow over time, so there will be a `sensor-2`, `sensor-3`, etc. Let's create a {rollup-job} that stores its data in `sensor_rollup`: [source,console] @@ -145,7 +146,7 @@ original rollup configuration, but formatted differently. First, there are some house-keeping details: the {rollup-job} ID, the index that holds the rolled data, the index pattern that the job was targeting. 
-Next it shows a list of fields that contain data eligible for rollup searches. +Next it shows a list of fields that contain data eligible for rollup searches. Here we see four fields: `node`, `temperature`, `timestamp` and `voltage`. Each of these fields list the aggregations that are possible. For example, you can use a min, max, or sum aggregation on the `temperature` field, but only a @@ -164,4 +165,3 @@ instead of explicit indices: GET /*_rollup/_rollup/data -------------------------------------------------- // TEST[continued] - diff --git a/docs/reference/scripting/apis/create-stored-script-api.asciidoc b/docs/reference/scripting/apis/create-stored-script-api.asciidoc index 782597781fc8d..a53472ce13ba9 100644 --- a/docs/reference/scripting/apis/create-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/create-stored-script-api.asciidoc @@ -82,34 +82,14 @@ Contains the script or search template, its parameters, and its language. `source`:: (Required, string or object) -Script or search template. +For scripts, a string containing the script. ++ +For search templates, an object containing the search template. The object +supports the same parameters as the <>'s request body. +Also supports https://mustache.github.io/[Mustache] variables. See +<>. `params`:: (Optional, object) Parameters for the script or search template. ==== - -[[create-stored-script-api-example]] -==== {api-examples-title} - -The following request stores a search template. Search templates must use a -`lang` of `mustache`. - -[source,console] ----- -PUT _scripts/my-search-template -{ - "script": { - "lang": "mustache", - "source": { - "from": "{{from}}{{^from}}0{{/from}}", - "size": "{{size}}{{^size}}10{{/size}}", - "query": { - "match": { - "content": "{{query_string}}" - } - } - } - } -} ----- diff --git a/docs/reference/scripting/expression.asciidoc b/docs/reference/scripting/expression.asciidoc index 61301fa873b40..84c07ebecaf31 100644 --- a/docs/reference/scripting/expression.asciidoc +++ b/docs/reference/scripting/expression.asciidoc @@ -137,5 +137,5 @@ e.g. based on geolocation of the user. There are a few limitations relative to other script languages: -* Only numeric, boolean, date, and geo_point fields may be accessed +* Only numeric, `boolean`, `date`, and `geo_point` fields may be accessed * Stored fields are not available diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 354d6af6df32d..a0f57d131d28c 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -4,7 +4,7 @@ Search APIs are used to search and aggregate data stored in {es} indices and data streams. For an overview and related tutorials, see <>. -Most search APIs support <>, with the +Most search APIs support <>, with the exception of the <>. [discrete] @@ -35,20 +35,14 @@ exception of the <>. [[search-template-apis]] === Search templates -* <> +* <> * <> +* <> [discrete] -[[eql-search-apis]] -=== EQL search - -For an overview of EQL and related tutorials, see <>. 
- -* <> -* <> -* <> -* <> - +[[geo-search-apis]] +=== Geospatial search +* <> include::search/search.asciidoc[] @@ -60,7 +54,11 @@ include::search/scroll-api.asciidoc[] include::search/clear-scroll-api.asciidoc[] -include::search/search-template.asciidoc[] +include::search/search-template-api.asciidoc[] + +include::search/multi-search-template-api.asciidoc[] + +include::search/render-search-template-api.asciidoc[] include::search/search-shards.asciidoc[] @@ -68,14 +66,6 @@ include::search/suggesters.asciidoc[] include::search/multi-search.asciidoc[] -include::eql/eql-search-api.asciidoc[] - -include::eql/get-async-eql-search-api.asciidoc[] - -include::eql/get-async-eql-status-api.asciidoc[] - -include::eql/delete-async-eql-search-api.asciidoc[] - include::search/count.asciidoc[] include::search/validate.asciidoc[] @@ -89,3 +79,5 @@ include::search/profile.asciidoc[] include::search/field-caps.asciidoc[] include::search/rank-eval.asciidoc[] + +include::search/search-vector-tile-api.asciidoc[] diff --git a/docs/reference/search/async-search.asciidoc b/docs/reference/search/async-search.asciidoc index 6af34d2069f5f..e38bf9543f3f5 100644 --- a/docs/reference/search/async-search.asciidoc +++ b/docs/reference/search/async-search.asciidoc @@ -132,6 +132,11 @@ nor search requests that only include the <>. {ccs-cap} is supported only with <> set to `false`. +WARNING: By default, {es} doesn't allow to store an async search response +larger than 10Mb, and an attempt to do this results in an error. The maximum +allowed size for a stored async search response can be set by changing the +`search.max_async_search_response_size` cluster level setting. + [[get-async-search]] ==== Get async search diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index dd96702a6390c..152571a25ec17 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -37,7 +37,7 @@ that query. The query can either be provided using a simple query string as a parameter, or using the <> defined within the request body. -The count API supports <>. You can run a single +The count API supports <>. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard id group, a replica diff --git a/docs/reference/search/multi-search-template-api.asciidoc b/docs/reference/search/multi-search-template-api.asciidoc new file mode 100644 index 0000000000000..754132c0e5377 --- /dev/null +++ b/docs/reference/search/multi-search-template-api.asciidoc @@ -0,0 +1,163 @@ +[[multi-search-template]] +=== Multi search template API +++++ +Multi search template +++++ + +Runs multiple <> with a single +request. 
+ +//// +[source,console] +---- +PUT _scripts/my-search-template +{ + "script": { + "lang": "mustache", + "source": { + "query": { + "match": { + "message": "{{query_string}}" + } + }, + "from": "{{from}}", + "size": "{{size}}" + }, + "params": { + "query_string": "My query string" + } + } +} + +PUT my-index/_doc/1?refresh +{ + "message": "hello world" +} +---- +// TESTSETUP +//// + +[source,console] +---- +GET my-index/_msearch/template +{ } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} +---- +// TEST[s/my-other-search-template/my-search-template/] + +[[multi-search-template-api-request]] +==== {api-request-title} + +`GET /_msearch/template` + +`GET _msearch/template` + +`POST /_msearch/template` + +`POST _msearch/template` + +[[multi-search-template-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `read` +<> for the target data stream, index, +or alias. For cross-cluster search, see <>. + +[[multi-search-template-api-path-params]] +==== {api-path-parms-title} + +``:: +(Optional, string) Comma-separated list of data streams, indices, and aliases to +search. Supports wildcards (`*`). To search all data streams and indices, omit +this parameter or use `*`. + +[[multi-search-template-api-query-params]] +==== {api-query-parms-title} + +`ccs_minimize_roundtrips`:: +(Optional, Boolean) If `true`, network round-trips are minimized for +cross-cluster search requests. Defaults to `true`. + +`max_concurrent_searches`:: +(Optional, integer) Maximum number of concurrent searches the API can run. +Defaults to +max(1, (# of <> * +min(<>, 10)))+. + +`rest_total_hits_as_int`:: +(Optional, Boolean) If `true`, the response returns `hits.total` as an integer. +If false, it returns `hits.total` as an object. Defaults to `false`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=search_type] + +`typed_keys`:: +(Optional, Boolean) If `true`, the response prefixes aggregation and suggester +names with their respective types. Defaults to `false`. + +[role="child_attributes"] +[[multi-search-template-api-request-body]] +==== {api-request-body-title} + +The request body must be newline-delimited JSON (NDJSON) in the following +format: + +[source,js] +---- +
<header>\n +<body>\n +<header>\n +<body>\n +---- +// NOTCONSOLE + +Each `<header>` and `<body>` pair represents a search request. + +The `<header>` supports the same parameters as the <>'s `<header>
`. The `` supports the same parameters as the +<>'s request body. + +include::multi-search.asciidoc[tag=header-params] + +``:: +(Request, object) Parameters for the search. ++ +===== +include::search-template-api.asciidoc[tag=body-params] +===== + +[[multi-search-template-api-response-codes]] +==== {api-response-codes-title} + +The API returns a `400` status code only if the request itself fails. If one or +more searches in the request fail, the API returns a `200` status code with an +`error` object for each failed search in the response. + +[[multi-search-template-api-response-body]] +==== {api-response-body-title} + +`responses`:: +(array of objects) Results for each search, returned in the order submitted. +Each object uses the same properties as the <>'s +response. ++ +If a search fails, the response includes an `error` object containing an error +message. + +[[multi-search-template-api-curl-requests]] +==== curl requests + +If a providing text file or text input to `curl`, use the `--data-binary` flag +instead of `-d` to preserve newlines. + +[source,sh] +---- +$ cat requests +{ "index": "my-index" } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ "index": "my-other-index" } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} + +$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo +---- +// NOTCONSOLE diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 4dab825323bef..563e76f5ab0cc 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -168,9 +168,10 @@ respective types in the response. The request body contains a newline-delimited list of search `
<header>` and search `<body>` objects. +// tag::header-params[] +`<header>
`:: (Required, object) -Contains parameters used to limit or change the subsequent search body request. +Parameters used to limit or change the search. + This object is required for each search body but can be empty (`{}`) or a blank line. @@ -235,6 +236,7 @@ Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate. -- ==== +// end::header-params[] ``:: (Optional, object) @@ -335,76 +337,6 @@ all search requests. See <> -[[template-msearch]] -==== Template support - -Much like described in <> for the _search resource, _msearch -also provides support for templates. Submit them like follows for inline -templates: - -[source,console] ------------------------------------------------ -GET _msearch/template -{"index" : "my-index-000001"} -{ "source" : "{ \"query\": { \"match\": { \"message\" : \"{{keywords}}\" } } } }", "params": { "query_type": "match", "keywords": "some message" } } -{"index" : "my-index-000001"} -{ "source" : "{ \"query\": { \"match_{{template}}\": {} } }", "params": { "template": "all" } } ------------------------------------------------ -// TEST[setup:my_index] - - -You can also create search templates: - -[source,console] ------------------------------------------- -POST /_scripts/my_template_1 -{ - "script": { - "lang": "mustache", - "source": { - "query": { - "match": { - "message": "{{query_string}}" - } - } - } - } -} ------------------------------------------- -// TEST[setup:my_index] - - -[source,console] ------------------------------------------- -POST /_scripts/my_template_2 -{ - "script": { - "lang": "mustache", - "source": { - "query": { - "term": { - "{{field}}": "{{value}}" - } - } - } - } -} ------------------------------------------- -// TEST[continued] - -You can use search templates in a _msearch: - -[source,console] ------------------------------------------------ -GET _msearch/template -{"index" : "main"} -{ "id": "my_template_1", "params": { "query_string": "some message" } } -{"index" : "main"} -{ "id": "my_template_2", "params": { "field": "user", "value": "test" } } ------------------------------------------------ -// TEST[continued] - - [[multi-search-partial-responses]] ==== Partial responses diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 048a36d1803b7..0fa13008c4396 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -129,3 +129,63 @@ The API returns the following response: <1> If true, all search contexts associated with the point-in-time id are successfully closed <2> The number of search contexts have been successfully closed + +[discrete] +[[search-slicing]] +=== Search slicing + +When paging through a large number of documents, it can be helpful to split the search into multiple slices +to consume them independently: + +[source,console] +-------------------------------------------------- +GET /_search +{ + "slice": { + "id": 0, <1> + "max": 2 <2> + }, + "query": { + "match": { + "message": "foo" + } + }, + "pit": { + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==" + } +} + +GET /_search +{ + "slice": { + "id": 1, + "max": 2 + }, + "pit": { + "id": 
"46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==" + }, + "query": { + "match": { + "message": "foo" + } + } +} +-------------------------------------------------- +// TEST[skip:both calls will throw errors] + +<1> The id of the slice +<2> The maximum number of slices + +The result from the first request returns documents belonging to the first slice (id: 0) and the +result from the second request returns documents in the second slice. Since the maximum number of +slices is set to 2 the union of the results of the two requests is equivalent to the results of a +point-in-time search without slicing. By default the splitting is done first on the shards, then +locally on each shard. The local splitting partitions the shard into contiguous ranges based on +Lucene document IDs. + +For instance if the number of shards is equal to 2 and the user requested 4 slices then the slices +0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. + +IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, +then slices can overlap and miss documents. This is because the splitting criterion is based on +Lucene document IDs, which are not stable across changes to the index. diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 82acb624f42fc..2bb7640c9288e 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -805,7 +805,8 @@ This yields the following aggregation profile output: }, "debug": { "total_buckets": 1, - "result_strategy": "long_terms" + "result_strategy": "long_terms", + "built_buckets": 1 } }, { @@ -826,6 +827,9 @@ This yields the following aggregation profile output: "post_collection": 1584, "post_collection_count": 1 }, + "debug": { + "built_buckets": 1 + }, "children": [ { "type": "NumericTermsAggregator", @@ -847,7 +851,8 @@ This yields the following aggregation profile output: }, "debug": { "total_buckets": 1, - "result_strategy": "long_terms" + "result_strategy": "long_terms", + "built_buckets": 1 } } ] diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index f2b45990e8837..4999776c93f6f 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -205,7 +205,7 @@ GET /my-index-000001/_rank_eval <3> a reference to a previously defined template <4> the parameters to use to fill the template -It is also possible to use <> in the cluster state by referencing their id in the templates section. +You can also use a <>. [source,js] -------------------------------- diff --git a/docs/reference/search/render-search-template-api.asciidoc b/docs/reference/search/render-search-template-api.asciidoc new file mode 100644 index 0000000000000..1f259dddf6879 --- /dev/null +++ b/docs/reference/search/render-search-template-api.asciidoc @@ -0,0 +1,89 @@ +[[render-search-template-api]] +=== Render search template API +++++ +Render search template +++++ + +Renders a <> as a <>. 
+ +//// +[source,console] +---- +PUT _scripts/my-search-template +{ + "script": { + "lang": "mustache", + "source": { + "query": { + "match": { + "message": "{{query_string}}" + } + }, + "from": "{{from}}", + "size": "{{size}}" + }, + "params": { + "query_string": "My query string" + } + } +} +---- +// TESTSETUP +//// + +[source,console] +---- +POST _render/template +{ + "id": "my-search-template", + "params": { + "query_string": "hello world", + "from": 20, + "size": 10 + } +} +---- + +[[render-search-template-api-request]] +==== {api-request-title} + +`GET _render/template` + +`GET _render/template/` + +`POST _render/template` + +`POST _render/template/` + +[[render-search-template-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `read` +<> for at least one index pattern. + +[[render-search-template-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required*, string) ID of the search template to render. If no `source` is +specified, this or the `id` request body parameter is required. + +[[render-search-template-api-request-body]] +==== {api-request-body-title} + +`id`:: +(Required*, string) ID of the search template to render. If no `source` is +specified, this or the `` request path parameter is required. If +you specify both this parameter and the `` parameter, the API uses +only ``. + +`params`:: +(Optional, object) Key-value pairs used to replace Mustache variables in the +template. The key is the variable name. The value is the variable value. + +`source`:: +(Required*, object) An inline search template. Supports the same parameters as +the <>'s request body. These parameters also support +https://mustache.github.io/[Mustache] variables. If no `id` or `` +is specified, this parameter is required. diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc new file mode 100644 index 0000000000000..d737930956ca3 --- /dev/null +++ b/docs/reference/search/search-template-api.asciidoc @@ -0,0 +1,148 @@ +[[search-template-api]] +=== Search template API +++++ +Search template +++++ + +Runs a search with a <>. + +//// +[source,console] +---- +PUT _scripts/my-search-template +{ + "script": { + "lang": "mustache", + "source": { + "query": { + "match": { + "message": "{{query_string}}" + } + }, + "from": "{{from}}", + "size": "{{size}}" + }, + "params": { + "query_string": "My query string" + } + } +} + +PUT my-index/_doc/1?refresh +{ + "message": "hello world" +} +---- +// TESTSETUP +//// + +[source,console] +---- +GET my-index/_search/template +{ + "id": "my-search-template", + "params": { + "query_string": "hello world", + "from": 0, + "size": 10 + } +} +---- + +[[search-template-api-request]] +==== {api-request-title} + +`GET /_search/template` + +`GET _search/template` + +`POST /_search/template` + +`POST _search/template` + +[[search-template-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `read` +<> for the target data stream, index, +or alias. For cross-cluster search, see <>. + +[[search-template-api-path-params]] +==== {api-path-parms-title} + +``:: +(Optional, string) Comma-separated list of data streams, indices, and aliases to +search. Supports wildcards (`*`). To search all data streams and indices, omit +this parameter or use `*`. + +[[search-template-api-query-params]] +==== {api-query-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] ++ +Defaults to `true`. 
+ +`ccs_minimize_roundtrips`:: +(Optional, Boolean) If `true`, network round-trips are minimized for +cross-cluster search requests. Defaults to `true`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + +`explain`:: +(Optional, Boolean) If `true`, the response includes additional details about +score computation as part of a hit. Defaults to `false`. + +`ignore_throttled`:: +(Optional, Boolean) If `true`, specified concrete, expanded, or aliased indices +are not included in the response when throttled. Defaults to `true`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=preference] + +`rest_total_hits_as_int`:: +(Optional, Boolean) If `true`, the response returns `hits.total` as an integer. +If false, it returns `hits.total` as an object. Defaults to `false`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=routing] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=scroll] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=search_type] + +`typed_keys`:: +(Optional, Boolean) If `true`, the response prefixes aggregation and suggester +names with their respective types. Defaults to `false`. + +[[search-template-api-request-body]] +==== {api-request-body-title} + +// tag::body-params[] +`explain`:: +(Optional, Boolean) If `true`, returns detailed information about score +calculation as part of each hit. Defaults to `false`. +// end::body-params[] ++ +If you specify both this and the `explain` query parameter, the API uses only +the query parameter. + +// tag::body-params[] +`id`:: +(Required*, string) ID of the search template to use. If no `source` is +specified, this parameter is required. + +`params`:: +(Optional, object) Key-value pairs used to replace Mustache variables in the +template. The key is the variable name. The value is the variable value. + +`profile`:: +(Optional, Boolean) If `true`, the query execution is profiled. Defaults to +`false`. + +`source`:: +(Required*, object) An inline search template. Supports the same parameters as +the <>'s request body. Also supports +https://mustache.github.io/[Mustache] variables. ++ +If no `id` is specified, this parameter is required. +// end::body-params[] diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc deleted file mode 100644 index 7d6c3ea39402a..0000000000000 --- a/docs/reference/search/search-template.asciidoc +++ /dev/null @@ -1,725 +0,0 @@ -[[search-template]] -=== Search template API -++++ -Search template -++++ - -Allows you to use the mustache language to pre render search requests. - -[source,console] ------------------------------------------- -GET _search/template -{ - "source" : { - "query": { "match" : { "{{my_field}}" : "{{my_value}}" } }, - "size" : "{{my_size}}" - }, - "params" : { - "my_field" : "message", - "my_value" : "foo", - "my_size" : 5 - } -} ------------------------------------------- -// TEST[setup:my_index] - -[[search-template-api-request]] -==== {api-request-title} - -`GET _search/template` - -[[search-template-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `read` -<> for the target data stream, index, -or alias. For cross-cluster search, see <>. 
- -[[search-template-api-desc]] -==== {api-description-title} - -The `/_search/template` endpoint allows you to use the mustache language to pre- -render search requests, before they are executed and fill existing templates -with template parameters. - -For more information on how Mustache templating and what kind of templating you -can do with it check out the https://mustache.github.io/mustache.5.html[online -documentation of the mustache project]. - -NOTE: The mustache language is implemented in {es} as a sandboxed scripting -language, hence it obeys settings that may be used to enable or disable scripts -per type and context as described in the -<>. - - -[[search-template-api-path-params]] -==== {api-path-parms-title} - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index] - - -[[search-template-api-query-params]] -==== {api-query-parms-title} - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] -+ -Defaults to `true`. - -`ccs_minimize_roundtrips`:: - (Optional, Boolean) If `true`, network round-trips are minimized for - cross-cluster search requests. Defaults to `true`. - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] - -`explain`:: - (Optional, Boolean) If `true`, the response includes additional details about - score computation as part of a hit. Defaults to `false`. - -`ignore_throttled`:: - (Optional, Boolean) If `true`, specified concrete, expanded or aliased indices - are not included in the response when throttled. Defaults to `true`. - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=preference] - -`profile`:: - (Optional, Boolean) If `true`, the query execution is profiled. Defaults - to `false`. - -`rest_total_hits_as_int`:: - (Optional, Boolean) If `true`, `hits.total` are rendered as an integer in - the response. Defaults to `false`. - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=routing] - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=scroll] - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=search_type] - -`typed_keys`:: - (Optional, Boolean) If `true`, aggregation and suggester names are - prefixed by their respective types in the response. Defaults to `false`. - - -[[search-template-api-request-body]] -==== {api-request-body-title} - -The API request body must contain the search definition template and its parameters. - - -[[search-template-api-example]] -==== {api-examples-title} - - -[[pre-registered-templates]] -===== Store a search template - -To store a search template, use the <>. Specify `mustache` as the `lang`. - -[source,console] ------------------------------------------- -POST _scripts/ -{ - "script": { - "lang": "mustache", - "source": { - "query": { - "match": { - "title": "{{query_string}}" - } - } - } - } -} ------------------------------------------- -// TEST[continued] - -////////////////////////// - -The API returns the following result if the template has been successfully -created: - -[source,console-result] --------------------------------------------------- -{ - "acknowledged" : true -} --------------------------------------------------- - -////////////////////////// - - -To retrieve the template, use the <>. 
- -[source,console] ------------------------------------------- -GET _scripts/ ------------------------------------------- -// TEST[continued] - -The API returns: - -[source,console-result] ------------------------------------------- -{ - "script" : { - "lang" : "mustache", - "source" : """{"query":{"match":{"title":"{{query_string}}"}}}""", - "options": { - "content_type" : "application/json;charset=utf-8" - } - }, - "_id": "", - "found": true -} ------------------------------------------- - -To delete the template, use the <>. - -[source,console] ------------------------------------------- -DELETE _scripts/ ------------------------------------------- -// TEST[continued] - -[[use-registered-templates]] -===== Using a stored search template - -To use a stored template at search time send the following request: - -[source,console] ------------------------------------------- -GET _search/template -{ - "id": "", <1> - "params": { - "query_string": "search for these words" - } -} ------------------------------------------- -// TEST[catch:missing] -<1> Name of the stored template script. - - -[[_validating_templates]] -==== Validating a search template - -A template can be rendered in a response with given parameters by using the -following request: - -[source,console] ------------------------------------------- -GET _render/template -{ - "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}", - "params": { - "statuses" : { - "status": [ "pending", "published" ] - } - } -} ------------------------------------------- - - -The API returns the rendered template: - -[source,console-result] ------------------------------------------- -{ - "template_output": { - "query": { - "terms": { - "status": [ <1> - "pending", - "published" - ] - } - } - } -} ------------------------------------------- - -<1> `status` array has been populated with values from the `params` object. - - -Stored templates can also be rendered by calling the following request: - -[source,js] ------------------------------------------- -GET _render/template/ -{ - "params": { - "..." 
- } -} ------------------------------------------- -// NOTCONSOLE - -[[search-template-explain-parameter]] -===== Using the explain parameter - -You can use the `explain` parameter when running a template: - -[source,console] ------------------------------------------- -GET _search/template -{ - "id": "my_template", - "params": { - "status": [ "pending", "published" ] - }, - "explain": true -} ------------------------------------------- -// TEST[catch:missing] - - -[[search-template-profile-parameter]] -===== Profiling - -You can use the `profile` parameter when running a template: - -[source,console] ------------------------------------------- -GET _search/template -{ - "id": "my_template", - "params": { - "status": [ "pending", "published" ] - }, - "profile": true -} ------------------------------------------- -// TEST[catch:missing] - - -[[search-template-query-string-single]] -===== Filling in a query string with a single value - -[source,console] ------------------------------------------- -GET _search/template -{ - "source": { - "query": { - "term": { - "message": "{{query_string}}" - } - } - }, - "params": { - "query_string": "search for these words" - } -} ------------------------------------------- -// TEST[setup:my_index] - -[[search-template-converting-to-json]] -===== Converting parameters to JSON - -The `{{#toJson}}parameter{{/toJson}}` function can be used to convert parameters -like maps and array to their JSON representation: - -[source,console] ------------------------------------------- -GET _search/template -{ - "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}", - "params": { - "statuses" : { - "status": [ "pending", "published" ] - } - } -} ------------------------------------------- - -which is rendered as: - -[source,js] ------------------------------------------- -{ - "query": { - "terms": { - "status": [ - "pending", - "published" - ] - } - } -} ------------------------------------------- -// NOTCONSOLE - -A more complex example substitutes an array of JSON objects: - -[source,console] ------------------------------------------- -GET _search/template -{ - "source": "{\"query\":{\"bool\":{\"must\": {{#toJson}}clauses{{/toJson}} }}}", - "params": { - "clauses": [ - { "term": { "user" : "foo" } }, - { "term": { "user" : "bar" } } - ] - } -} ------------------------------------------- - -which is rendered as: - -[source,js] ------------------------------------------- -{ - "query": { - "bool": { - "must": [ - { - "term": { - "user": "foo" - } - }, - { - "term": { - "user": "bar" - } - } - ] - } - } -} ------------------------------------------- -// NOTCONSOLE - -[[search-template-concatenate-array]] -===== Concatenating array of values - -The `{{#join}}array{{/join}}` function can be used to concatenate the -values of an array as a comma delimited string: - -[source,console] ------------------------------------------- -GET _search/template -{ - "source": { - "query": { - "match": { - "emails": "{{#join}}emails{{/join}}" - } - } - }, - "params": { - "emails": [ "username@email.com", "lastname@email.com" ] - } -} ------------------------------------------- - -which is rendered as: - -[source,js] ------------------------------------------- -{ - "query" : { - "match" : { - "emails" : "username@email.com,lastname@email.com" - } - } -} ------------------------------------------- -// NOTCONSOLE - -The function also accepts a custom delimiter: - -[source,console] ------------------------------------------- -GET _search/template -{ - "source": { - "query": { 
- "range": { - "born": { - "gte" : "{{date.min}}", - "lte" : "{{date.max}}", - "format": "{{#join delimiter='||'}}date.formats{{/join delimiter='||'}}" - } - } - } - }, - "params": { - "date": { - "min": "2016", - "max": "31/12/2017", - "formats": ["dd/MM/yyyy", "yyyy"] - } - } -} ------------------------------------------- - -which is rendered as: - -[source,js] ------------------------------------------- -{ - "query": { - "range": { - "born": { - "gte": "2016", - "lte": "31/12/2017", - "format": "dd/MM/yyyy||yyyy" - } - } - } -} - ------------------------------------------- -// NOTCONSOLE - -[[search-template-default-values]] -===== Default values - -A default value is written as `{{var}}{{^var}}default{{/var}}` for instance: - -[source,js] ------------------------------------------- -{ - "source": { - "query": { - "range": { - "line_no": { - "gte": "{{start}}", - "lte": "{{end}}{{^end}}20{{/end}}" - } - } - } - }, - "params": { ... } -} ------------------------------------------- -// NOTCONSOLE - -When `params` is `{ "start": 10, "end": 15 }` this query would be rendered as: - -[source,js] ------------------------------------------- -{ - "range": { - "line_no": { - "gte": "10", - "lte": "15" - } - } -} ------------------------------------------- -// NOTCONSOLE - -But when `params` is `{ "start": 10 }` this query would use the default value -for `end`: - -[source,js] ------------------------------------------- -{ - "range": { - "line_no": { - "gte": "10", - "lte": "20" - } - } -} ------------------------------------------- -// NOTCONSOLE - -[[search-template-conditional-clauses]] -===== Conditional clauses - -Conditional clauses cannot be expressed using the JSON form of the template. -Instead, the template *must* be passed as a string. For instance, let's say -we wanted to run a `match` query on the `line` field, and optionally wanted -to filter by line numbers, where `start` and `end` are optional. - -The `params` would look like: - -[source,js] ------------------------------------------- -{ - "params": { - "text": "words to search for", - "line_no": { <1> - "start": 10, - "end": 20 - } - } -} ------------------------------------------- -// NOTCONSOLE -<1> The `line_no`, `start`, and `end` parameters are optional. 
- -When written as a query, the template would include invalid JSON, such as -section markers like `{{#line_no}}`: - -[source,js] ------------------------------------------- -{ - "query": { - "bool": { - "must": { - "match": { - "line": "{{text}}" <1> - } - }, - "filter": { - {{#line_no}} <2> - "range": { - "line_no": { - {{#start}} <3> - "gte": "{{start}}" <4> - {{#end}},{{/end}} <5> - {{/start}} - {{#end}} <6> - "lte": "{{end}}" <7> - {{/end}} - } - } - {{/line_no}} - } - } - } -} ------------------------------------------- -// NOTCONSOLE -<1> Fill in the value of param `text` -<2> Include the `range` filter only if `line_no` is specified -<3> Include the `gte` clause only if `line_no.start` is specified -<4> Fill in the value of param `line_no.start` -<5> Add a comma after the `gte` clause only if `line_no.start` - AND `line_no.end` are specified -<6> Include the `lte` clause only if `line_no.end` is specified -<7> Fill in the value of param `line_no.end` - -Because search templates cannot include invalid JSON, you can pass the same -query as a string instead: - -[source,js] --------------------- -"source": "{\"query\":{\"bool\":{\"must\":{\"match\":{\"line\":\"{{text}}\"}},\"filter\":{{{#line_no}}\"range\":{\"line_no\":{{{#start}}\"gte\":\"{{start}}\"{{#end}},{{/end}}{{/start}}{{#end}}\"lte\":\"{{end}}\"{{/end}}}}{{/line_no}}}}}}" --------------------- -// NOTCONSOLE - - -[[search-template-encode-urls]] -===== Encoding URLs - -The `{{#url}}value{{/url}}` function can be used to encode a string value -in a HTML encoding form as defined in by the -https://www.w3.org/TR/html4/[HTML specification]. - -As an example, it is useful to encode a URL: - -[source,console] ------------------------------------------- -GET _render/template -{ - "source": { - "query": { - "term": { - "http_access_log": "{{#url}}{{host}}/{{page}}{{/url}}" - } - } - }, - "params": { - "host": "https://www.elastic.co/", - "page": "learn" - } -} ------------------------------------------- - - -The previous query will be rendered as: - -[source,console-result] ------------------------------------------- -{ - "template_output": { - "query": { - "term": { - "http_access_log": "https%3A%2F%2Fwww.elastic.co%2F%2Flearn" - } - } - } -} ------------------------------------------- - - -[[multi-search-template]] -=== Multi search template API -++++ -Multi search template -++++ - -Allows to execute several search template requests. - -[[multi-search-template-api-request]] -==== {api-request-title} - -`GET _msearch/template` - -[[multi-search-template-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `read` -<> for the target data stream, index, -or alias. For cross-cluster search, see <>. - -[[multi-search-template-api-desc]] -==== {api-description-title} - -Allows to execute several search template requests within the same API using the -`_msearch/template` endpoint. - -The format of the request is similar to the <> format: - -[source,js] --------------------------------------------------- -header\n -body\n -header\n -body\n --------------------------------------------------- -// NOTCONSOLE - -The header part supports the same `index`, `search_type`, `preference`, and -`routing` options as the Multi Search API. - -The body includes a search template body request and supports inline, stored and -file templates. 
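For instance, the newline-delimited format described above could mix an inline
template and a stored template in one request. The following is only a sketch;
the index names, stored template ID, and parameter values are assumed:

[source,console]
----
GET my-index/_msearch/template
{ }
{ "source": { "query": { "match": { "message": "{{query_string}}" } } }, "params": { "query_string": "hello world" } }
{ "index": "my-other-index" }
{ "id": "my-search-template", "params": { "query_string": "hello again" } }
----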
- - -[[multi-search-template-api-example]] -==== {api-examples-title} - -[source,js] --------------------------------------------------- -$ cat requests -{"index": "test"} -{"source": {"query": {"match": {"user" : "{{username}}" }}}, "params": {"username": "john"}} <1> -{"source": {"query": {"{{query_type}}": {"name": "{{name}}" }}}, "params": {"query_type": "match_phrase_prefix", "name": "Smith"}} -{"index": "_all"} -{"id": "template_1", "params": {"query_string": "search for these words" }} <2> - -$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo --------------------------------------------------- -// NOTCONSOLE -// Not converting to console because this shows how curl works -<1> Inline search template request - -<2> Search template request based on a stored template - -The response returns a `responses` array, which includes the search template -response for each search template request matching its order in the original -multi search template request. If there was a complete failure for that specific -search template request, an object with `error` message will be returned in -place of the actual search response. diff --git a/docs/reference/search/search-vector-tile-api.asciidoc b/docs/reference/search/search-vector-tile-api.asciidoc new file mode 100644 index 0000000000000..c280c73768e0a --- /dev/null +++ b/docs/reference/search/search-vector-tile-api.asciidoc @@ -0,0 +1,755 @@ +[[search-vector-tile-api]] +=== Vector tile search API +++++ +Vector tile search +++++ + +experimental::[] + +Searches a vector tile for geospatial values. Returns results as a binary +https://docs.mapbox.com/vector-tiles/specification[Mapbox vector tile]. + +//// +[source,console] +---- +PUT my-index +{ + "mappings": { + "properties": { + "my-geo-field": { + "type": "geo_point" + } + } + } +} + +PUT my-index/_doc/0?refresh +{ + "my-geo-field": "37.3864953,-122.0863176" +} +---- +//// + +[source,console] +---- +GET my-index/_mvt/my-geo-field/15/5271/12710 +---- +// TEST[continued] + +[[search-vector-tile-api-request]] +==== {api-request-title} + +`GET /_mvt////` + +`POST /_mvt////` + +[[search-vector-tile-api-prereqs]] +==== {api-prereq-title} + +* Before using this API, you should be familiar with the +https://github.com/mapbox/vector-tile-spec[Mapbox vector tile specification]. + +* If the {es} {security-features} are enabled, you must have the `read` +<> for the target data stream, index, +or alias. + +[[search-vector-tile-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Comma-separated list of data streams, indices, or aliases to +search. Supports wildcards (`*`). To search all data streams and indices, omit +this parameter or use `*` or `_all`. + +``:: +(Required, string) Field containing geospatial values to return. Must be a +<> or <> field. The field must +have <> enabled. Cannot be a nested field. ++ +NOTE: Vector tiles do not natively support geometry collections. For +`geometrycollection` values in a `geo_shape` field, the API returns a `hits` +layer feature for each element of the collection. This behavior may change may +change in a future release. + +``:: +(Required, integer) Zoom level for the vector tile to search. Accepts `0`-`29`. + +``:: +(Required, integer) X coordinate for the vector tile to search. + +``:: +(Required, integer) Y coordinate for the vector tile to search. 
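The zoom level and `x`/`y` coordinates use the same tile scheme as the
`geotile_grid` aggregation, so the same field can be requested at a coarser
zoom by halving the coordinates once per zoom step. For example, the zoom-15
tile `15/5271/12710` shown above falls inside the zoom-7 tile `7/20/49`. A
minimal sketch against the same example index:

[source,console]
----
GET my-index/_mvt/my-geo-field/7/20/49
----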
+ +[[search-vector-tile-api-desc]] +==== {api-description-title} + +Internally, {es} translates a vector tile search API request into a +<> containing: + +* A <> query on the +``. The query uses the `//` tile as a bounding box. + +* A <> +aggregation on the ``. The aggregation uses the `//` tile as +a bounding box. + +* Optionally, a +<> aggregation +on the ``. The search only includes this aggregation if the +`exact_bounds` parameter is `true`. + +For example, {es} may translate a vector tile search API request with an +`exact_bounds` argument of `true` into the following search: + +[source,console] +---- +GET my-index/_search +{ + "size": 10000, + "query": { + "geo_bounding_box": { + "my-geo-field": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "aggregations": { + "grid": { + "geotile_grid": { + "field": "my-geo-field", + "precision": 11, + "size": 65536, + "bounds": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "bounds": { + "geo_bounds": { + "field": "my-geo-field", + "wrap_longitude": false + } + } + } +} +---- +// TEST[continued] + +The API returns results as a binary +https://github.com/mapbox/vector-tile-spec[Mapbox vector tile]. Mapbox vector +tiles are encoded as https://github.com/protocolbuffers/protobuf[Google +Protobufs (PBF)]. By default, the tile contains three layers: + +* A `hits` layer containing a feature for each `` value matching the +`geo_bounding_box` query. + +* An `aggs` layer containing a feature for each cell of the `geotile_grid`. You +can use these cells as tiles for lower zoom levels. The layer only contains +features for cells with matching data. + +* A `meta` layer containing: +** A feature containing a bounding box. By default, this is the bounding box of +the tile. +** Value ranges for any sub-aggregations on the `geotile_grid`. +** Metadata for the search. + +The API only returns features that can display at its zoom level. For example, +if a polygon feature has no area at its zoom level, the API omits it. + +The API returns errors as UTF-8 encoded JSON. + +[[search-vector-tile-api-query-params]] +==== {api-query-parms-title} + +IMPORTANT: You can specify several options for this API as either a query +parameter or request body parameter. If you specify both parameters, the query +parameter takes precedence. + +// tag::exact-bounds[] +`exact_bounds`:: +(Optional, Boolean) +If `false`, the `meta` layer's feature is the bounding box of the tile. Defaults +to `false`. ++ +If `true`, the `meta` layer's feature is a bounding box resulting from a +<> aggregation. +The aggregation runs on `` values that intersect the `//` +tile with `wrap_longitude` set to `false`. The resulting bounding box may be +larger than the vector tile. +// end::exact-bounds[] + +// tag::extent-param[] +`extent`:: +(Optional, integer) Size, in pixels, of a side of the tile. Vector tiles are +square with equal sides. Defaults to `4096`. +// end::extent-param[] + +// tag::grid-precision[] +`grid_precision`:: +(Optional, integer) Additional zoom levels available through the `aggs` layer. +For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to +level 15. Accepts `0`-`8`. Defaults to `8`. If `0`, results don't include the +`aggs` layer. 
++ +This value determines the grid size of the `geotile_grid` as follows: ++ +`(2^grid_precision) x (2^grid_precision)` ++ +For example, a value of `8` divides the tile into a grid of 256 x 256 cells. The +`aggs` layer only contains features for cells with matching data. +// end::grid-precision[] + +// tag::grid-type[] +`grid_type`:: +(Optional, string) Determines the geometry type for features in the `aggs` +layer. In the `aggs` layer, each feature represents a `geotile_grid` cell. +Accepts: + +`grid` (Default)::: +Each feature is a `Polygon` of the cell's bounding box. + +`point`::: +Each feature is a `Point` that's the centroid of the cell. +// end::grid-type[] + +// tag::size[] +`size`:: +(Optional, integer) Maximum number of features to return in the `hits` layer. +Accepts `0`-`10000`. Defaults to `10000`. If `0`, results don't include the +`hits` layer. +// end::size[] + +[role="child_attributes"] +[[search-vector-tile-api-request-body]] +==== {api-request-body-title} + +`aggs`:: +(Optional, <>) +<> for the `geotile_grid`. Supports the following +aggregation types: ++ +* <> +* <> +* <> +* <> +* <> + +include::search-vector-tile-api.asciidoc[tag=exact-bounds] + +include::search-vector-tile-api.asciidoc[tag=extent-param] + +`fields`:: +(Optional, array of strings and objects) Fields to return in the `hits` layer. +Supports wildcards (`*`). ++ +This parameter does not support fields with <>. Fields with +array values may return inconsistent results. ++ +You can specify fields in the array as a string or object. ++ +.Properties of `fields` objects +[%collapsible%open] +==== +include::search.asciidoc[tag=fields-api-props] +==== + +include::search-vector-tile-api.asciidoc[tag=grid-precision] + +include::search-vector-tile-api.asciidoc[tag=grid-type] + +`query`:: +(Optional, object) <> used to filter documents for the +search. + +include::{es-repo-dir}/search/search.asciidoc[tag=runtime-mappings-def] + +include::search-vector-tile-api.asciidoc[tag=size] + +`sort`:: +(Optional, array of <>) Sorts features in the +`hits` layer. ++ +By default, the API calculates a bounding box for each feature. It sorts +features based on this box's diagonal length, from longest to shortest. + +[role="child_attributes"] +[[search-vector-tile-api-response]] +==== Response + +Returned vector tiles contain the following data: + +`hits`:: +(object) Layer containing results for the `geo_bounding_box` query. ++ +.Properties of `hits` +[%collapsible%open] +==== +// tag::extent[] +`extent`:: +(integer) Size, in pixels, of a side of the tile. Vector tiles are square with +equal sides. +// end::extent[] + +// tag::version[] +`version`:: +(integer) Major version number of the +https://github.com/mapbox/vector-tile-spec[Mapbox vector tile specification]. +// end::version[] + +`features`:: +(array of objects) Array of features. Contains a feature for each `` +value that matches the `geo_bounding_box` query. ++ +.Properties of `features` objects +[%collapsible%open] +===== +// tag::geometry[] +`geometry`:: +(object) Geometry for the feature. ++ +.Properties of `geometry` +[%collapsible%open] +====== +`type`:: +(string) Geometry type for the feature. Valid values are: + +* `UNKNOWN` +* `POINT` +* `LINESTRING` +* `POLYGON` + +`coordinates`:: +(array of integers or array of arrays) Tile coordinates for the feature. +====== +// end::geometry[] + +`properties`:: +(object) Properties for the feature. ++ +.Properties of `properties` +[%collapsible%open] +====== +`_id`:: +(string) Document `_id` for the feature's document. 
+ +``:: +Field value. Only returned for fields in the `fields` parameter. +====== +// tag::feature-id[] +`id`:: +(integer) Unique ID for the feature within the layer. +// end::feature-id[] + +// tag::feature-type[] +`type`:: +(integer) Identifier for the feature's geometry type. Values are: ++ +* `1` (`POINT`) +* `2` (`LINESTRING`) +* `3` (`POLYGON`) +// end::feature-type[] +===== +==== + +`aggs`:: +(object) Layer containing results for the `geotile_grid` aggregation and its +sub-aggregations. ++ +.Properties of `aggs` +[%collapsible%open] +==== +include::search-vector-tile-api.asciidoc[tag=extent] + +include::search-vector-tile-api.asciidoc[tag=version] + +`features`:: +(array of objects) Array of features. Contains a feature for each cell of the +`geotile_grid`. ++ +.Properties of `features` objects +[%collapsible%open] +===== +include::search-vector-tile-api.asciidoc[tag=geometry] + +`properties`:: +(object) Properties for the feature. ++ +.Properties of `properties` +[%collapsible%open] +====== +`_count`:: +(string) Count of the cell's documents. + +`.value`:: +Sub-aggregation results for the cell. Only returned for sub-aggregations in the +`aggs` parameter. +====== +include::search-vector-tile-api.asciidoc[tag=feature-id] + +include::search-vector-tile-api.asciidoc[tag=feature-type] +===== +==== + +`meta`:: +(object) Layer containing metadata for the request. ++ +.Properties of `meta` +[%collapsible%open] +==== +include::search-vector-tile-api.asciidoc[tag=extent] + +include::search-vector-tile-api.asciidoc[tag=version] + +`features`:: +(array of objects) Contains a feature for a bounding box. ++ +.Properties of `features` objects +[%collapsible%open] +===== +include::search-vector-tile-api.asciidoc[tag=geometry] + +`properties`:: +(object) Properties for the feature. ++ +.Properties of `properties` +[%collapsible%open] +====== +`_shards.failed`:: +(integer) Number of shards that failed to execute the search. See the search +API's <> response property. + +`_shards.skipped`:: +(integer) Number of shards that skipped the search. See the search +API's <> response property. + +`_shards.successful`:: +(integer) Number of shards that executed the search successfully. See the +search API's <> response property. + +`_shards.total`:: +(integer) Total number of shards that required querying, including unallocated +shards. See the search API's <> response property. + +`aggregations._count.avg`:: +(float) Average `_count` value for features in the `aggs` layer. + +`aggregations._count.count`:: +(integer) Number of unique `_count` values for features in the `aggs` layer. + +`aggregations._count.max`:: +(float) Largest `_count` value for features in the `aggs` layer. + +`aggregations._count.min`:: +(float) Smallest `_count` value for features in the `aggs` layer. + +`aggregations._count.sum`:: +(float) Sum of `_count` values for features in the `aggs` layer. + +`aggregations..avg`:: +(float) Average value for the sub-aggregation's results. + +`aggregations..count`:: +(integer) Number of unique values from the sub-aggregation's results. + +`aggregations..max`:: +(float) Largest value from the sub-aggregation's results. + +`aggregations..min`:: +(float) Smallest value from the sub-aggregation's results. + +`aggregations..sum`:: +(float) Sum of values for the sub-aggregation's results. + +`hits.max_score`:: +(float) Highest document `_score` for the search's hits. + +`hits.total.relation`:: +(string) Indicates whether `hits.total.value` is accurate or a lower bound. 
+Possible values are: + +`eq`::: Accurate + +`gte`::: Lower bound + +`hits.total.value`:: +(integer) Total number of hits for the search. + +`timed_out`:: +(Boolean) If `true`, the search timed out before completion. Results may be +partial or empty. + +`took`:: +(integer) Milliseconds it took {es} to run the search. See the search API's +<> response property. +====== +include::search-vector-tile-api.asciidoc[tag=feature-id] + +include::search-vector-tile-api.asciidoc[tag=feature-type] +===== +==== + +[[search-vector-tile-api-api-example]] +==== {api-examples-title} + +The following requests create the `museum` index and add several geospatial +`location` values. + +[source,console] +---- +PUT museums +{ + "mappings": { + "properties": { + "location": { + "type": "geo_point" + }, + "name": { + "type": "keyword" + }, + "price": { + "type": "long" + }, + "included": { + "type": "boolean" + } + } + } +} + +POST museums/_bulk?refresh +{ "index": { "_id": "1" } } +{ "location": "52.374081,4.912350", "name": "NEMO Science Museum", "price": 1750, "included": true } +{ "index": { "_id": "2" } } +{ "location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis", "price": 1500, "included": false } +{ "index": { "_id": "3" } } +{ "location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum", "price":1650, "included": true } +{ "index": { "_id": "4" } } +{ "location": "52.371667,4.914722", "name": "Amsterdam Centre for Architecture", "price":0, "included": true } +---- + +The following request searches the index for `location` values that intersect +the `13/4207/2692` vector tile. + +[source,console] +---- +GET museums/_mvt/location/13/4207/2692 +{ + "grid_precision": 2, + "fields": [ + "name", + "price" + ], + "query": { + "term": { + "included": true + } + }, + "aggs": { + "min_price": { + "min": { + "field": "price" + } + }, + "max_price": { + "max": { + "field": "price" + } + }, + "avg_price": { + "avg": { + "field": "price" + } + } + } +} +---- +// TEST[continued] + +The API returns results as a binary vector tile. 
When decoded into JSON, the +tile contains the following data: + +[source,js] +---- +{ + "hits": { + "extent": 4096, + "version": 2, + "features": [ + { + "geometry": { + "type": "Point", + "coordinates": [ + 3208, + 3864 + ] + }, + "properties": { + "_id": "1", + "name": "NEMO Science Museum", + "price": 1750 + }, + "id": 0, + "type": 1 + }, + { + "geometry": { + "type": "Point", + "coordinates": [ + 3429, + 3496 + ] + }, + "properties": { + "_id": "3", + "name": "Nederlands Scheepvaartmuseum", + "price": 1650 + }, + "id": 0, + "type": 1 + }, + { + "geometry": { + "type": "Point", + "coordinates": [ + 3429, + 3496 + ] + }, + "properties": { + "_id": "4", + "name": "Amsterdam Centre for Architecture", + "price": 0 + }, + "id": 0, + "type": 1 + } + ] + }, + "aggs": { + "extent": 4096, + "version": 2, + "features": [ + { + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 3072, + 3072 + ], + [ + 4096, + 3072 + ], + [ + 4096, + 4096 + ], + [ + 3072, + 4096 + ], + [ + 3072, + 3072 + ] + ] + ] + }, + "properties": { + "_count": 3, + "max_price.value": 1750.0, + "min_price.value": 0.0, + "avg_price.value": 1133.3333333333333 + }, + "id": 0, + "type": 3 + } + ] + }, + "meta": { + "extent": 4096, + "version": 2, + "features": [ + { + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 4096, + 0 + ], + [ + 4096, + 4096 + ], + [ + 0, + 4096 + ], + [ + 0, + 0 + ] + ] + ] + }, + "properties": { + "_shards.failed": 0, + "_shards.skipped": 0, + "_shards.successful": 1, + "_shards.total": 1, + "aggregations._count.avg": 3.0, + "aggregations._count.count": 1, + "aggregations._count.max": 3.0, + "aggregations._count.min": 3.0, + "aggregations._count.sum": 3.0, + "aggregations.avg_price.avg": 1133.3333333333333, + "aggregations.avg_price.count": 1, + "aggregations.avg_price.max": 1133.3333333333333, + "aggregations.avg_price.min": 1133.3333333333333, + "aggregations.avg_price.sum": 1133.3333333333333, + "aggregations.max_price.avg": 1750.0, + "aggregations.max_price.count": 1, + "aggregations.max_price.max": 1750.0, + "aggregations.max_price.min": 1750.0, + "aggregations.max_price.sum": 1750.0, + "aggregations.min_price.avg": 0.0, + "aggregations.min_price.count": 1, + "aggregations.min_price.max": 0.0, + "aggregations.min_price.min": 0.0, + "aggregations.min_price.sum": 0.0, + "hits.max_score": 0.0, + "hits.total.relation": "eq", + "hits.total.value": 3, + "timed_out": false, + "took": 2 + }, + "id": 0, + "type": 3 + } + ] + } +} +---- +// NOTCONSOLE diff --git a/docs/reference/search/search-your-data/collapse-search-results.asciidoc b/docs/reference/search/search-your-data/collapse-search-results.asciidoc index 465de013ff97c..5184198ca5f5e 100644 --- a/docs/reference/search/search-your-data/collapse-search-results.asciidoc +++ b/docs/reference/search/search-your-data/collapse-search-results.asciidoc @@ -9,8 +9,8 @@ For example, the following search collapses results by `user.id` and sorts them by `http.response.bytes`. 
[source,console] --------------------------------------------------- -GET /my-index-000001/_search +---- +GET my-index-000001/_search { "query": { "match": { @@ -18,24 +18,30 @@ GET /my-index-000001/_search } }, "collapse": { - "field": "user.id" <1> + "field": "user.id" <1> }, - "sort": [ "http.response.bytes" ], <2> - "from": 10 <3> + "sort": [ + { + "http.response.bytes": { <2> + "order": "desc" + } + } + ], + "from": 0 <3> } --------------------------------------------------- +---- // TEST[setup:my_index] -<1> Collapse the result set using the "user.id" field +<1> Collapse the result set using the `user.id` field <2> Sort the results by `http.response.bytes` -<3> define the offset of the first collapsed result +<3> Define the offset of the first collapsed result WARNING: The total number of hits in the response indicates the number of matching documents without collapsing. The total number of distinct group is unknown. -The field used for collapsing must be a single valued <> or <> field with <> activated +The field used for collapsing must be a single valued <> or <> field with <> activated. -NOTE: The collapsing is applied to the top hits only and does not affect aggregations. +NOTE: Collapsing is applied to the top hits only and does not affect aggregations. [discrete] [[expand-collapse-results]] @@ -44,7 +50,7 @@ NOTE: The collapsing is applied to the top hits only and does not affect aggrega It is also possible to expand each collapsed top hits with the `inner_hits` option. [source,console] --------------------------------------------------- +---- GET /my-index-000001/_search { "query": { @@ -57,20 +63,26 @@ GET /my-index-000001/_search "inner_hits": { "name": "most_recent", <2> "size": 5, <3> - "sort": [ { "@timestamp": "asc" } ] <4> + "sort": [ { "@timestamp": "desc" } ] <4> }, "max_concurrent_group_searches": 4 <5> }, - "sort": [ "http.response.bytes" ] + "sort": [ + { + "http.response.bytes": { + "order": "desc" + } + } + ] } --------------------------------------------------- +---- // TEST[setup:my_index] -<1> collapse the result set using the "user.id" field -<2> the name used for the inner hit section in the response -<3> the number of inner_hits to retrieve per collapse key -<4> how to sort the document inside each group -<5> the number of concurrent requests allowed to retrieve the `inner_hits` per group +<1> Collapse the result set using the `user.id` field +<2> The name used for the inner hit section in the response +<3> The number of `inner_hits` to retrieve per collapse key +<4> How to sort the document inside each group +<5> The number of concurrent requests allowed to retrieve the `inner_hits` per group See <> for the complete list of supported options and the format of the response. @@ -78,7 +90,7 @@ It is also possible to request multiple `inner_hits` for each collapsed hit. Thi multiple representations of the collapsed hits. 
[source,console] --------------------------------------------------- +---- GET /my-index-000001/_search { "query": { @@ -87,32 +99,47 @@ GET /my-index-000001/_search } }, "collapse": { - "field": "user.id", <1> - "inner_hits": [ + "field": "user.id", <1> + "inner_hits": [ { - "name": "largest_responses", <2> + "name": "largest_responses", <2> "size": 3, - "sort": [ "http.response.bytes" ] + "sort": [ + { + "http.response.bytes": { + "order": "desc" + } + } + ] }, { - "name": "most_recent", <3> + "name": "most_recent", <3> "size": 3, - "sort": [ { "@timestamp": "asc" } ] + "sort": [ + { + "@timestamp": { + "order": "desc" + } + } + ] } ] }, - "sort": [ "http.response.bytes" ] + "sort": [ + "http.response.bytes" + ] } --------------------------------------------------- +---- // TEST[setup:my_index] -<1> collapse the result set using the "user.id" field -<2> return the three largest HTTP responses for the user -<3> return the three most recent HTTP responses for the user +<1> Collapse the result set using the `user.id` field +<2> Return the three largest HTTP responses for the user +<3> Return the three most recent HTTP responses for the user The expansion of the group is done by sending an additional query for each -`inner_hit` request for each collapsed hit returned in the response. This can significantly slow things down -if you have too many groups and/or `inner_hit` requests. +`inner_hit` request for each collapsed hit returned in the response. This can +significantly slow your search if you have too many groups or `inner_hit` +requests. The `max_concurrent_group_searches` request parameter can be used to control the maximum number of concurrent searches allowed in this phase. @@ -131,7 +158,7 @@ collapse and sort on `user.id`, while paging through the results using `search_after`: [source,console] --------------------------------------------------- +---- GET /my-index-000001/_search { "query": { @@ -145,20 +172,37 @@ GET /my-index-000001/_search "sort": [ "user.id" ], "search_after": ["dd5ce1ad"] } --------------------------------------------------- +---- // TEST[setup:my_index] [discrete] [[second-level-of-collapsing]] === Second level of collapsing -Second level of collapsing is also supported and is applied to `inner_hits`. +A second level of collapsing is also supported and is applied to `inner_hits`. For example, the following search collapses results by `geo.country_name`. Within each `geo.country_name`, inner hits are collapsed by `user.id`. -[source,js] --------------------------------------------------- +NOTE: Second level of collapsing doesn't allow `inner_hits`. 
+ +/////////////// +[source,console] +---- +PUT my-index-000001/ +{"mappings":{"properties":{"@timestamp":{"type":"date"},"geo":{"properties":{"country_name":{"type":"keyword"}}},"http":{"properties":{"request":{"properties":{"method":{"type":"keyword"}}}}},"message":{"type":"text","fields":{"keyword":{"type":"keyword"}}},"user":{"properties":{"id":{"type":"keyword","doc_values":true}}}}}} +---- + +[source,console] +---- +POST my-index-000001/_doc/oX9uXXoB0da05OCR3adK?refresh=true +{"@timestamp":"2099-11-15T14:12:12","geo":{"country_name":"Amsterdam"},"http":{"request":{"method":"get"},"response":{"bytes":1070000,"status_code":200},"version":"1.1"},"message":"GET /search HTTP/1.1 200 1070000","source":{"ip":"127.0.0.1"},"user":{"id":"kimchy"}} +---- +// TEST[continued] +/////////////// + +[source,console] +---- GET /my-index-000001/_search { "query": { @@ -175,79 +219,95 @@ GET /my-index-000001/_search } } } --------------------------------------------------- -// NOTCONSOLE +---- +// TEST[continued] +// TEST[s/_search/_search\?filter_path=hits.hits/] - -Response: -[source,js] --------------------------------------------------- +[source,console-result] +---- { - ... - "hits": [ - { - "_index": "my-index-000001", - "_type": "_doc", - "_id": "9", - "_score": ..., - "_source": {...}, - "fields": { "geo": { "country_name": [ "UK" ] }}, - "inner_hits": { - "by_location": { - "hits": { - ..., - "hits": [ - { - ... - "fields": { "user": "id": { [ "user124" ] }} - }, - { - ... - "fields": { "user": "id": { [ "user589" ] }} - }, - { - ... - "fields": { "user": "id": { [ "user001" ] }} - } - ] + "hits" : { + "hits" : [ + { + "_index" : "my-index-000001", + "_id" : "oX9uXXoB0da05OCR3adK", + "_score" : 0.5753642, + "_source" : { + "@timestamp" : "2099-11-15T14:12:12", + "geo" : { + "country_name" : "Amsterdam" + }, + "http" : { + "request" : { + "method" : "get" + }, + "response" : { + "bytes" : 1070000, + "status_code" : 200 + }, + "version" : "1.1" + }, + "message" : "GET /search HTTP/1.1 200 1070000", + "source" : { + "ip" : "127.0.0.1" + }, + "user" : { + "id" : "kimchy" } - } - } - }, - { - "_index": "my-index-000001", - "_type": "_doc", - "_id": "1", - "_score": .., - "_source": {... - }, - "fields": { "geo": { "country_name": [ "Canada" ] }}, - "inner_hits": { - "by_location": { - "hits": { - ..., - "hits": [ - { - ... - "fields": { "user": "id": { [ "user444" ] }} - }, - { - ... - "fields": { "user": "id": { [ "user1111" ] } + }, + "fields" : { + "geo.country_name" : [ + "Amsterdam" + ] + }, + "inner_hits" : { + "by_location" : { + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" }, - { - ... - "fields": { "user": "id": { [ "user999" ] }} - } - ] + "max_score" : null, + "hits" : [ + { + "_index" : "my-index-000001", + "_id" : "oX9uXXoB0da05OCR3adK", + "_score" : 0.5753642, + "_source" : { + "@timestamp" : "2099-11-15T14:12:12", + "geo" : { + "country_name" : "Amsterdam" + }, + "http" : { + "request" : { + "method" : "get" + }, + "response" : { + "bytes" : 1070000, + "status_code" : 200 + }, + "version" : "1.1" + }, + "message" : "GET /search HTTP/1.1 200 1070000", + "source" : { + "ip" : "127.0.0.1" + }, + "user" : { + "id" : "kimchy" + } + }, + "fields" : { + "user.id" : [ + "kimchy" + ] + } + } + ] + } } } } - }, - ... - ] + ] + } } --------------------------------------------------- -// NOTCONSOLE - -NOTE: Second level of collapsing doesn't allow `inner_hits`. 
+---- diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index 6f2d6bd1e1a8a..710010e1c4782 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -98,8 +98,8 @@ GET /_search } }, "pit": { - "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> - "keep_alive": "1m" + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> + "keep_alive": "1m" }, "sort": [ <2> {"@timestamp": {"order": "asc", "format": "strict_date_optional_time_nanos", "numeric_type" : "date_nanos" }} @@ -129,8 +129,8 @@ GET /_search } }, "pit": { - "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> - "keep_alive": "1m" + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> + "keep_alive": "1m" }, "sort": [ <2> {"@timestamp": {"order": "asc", "format": "strict_date_optional_time_nanos"}}, @@ -192,8 +192,8 @@ GET /_search } }, "pit": { - "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> - "keep_alive": "1m" + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> + "keep_alive": "1m" }, "sort": [ {"@timestamp": {"order": "asc", "format": "strict_date_optional_time_nanos"}} @@ -226,7 +226,6 @@ DELETE /_pit ---- // TEST[catch:missing] - [discrete] [[scroll-search-results]] === Scroll search results @@ -437,8 +436,8 @@ DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMN [[slice-scroll]] ==== Sliced scroll -For scroll queries that return a lot of documents it is possible to split the scroll in multiple slices which -can be consumed independently: +When paging through a large number of documents, it can be helpful to split the search into multiple slices +to consume them independently: [source,console] -------------------------------------------------- @@ -472,24 +471,27 @@ GET /my-index-000001/_search?scroll=1m <1> The id of the slice <2> The maximum number of slices -The result from the first request returned documents that belong to the first slice (id: 0) and the result from the -second request returned documents that belong to the second slice. Since the maximum number of slices is set to 2 - the union of the results of the two requests is equivalent to the results of a scroll query without slicing. -By default the splitting is done on the shards first and then locally on each shard using the _id field -with the following formula: -`slice(doc) = floorMod(hashCode(doc._id), max)` -For instance if the number of shards is equal to 2 and the user requested 4 slices then the slices 0 and 2 are assigned -to the first shard and the slices 1 and 3 are assigned to the second shard. 
+The result from the first request returned documents that belong to the first slice (id: 0) and +the result from the second request returned documents that belong to the second slice. Since the +maximum number of slices is set to 2 the union of the results of the two requests is equivalent +to the results of a scroll query without slicing. By default the splitting is done first on the +shards, then locally on each shard using the `_id` field. The local splitting follows the formula +`slice(doc) = floorMod(hashCode(doc._id), max))`. Each scroll is independent and can be processed in parallel like any scroll request. -NOTE: If the number of slices is bigger than the number of shards the slice filter is very slow on the first calls, it has a complexity of O(N) and a memory cost equals -to N bits per slice where N is the total number of documents in the shard. -After few calls the filter should be cached and subsequent calls should be faster but you should limit the number of - sliced query you perform in parallel to avoid the memory explosion. +NOTE: If the number of slices is bigger than the number of shards the slice filter is very slow on +the first calls, it has a complexity of O(N) and a memory cost equals to N bits per slice where N +is the total number of documents in the shard. After few calls the filter should be cached and +subsequent calls should be faster but you should limit the number of sliced query you perform in +parallel to avoid the memory explosion. + +The <> API supports a more efficient partitioning strategy and +does not suffer from this problem. When possible, it's recommended to use a point-in-time search +with slicing instead of a scroll. -To avoid this cost entirely it is possible to use the `doc_values` of another field to do the slicing -but the user must ensure that the field has the following properties: +Another way to avoid this high cost is to use the `doc_values` of another field to do the slicing. +The field must have the following properties: * The field is numeric. @@ -521,6 +523,3 @@ GET /my-index-000001/_search?scroll=1m // TEST[setup:my_index_big] For append only time-based indices, the `timestamp` field can be used safely. - -NOTE: By default the maximum number of slices allowed per scroll is limited to 1024. -You can update the `index.max_slices_per_scroll` index setting to bypass this limit. diff --git a/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc b/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc index c630ab5dd8e06..a39c817c3a83e 100644 --- a/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc +++ b/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc @@ -49,14 +49,8 @@ The following search request uses the `fields` parameter to retrieve values for the `user.id` field, all fields starting with `http.response.`, and the `@timestamp` field. -Using object notation, you can pass a `format` parameter for certain fields to -apply a custom format for the field's values: - -* <> and <> fields accept a <> -* <> accept either `geojson` for http://www.geojson.org[GeoJSON] (the default) or `wkt` for -{wikipedia}/Well-known_text_representation_of_geometry[Well Known Text] - -Other field types do not support the `format` parameter. +Using object notation, you can pass a <> argument to +customize the format of returned date or geospatial values. 
[source,console] ---- diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 703eac88d1d07..c5ed2fb8d063a 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -19,6 +19,7 @@ The following APIs support {ccs}: * <> * <> * <> +* experimental:[] <> [discrete] [[ccs-example]] @@ -409,9 +410,14 @@ image:images/ccs/ccs-min-roundtrip-client-response.svg[] [[ccs-supported-configurations]] === Supported configurations -Generally, <> can search remote +Generally, <> can search remote clusters that are one major version ahead or behind the coordinating node's -version. Cross cluster search can also search remote clusters that are being +version. + +IMPORTANT: For the <>, the local and remote +clusters must use the same {es} version. + +Cross-cluster search can also search remote clusters that are being <> so long as both the "upgrade from" and "upgrade to" version are compatible with the gateway node. diff --git a/docs/reference/search/search-your-data/search-template.asciidoc b/docs/reference/search/search-your-data/search-template.asciidoc new file mode 100644 index 0000000000000..1dd9db50089d7 --- /dev/null +++ b/docs/reference/search/search-your-data/search-template.asciidoc @@ -0,0 +1,632 @@ +[[search-template]] +== Search templates + +A search template is a stored search you can run with different variables. + +If you use {es} as a search backend, you can pass user input from a search bar +as parameters for a search template. This lets you run searches without exposing +{es}'s query syntax to your users. + +If you use {es} for a custom application, search templates let you change +your searches without modifying your app's code. + +[discrete] +[[create-search-template]] +=== Create a search template + +To create or update a search template, use the <>. + +The request's `source` supports the same parameters as the +<>'s request body. `source` also +supports https://mustache.github.io/[Mustache] variables, typically enclosed in +double curly brackets: `{{my-var}}`. When you run a templated search, {es} +replaces these variables with values from `params`. + +Search templates must use a `lang` of `mustache`. + +The following request creates a search template with an `id` of +`my-search-template`. + +[source,console] +---- +PUT _scripts/my-search-template +{ + "script": { + "lang": "mustache", + "source": { + "query": { + "match": { + "message": "{{query_string}}" + } + }, + "from": "{{from}}", + "size": "{{size}}" + }, + "params": { + "query_string": "My query string" + } + } +} +---- + +{es} stores search templates as Mustache <> in the +cluster state. {es} compiles search templates in the `template` script context. +Settings that limit or disable scripts also affect search templates. + +[discrete] +[[validate-search-template]] +=== Validate a search template + +[[_validating_templates]] +To test a template with different `params`, use the +<>. + +[source,console] +---- +POST _render/template +{ + "id": "my-search-template", + "params": { + "query_string": "hello world", + "from": 20, + "size": 10 + } +} +---- +// TEST[continued] + +When rendered, the template outputs a <>. 
+ +[source,console-result] +---- +{ + "template_output": { + "query": { + "match": { + "message": "hello world" + } + }, + "from": "20", + "size": "10" + } +} +---- + +You can also use the API to test inline templates. + +[source,console] +---- +POST _render/template +{ + "source": { + "query": { + "match": { + "message": "{{query_string}}" + } + }, + "from": "{{from}}", + "size": "{{size}}" + }, + "params": { + "query_string": "hello world", + "from": 20, + "size": 10 + } +} +---- +// TEST[continued] + +[discrete] +[[run-templated-search]] +=== Run a templated search + +To run a search with a search template, use the <>. You can specify different `params` with each request. + +//// +[source,console] +---- +PUT my-index/_doc/1?refresh +{ + "message": "hello world" +} +---- +// TEST[continued] +//// + +[source,console] +---- +GET my-index/_search/template +{ + "id": "my-search-template", + "params": { + "query_string": "hello world", + "from": 0, + "size": 10 + } +} +---- +// TEST[continued] + +The response uses the same properties as the <>'s +response. + +[source,console-result] +---- +{ + "took": 36, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 0.5753642, + "hits": [ + { + "_index": "my-index", + "_id": "1", + "_score": 0.5753642, + "_source": { + "message": "hello world" + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 36/"took": "$body.took"/] + +[discrete] +[[run-multiple-templated-searches]] +=== Run multiple templated searches + +To run multiple templated searches with a single request, use the +<>. These requests often have +less overhead and faster speeds than multiple individual searches. + +[source,console] +---- +GET my-index/_msearch/template +{ } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} +---- +// TEST[continued] +// TEST[s/my-other-search-template/my-search-template/] + +[discrete] +[[get-search-templates]] +=== Get search templates + +To retrieve a search template, use the <>. + +[source,console] +---- +GET _scripts/my-search-template +---- +// TEST[continued] + +To get a list of all search templates and other stored scripts, use the +<>. + +[source,console] +---- +GET _cluster/state/metadata?pretty&filter_path=metadata.stored_scripts +---- +// TEST[continued] + +[discrete] +[[delete-search-template]] +=== Delete a search template + +To delete a search template, use the <>. + +[source,console] +---- +DELETE _scripts/my-search-template +---- +// TEST[continued] + +[discrete] +[[search-template-set-default-values]] +=== Set default values + +To set a default value for a variable, use the following syntax: + +[source,mustache] +---- +{{my-var}}{{^my-var}}default value{{/my-var}} +---- + +If a templated search doesn't specify a value in its `params`, the search uses +the default value instead. For example, the following template sets defaults +for `from` and `size`. + +[source,console] +---- +POST _render/template +{ + "source": { + "query": { + "match": { + "message": "{{query_string}}" + } + }, + "from": "{{from}}{{^from}}0{{/from}}", + "size": "{{size}}{{^size}}10{{/size}}" + }, + "params": { + "query_string": "hello world" + } +} +---- + +[discrete] +[[search-template-url-encode-strings]] +=== URL encode strings + +Use the `{{#url}}` function to URL encode a string. 
+ +[source,console] +---- +POST _render/template +{ + "source": { + "query": { + "term": { + "url.full": "{{#url}}{{host}}/{{page}}{{/url}}" + } + } + }, + "params": { + "host": "http://example.com", + "page": "hello-world" + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output": { + "query": { + "term": { + "url.full": "http%3A%2F%2Fexample.com%2Fhello-world" + } + } + } +} +---- + +[discrete] +[[search-template-concatenate-values]] +=== Concatenate values + +Use the `{{#join}}` function to concatenate array values as a comma-delimited +string. For example, the following template concatenates two email addresses. + +[source,console] +---- +POST _render/template +{ + "source": { + "query": { + "match": { + "user.group.emails": "{{#join}}emails{{/join}}" + } + } + }, + "params": { + "emails": [ "user1@example.com", "user_one@example.com" ] + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output": { + "query": { + "match": { + "user.group.emails": "user1@example.com,user_one@example.com" + } + } + } +} +---- + +You can also specify a custom delimiter. + +[source,console] +---- +POST _render/template +{ + "source": { + "query": { + "range": { + "user.effective.date": { + "gte": "{{date.min}}", + "lte": "{{date.max}}", + "format": "{{#join delimiter='||'}}date.formats{{/join delimiter='||'}}" + } + } + } + }, + "params": { + "date": { + "min": "2098", + "max": "06/05/2099", + "formats": ["dd/MM/yyyy", "yyyy"] + } + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output": { + "query": { + "range": { + "user.effective.date": { + "gte": "2098", + "lte": "06/05/2099", + "format": "dd/MM/yyyy||yyyy" + } + } + } + } +} +---- + +[discrete] +[[search-template-convert-json]] +=== Convert to JSON + +Use the `{{#toJson}}` function to convert a variable value to its JSON +representation. + +For example, the following template uses `{{#toJson}}` to pass an array. To +ensure the request body is valid JSON, the `source` is written in the string +format. + +[source,console] +---- +POST _render/template +{ + "source": "{ \"query\": { \"terms\": { \"tags\": {{#toJson}}tags{{/toJson}} }}}", + "params": { + "tags": [ + "prod", + "es01" + ] + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output": { + "query": { + "terms": { + "tags": [ + "prod", + "es01" + ] + } + } + } +} +---- + +You can also use `{{#toJson}}` to pass objects. + +[source,console] +---- +POST _render/template +{ + "source": "{ \"query\": {{#toJson}}my_query{{/toJson}} }", + "params": { + "my_query": { + "match_all": { } + } + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output" : { + "query" : { + "match_all" : { } + } + } +} + +---- + +You can also pass an array of objects. 
+ +[source,console] +---- +POST _render/template +{ + "source": "{ \"query\": { \"bool\": { \"must\": {{#toJson}}clauses{{/toJson}} }}}", + "params": { + "clauses": [ + { + "term": { + "user.id": "kimchy" + } + }, + { + "term": { + "url.domain": "example.com" + } + } + ] + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output": { + "query": { + "bool": { + "must": [ + { + "term": { + "user.id": "kimchy" + } + }, + { + "term": { + "url.domain": "example.com" + } + } + ] + } + } + } +} +---- + +[discrete] +[[search-template-use-conditions]] +=== Use conditions + +To create if conditions, use the following syntax: + +[source,mustache] +---- +{{#condition}}content{{/condition}} +---- + +If the condition variable is `true`, {es} displays its content. For example, the +following template searches data from the past year if `year_scope` is `true`. + +[source,console] +---- +POST _render/template +{ + "source": "{ \"query\": { \"bool\": { \"filter\": [ {{#year_scope}} { \"range\": { \"@timestamp\": { \"gte\": \"now-1y/d\", \"lt\": \"now/d\" } } }, {{/year_scope}} { \"term\": { \"user.id\": \"{{user_id}}\" }}]}}}", + "params": { + "year_scope": true, + "user_id": "kimchy" + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output" : { + "query" : { + "bool" : { + "filter" : [ + { + "range" : { + "@timestamp" : { + "gte" : "now-1y/d", + "lt" : "now/d" + } + } + }, + { + "term" : { + "user.id" : "kimchy" + } + } + ] + } + } + } +} +---- + +If `year_scope` is `false`, the template searches data from any time period. + +[source,console] +---- +POST _render/template +{ + "source": "{ \"query\": { \"bool\": { \"filter\": [ {{#year_scope}} { \"range\": { \"@timestamp\": { \"gte\": \"now-1y/d\", \"lt\": \"now/d\" } } }, {{/year_scope}} { \"term\": { \"user.id\": \"{{user_id}}\" }}]}}}", + "params": { + "year_scope": false, + "user_id": "kimchy" + } +} +---- + +The template renders as: + +[source,console-result] +---- +{ + "template_output" : { + "query" : { + "bool" : { + "filter" : [ + { + "term" : { + "user.id" : "kimchy" + } + } + ] + } + } + } +} +---- + +To create if-else conditions, use the following syntax: + +[source,mustache] +---- +{{#condition}}if content{{/condition}} {{^condition}}else content{{/condition}} +---- + +For example, the following template searches data from the past year if +`year_scope` is `true`. Otherwise, it searches data from the past day. 
+ +[source,console] +---- +POST _render/template +{ + "source": "{ \"query\": { \"bool\": { \"filter\": [ { \"range\": { \"@timestamp\": { \"gte\": {{#year_scope}} \"now-1y/d\" {{/year_scope}} {{^year_scope}} \"now-1d/d\" {{/year_scope}} , \"lt\": \"now/d\" }}}, { \"term\": { \"user.id\": \"{{user_id}}\" }}]}}}", + "params": { + "year_scope": true, + "user_id": "kimchy" + } +} +---- diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index d185ee5970d54..59952312e18e5 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -524,4 +524,5 @@ include::retrieve-selected-fields.asciidoc[] include::search-across-clusters.asciidoc[] include::search-multiple-indices.asciidoc[] include::search-shard-routing.asciidoc[] +include::search-template.asciidoc[] include::sort-search-results.asciidoc[] diff --git a/docs/reference/search/search-your-data/sort-search-results.asciidoc b/docs/reference/search/search-your-data/sort-search-results.asciidoc index be0da5ca51300..73aecf42c3cd8 100644 --- a/docs/reference/search/search-your-data/sort-search-results.asciidoc +++ b/docs/reference/search/search-your-data/sort-search-results.asciidoc @@ -591,6 +591,7 @@ The final distance for a document will then be `min`/`max`/`avg` (defined via `m [discrete] +[[script-based-sorting]] === Script Based Sorting Allow to sort based on custom scripts, here is an example: diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 4f17b6cf2e6d7..793b45720c223 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -339,31 +339,60 @@ pattern]. For other field data types, this parameter is not supported. ==== +[[search-api-fields]] `fields`:: (Optional, array of strings and objects) Array of wildcard (`*`) patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. + You can specify items in the array as a string or object. -See <> for more details. + .Properties of `fields` objects [%collapsible%open] ==== +// tag::fields-api-props[] `field`:: -(Required, string) -Wildcard pattern. The request returns values for field names matching this pattern. +(Required, string) Field to return. Supports wildcards (`*`). `format`:: (Optional, string) -Format in which the values are returned. +Format for date and geospatial fields. Other field data types do not support +this parameter. + -The date fields <> and <> accept a -<>. <> accept either -`geojson` for http://www.geojson.org[GeoJSON] (the default) or `wkt` for -{wikipedia}/Well-known_text_representation_of_geometry[Well Known Text]. +-- +<> and <> fields accept a +<>. <> and +<> fields accept: + +`geojson` (default)::: +http://www.geojson.org[GeoJSON] + +`wkt`::: +{wikipedia}/Well-known_text_representation_of_geometry[Well Known Text] + +`mvt(//@)` or `mvt(//)`::: +experimental:[] Binary +https://docs.mapbox.com/vector-tiles/specification[Mapbox vector tile]. The API +returns the tile as a base64-encoded string. + -For other field data types, this parameter is not supported. +.`mvt` parameters +[%collapsible%open] +======== +``:: +(Required, integer) Zoom level for the tile. Accepts `0`-`29`. + +``:: +(Required, integer) X coordinate for the tile. + +``:: +(Required, integer) Y coordinate for the tile. 
+ +``:: +(Optional, integer) Size, in pixels, of a side of the tile. Vector tiles are +square with equal sides. Defaults to `4096`. +======== +-- +// end::fields-api-props[] ==== [[request-body-search-explain]] @@ -599,6 +628,7 @@ next batch of search results for the request. See This parameter is only returned if the <> is specified in the request. +[[search-api-took]] `took`:: + -- @@ -629,6 +659,7 @@ If `true`, the request timed out before completion; returned results may be partial or empty. +[[search-api-shards]] `_shards`:: (object) Contains a count of shards used for the request. diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 921510b2ad5ba..7c912e63c202e 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -206,8 +206,8 @@ The response contains suggestions scored by the most likely spelling correction Checks each suggestion against the specified `query` to prune suggestions for which no matching docs exist in the index. The collate query for a suggestion is run only on the local shard from which the suggestion has - been generated from. The `query` must be specified and it can be templated, - see <> for more information. + been generated from. The `query` must be specified and it can be templated. + See <>. The current suggestion is automatically made available as the `{{suggestion}}` variable, which should be used in your query. You can still specify your own template `params` -- the `suggestion` value will be added to the diff --git a/docs/reference/search/terms-enum.asciidoc b/docs/reference/search/terms-enum.asciidoc index 4fc997ecdbdd1..daa542c6ffd4a 100644 --- a/docs/reference/search/terms-enum.asciidoc +++ b/docs/reference/search/terms-enum.asciidoc @@ -35,8 +35,9 @@ The API returns the following response: } -------------------------------------------------- -The "complete" flag is false if time or space constraints were met and the -set of terms examined was not the full set of available values. +If the `complete` flag is `false`, the returned `terms` set may be incomplete +and should be treated as approximate. This can occur due to a few reasons, such +as a request timeout or a node error. [[search-terms-enum-api-request]] ==== {api-request-title} @@ -102,4 +103,3 @@ query rewrites to `match_none`. The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. 
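+
+For example, if a first request on a hypothetical `stackoverflow` index returns
+`kibana` as its last term, a follow-up request can resume from that term. This
+is an illustrative sketch only; the index and field names are not part of the
+reference examples above:
+
+[source,console]
+----
+POST stackoverflow/_terms_enum
+{
+  "field": "tags",
+  "string": "kiba",
+  "search_after": "kibana"
+}
+----
+// TEST[skip: illustrative example using a hypothetical index]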
- diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc index c762671f43d42..af5d8a6ed49f5 100644 --- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc +++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc @@ -129,7 +129,7 @@ POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true "index_settings": { <3> "index.number_of_replicas": 0 }, - "ignored_index_settings": [ "index.refresh_interval" ] <4> + "ignore_index_settings": [ "index.refresh_interval" ] <4> } -------------------------------------------------- // TEST[continued] diff --git a/docs/reference/settings/common-defs.asciidoc b/docs/reference/settings/common-defs.asciidoc index 538f2686684e3..1fc9a8bfecf2a 100644 --- a/docs/reference/settings/common-defs.asciidoc +++ b/docs/reference/settings/common-defs.asciidoc @@ -148,12 +148,6 @@ file name ends in ".p12", ".pfx" or "pkcs12", the default is `PKCS12`. Otherwise, it defaults to `jks`. end::ssl-truststore-type[] -tag::ssl-truststore-type-pkcs11[] -The format of the truststore file. For the Java keystore format, use `jks`. For -PKCS#12 files, use `PKCS12`. For a PKCS#11 token, use `PKCS11`. The default is -`jks`. -end::ssl-truststore-type-pkcs11[] - tag::ssl-verification-mode-values[] Controls the verification of certificates. + diff --git a/docs/reference/settings/ilm-settings.asciidoc b/docs/reference/settings/ilm-settings.asciidoc index c94809f886db2..395890d4349d2 100644 --- a/docs/reference/settings/ilm-settings.asciidoc +++ b/docs/reference/settings/ilm-settings.asciidoc @@ -37,8 +37,9 @@ You can explicitly set it to <>. Defaults to `false`. `index.lifecycle.name`:: -(<>, string) -The name of the policy to use to manage the index. +(<>, string) +The name of the policy to use to manage the index. For information about how +{es} applies policy changes, see <>. [[index-lifecycle-origination-date]] `index.lifecycle.origination_date`:: diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index 80ba1216eea90..e9ab1f82e6ddc 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -32,25 +32,21 @@ the node. To learn more, refer to <>. ==== * On dedicated coordinating nodes or dedicated master nodes, do not set the `ml` role. -* It is strongly recommended that dedicated {ml} nodes also have the `remote_cluster_client` role; otherwise, {ccs} fails when used in {ml} jobs or {dfeeds}. See <>. +* It is strongly recommended that dedicated {ml} nodes also have the +`remote_cluster_client` role; otherwise, {ccs} fails when used in {ml} jobs or +{dfeeds}. See <>. ==== `xpack.ml.enabled`:: -(<>) Set to `true` (default) to enable {ml} APIs +(<>) The default value (`true`) enables {ml} APIs on the node. + -If set to `false`, the {ml} APIs are disabled on the node. Therefore the node -cannot open jobs, start {dfeeds}, or receive transport (internal) communication -requests related to {ml} APIs. If the node is a coordinating node, {ml} requests -from clients (including {kib}) also fail. For more information about disabling -{ml} in specific {kib} instances, see -{kibana-ref}/ml-settings-kb.html[{kib} {ml} settings]. -+ IMPORTANT: If you want to use {ml-features} in your cluster, it is recommended -that you set `xpack.ml.enabled` to `true` on all nodes. This is the default -behavior. At a minimum, it must be enabled on all master-eligible nodes. 
If you -want to use {ml-features} in clients or {kib}, it must also be enabled on all -coordinating nodes. +that you use the default value for this setting on all nodes. ++ +If set to `false`, the {ml} APIs are disabled on the node. For example, the node +cannot open jobs, start {dfeeds}, receive transport (internal) communication +requests, or requests from clients (including {kib}) related to {ml} APIs. `xpack.ml.inference_model.cache_size`:: (<>) The maximum inference cache size allowed. diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index cb8986961a5b3..874b41eb6b096 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -65,11 +65,11 @@ connection is being initiated. The maximum period of inactivity between two data packets, before the request is aborted. -`xpack.http.tcp.keep_alive` +`xpack.http.tcp.keep_alive`:: (<>) Whether to enable TCP keepalives on HTTP connections. Defaults to `true`. -`xpack.http.connection_pool_ttl` +`xpack.http.connection_pool_ttl`:: (<>) The time-to-live of connections in the connection pool. If a connection is not re-used within this timeout, it is closed. By default, the time-to-live is diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index b0ad720fb6b32..f6bdbcb8a1224 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -63,7 +63,9 @@ Enables fips mode of operation. Set this to `true` if you run this {es} instance `xpack.security.authc.password_hashing.algorithm`:: (<>) Specifies the hashing algorithm that is used for secure user credential storage. -See <>. Defaults to `bcrypt`. +See <>. +If `xpack.security.fips_mode.enabled` is true (see <>), defaults to `pbkdf2_stretch`. +In all other cases, defaults to `bcrypt`. [discrete] [[anonymous-access-settings]] @@ -598,7 +600,7 @@ include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-secure-p `ssl.truststore.type`:: (<>) -include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-type-pkcs11] +include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-type] `ssl.verification_mode`:: (<>) @@ -901,7 +903,7 @@ You cannot use this setting and `ssl.certificate_authorities` at the same time. `ssl.truststore.type`:: (<>) -include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-type-pkcs11] +include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-type] `ssl.verification_mode`:: (<>) diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index 5caa75afe6027..e98f07ee61356 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -150,7 +150,7 @@ include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-path] +{ssl-prefix}.ssl.truststore.type+:: (<>) Set this to `PKCS12` to indicate that the truststore is a PKCS#12 file. -//TBD:Should this use the ssl-truststore-type-pkcs11 or ssl-truststore-type definition and default values? +//TBD:Should this use the ssl-truststore-type definition and default values? 
+{ssl-prefix}.ssl.truststore.password+:: (<>) @@ -160,30 +160,3 @@ include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-password (<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-secure-password] -[#{ssl-context}-pkcs11-tokens] -===== PKCS#11 tokens - -{es} can be configured to use a PKCS#11 token that contains the private key, -certificate and certificates that should be trusted. - -PKCS#11 token require additional configuration on the JVM level and can be enabled -via the following settings: - -+{ssl-prefix}.keystore.type+:: -(<>) -Set this to `PKCS11` to indicate that the PKCS#11 token should be used as a keystore. -//TBD: Is the default value `jks`? - -+{ssl-prefix}.truststore.type+:: -(<>) -include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-type-pkcs11] - -[NOTE] -When configuring the PKCS#11 token that your JVM is configured to use as -a keystore or a truststore for Elasticsearch, the PIN for the token can be -configured by setting the appropriate value to `ssl.truststore.password` -or `ssl.truststore.secure_password` in the context that you are configuring. -Since there can only be one PKCS#11 token configured, only one keystore and -truststore will be usable for configuration in {es}. This in turn means -that only one certificate can be used for TLS both in the transport and the -http layer. diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index a19bacb47fa44..7bfc4120ef956 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -339,11 +339,24 @@ over the configuration files in the image. You can set individual {es} configuration parameters using Docker environment variables. The <> and the -<> use this method. +<> use this method. You can +use the setting name directly as the environment variable name. If +you cannot do this, for example because your orchestration platform forbids +periods in environment variable names, then you can use an alternative +style by converting the setting name as follows. -To use the contents of a file to set an environment variable, suffix the environment -variable name with `_FILE`. This is useful for passing secrets such as passwords to {es} -without specifying them directly. +. Change the setting name to uppercase +. Prefix it with `ES_SETTING_` +. Escape any underscores (`_`) by duplicating them +. Convert all periods (`.`) to underscores (`_`) + +For example, `-e bootstrap.memory_lock=true` becomes +`-e ES_SETTING_BOOTSTRAP_MEMORY__LOCK=true`. + +You can use the contents of a file to set the value of the +`ELASTIC_PASSWORD` or `KEYSTORE_PASSWORD` environment variables, by +suffixing the environment variable name with `_FILE`. This is useful for +passing secrets such as passwords to {es} without specifying them directly. For example, to set the {es} bootstrap password from a file, you can bind mount the file and set the `ELASTIC_PASSWORD_FILE` environment variable to the mount location. @@ -354,7 +367,7 @@ If you mount the password file to `/run/secrets/bootstrapPassword.txt`, specify: -e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt -------------------------------------------- -You can also override the default command for the image to pass {es} configuration +You can override the default command for the image to pass {es} configuration parameters as command line options. 
For example: [source,sh] diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index e035c678b73f1..7d25fec157f82 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -192,33 +192,21 @@ particular logger to another file. However, these use cases are rare. [[deprecation-logging]] === Deprecation logging -In addition to regular logging, Elasticsearch allows you to enable logging -of deprecated actions. For example this allows you to determine early, if -you need to migrate certain functionality in the future. By default, -deprecation logging is enabled at the WARN level, the level at which all -deprecation log messages will be emitted. +{es} also writes deprecation logs to the log directory. These logs record a +message when you use deprecated {es} functionality. You can use the deprecation +logs to update your application before upgrading {es} to a new major version. -[source,properties] --------------------------------------------------- -logger.deprecation.level = warn --------------------------------------------------- +By default, {es} rolls and compresses deprecation logs at 1GB. The default +configuration preserves a maximum of five log files: four rolled logs and an +active log. -This will create a daily rolling deprecation log file in your log directory. -Check this file regularly, especially when you intend to upgrade to a new -major version. +{es} emits deprecation log messages at the `DEPRECATION` level. To stop writing +deprecation log messages, set `logger.deprecation.level` to `error`: -The default logging configuration has set the roll policy for the deprecation -logs to roll and compress after 1 GB, and to preserve a maximum of five log -files (four rolled logs, and the active log). - -You can disable it in the `config/log4j2.properties` file by setting the deprecation -log level to `error` like this: [source,properties] --------------------------------------------------- -logger.deprecation.name = org.elasticsearch.deprecation +---- logger.deprecation.level = error --------------------------------------------------- - +---- You can identify what is triggering deprecated functionality if `X-Opaque-Id` was used as an HTTP header. The user ID is included in the `X-Opaque-ID` field in deprecation JSON logs. diff --git a/docs/reference/setup/sysconfig/tcpretries.asciidoc b/docs/reference/setup/sysconfig/tcpretries.asciidoc index e25c07c388a1a..a7cbe728e212d 100644 --- a/docs/reference/setup/sysconfig/tcpretries.asciidoc +++ b/docs/reference/setup/sysconfig/tcpretries.asciidoc @@ -1,32 +1,38 @@ [[system-config-tcpretries]] === TCP retransmission timeout -Each pair of nodes in a cluster communicates via a number of TCP connections -which <> until one of the nodes shuts down -or communication between the nodes is disrupted by a failure in the underlying +Each pair of {es} nodes communicates via a number of TCP connections which +<> until one of the nodes shuts down or +communication between the nodes is disrupted by a failure in the underlying infrastructure. -TCP provides reliable communication over occasionally-unreliable networks by +TCP provides reliable communication over occasionally unreliable networks by hiding temporary network disruptions from the communicating applications. Your operating system will retransmit any lost messages a number of times before -informing the sender of any problem. 
Most Linux distributions default to -retransmitting any lost packets 15 times. Retransmissions back off -exponentially, so these 15 retransmissions take over 900 seconds to complete. -This means it takes Linux many minutes to detect a network partition or a -failed node with this method. Windows defaults to just 5 retransmissions which -corresponds with a timeout of around 6 seconds. +informing the sender of any problem. {es} must wait while the retransmissions +are happening and can only react once the operating system decides to give up. +Users must therefore also wait for a sequence of retransmissions to complete. + +Most Linux distributions default to retransmitting any lost packets 15 times. +Retransmissions back off exponentially, so these 15 retransmissions take over +900 seconds to complete. This means it takes Linux many minutes to detect a +network partition or a failed node with this method. Windows defaults to just 5 +retransmissions which corresponds with a timeout of around 6 seconds. The Linux default allows for communication over networks that may experience -very long periods of packet loss, but this default is excessive for production -networks within a single data centre as is the case for most {es} clusters. -Highly-available clusters must be able to detect node failures quickly so that -they can react promptly by reallocating lost shards, rerouting searches and -perhaps electing a new master node. Linux users should therefore reduce the -maximum number of TCP retransmissions. +very long periods of packet loss, but this default is excessive and even harmful +on the high quality networks used by most {es} installations. When a cluster +detects a node failure it reacts by reallocating lost shards, rerouting +searches, and maybe electing a new master node. Highly available clusters must +be able to detect node failures promptly, which can be achieved by reducing the +permitted number of retransmissions. Connections to +<> should also prefer to detect +failures much more quickly than the Linux default allows. Linux users should +therefore reduce the maximum number of TCP retransmissions. -You can decrease the maximum number of TCP retransmissions to `5` by running -the following command as `root`. Five retransmissions corresponds with a -timeout of around six seconds. +You can decrease the maximum number of TCP retransmissions to `5` by running the +following command as `root`. Five retransmissions corresponds with a timeout of +around six seconds. [source,sh] ------------------------------------- @@ -38,8 +44,8 @@ To set this value permanently, update the `net.ipv4.tcp_retries2` setting in `sysctl net.ipv4.tcp_retries2`. IMPORTANT: This setting applies to all TCP connections and will affect the -reliability of communication with systems outside your cluster too. If your -cluster communicates with external systems over an unreliable network then you +reliability of communication with systems other than {es} clusters too. If your +clusters communicate with external systems over a low quality network then you may need to select a higher value for `net.ipv4.tcp_retries2`. For this reason, {es} does not adjust this setting automatically. @@ -54,6 +60,6 @@ related to these application-level health checks. You must also ensure your network infrastructure does not interfere with the long-lived connections between nodes, <>. Devices which drop connections when they reach -a certain age are a common source of problems to Elasticsearch clusters, and -must not be used. 
+a certain age are a common source of problems to {es} clusters, and must not be +used. diff --git a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc index 9f13c4b25549f..12c2d485e5fbb 100644 --- a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc @@ -79,6 +79,16 @@ Name of the repository to create a snapshot in. (Required, string) Name of the snapshot to create. This name must be unique in the snapshot repository. +[[create-snapshot-api-query-params]] +==== {api-query-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] + +`wait_for_completion`:: +(Optional, Boolean) If `true`, the request returns a response when the snapshot +is complete. If `false`, the request returns a response when the snapshot +initializes. Defaults to `false`. + [role="child_attributes"] [[create-snapshot-api-request-body]] ==== {api-request-body-title} @@ -130,10 +140,8 @@ By default, all available feature states will be included in the snapshot if `include_global_state` is `true`, or no feature states if `include_global_state` is `false`. -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] - `metadata`:: -(Optional, string) +(Optional, object) Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. Metadata must be less than 1024 bytes. [[create-snapshot-api-partial]] @@ -143,12 +151,6 @@ If `false`, the entire snapshot will fail if one or more indices included in the + If `true`, allows taking a partial snapshot of indices with unavailable shards. -`wait_for_completion`:: -(Optional, Boolean) -If `true`, the request returns a response when the snapshot is complete. -If `false`, the request returns a response when the snapshot initializes. -Defaults to `false`. - [[create-snapshot-api-example]] ==== {api-examples-title} @@ -176,6 +178,7 @@ The API returns the following response: "snapshot": { "snapshot": "snapshot_2", "uuid": "vdRctLCxSketdKb54xw67g", + "repository": "my_repository", "version_id": , "version": , "indices": [], diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc index b19e922137815..60cf9b34d53c9 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc @@ -19,7 +19,9 @@ PUT /_snapshot/my_repository PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true +PUT /_snapshot/my_repository/snapshot_1?wait_for_completion=true PUT /_snapshot/my_repository/snapshot_2?wait_for_completion=true +PUT /_snapshot/my_repository/snapshot_3?wait_for_completion=true ---- // TESTSETUP //// @@ -128,8 +130,21 @@ Allows setting a sort order for the result. Defaults to `start_time`, i.e. sorti (Optional, string) Sort order. Valid values are `asc` for ascending and `desc` for descending order. Defaults to `asc`, meaning ascending order. -NOTE: The pagination parameters `size`, `order`, and `sort` are not supported when using `verbose=false` and the sort order for -requests with `verbose=false` is undefined. +`after`:: +(Optional, string) +Offset identifier to start pagination from as returned by the `next` field in the response body. 
+ +`offset`:: +(Optional, integer) +Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually +exclusive with using the `after` parameter. Defaults to `0`. + +NOTE: The `after` parameter and `next` field allow for iterating through snapshots with some consistency guarantees regarding concurrent +creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and not concurrently +deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. + +NOTE: The pagination parameters `size`, `order`, `after`, `offset` and `sort` are not supported when using `verbose=false` and the sort +order for requests with `verbose=false` is undefined. [role="child_attributes"] [[get-snapshot-api-response-body]] @@ -268,6 +283,19 @@ The snapshot `state` can be one of the following values: that were not processed correctly. ==== -- +`next`:: +(string) +If the request contained a size limit and there might be more results, a `next` field will be added to the response and can be used as the +`after` query parameter to fetch additional results. + +`total`:: +(integer) +The total number of snapshots that match the request when ignoring size limit or `after` query parameter. + +`remaining`:: +(integer) +The number of remaining snapshots that were not returned due to size limits and that can be fetched by additional requests using the `next` +field value. [[get-snapshot-api-example]] ==== {api-examples-title} @@ -284,42 +312,221 @@ The API returns the following response: [source,console-result] ---- { - "responses": [ + "snapshots": [ + { + "snapshot": "snapshot_2", + "uuid": "vdRctLCxSketdKb54xw67g", + "repository": "my_repository", + "version_id": , + "version": , + "indices": [], + "data_streams": [], + "feature_states": [], + "include_global_state": true, + "state": "SUCCESS", + "start_time": "2020-07-06T21:55:18.129Z", + "start_time_in_millis": 1593093628850, + "end_time": "2020-07-06T21:55:18.129Z", + "end_time_in_millis": 1593094752018, + "duration_in_millis": 0, + "failures": [], + "shards": { + "total": 0, + "failed": 0, + "successful": 0 + } + } + ], + "total": 1, + "remaining": 0 +} +---- +// TESTRESPONSE[s/"uuid": "vdRctLCxSketdKb54xw67g"/"uuid": $body.snapshots.0.uuid/] +// TESTRESPONSE[s/"version_id": /"version_id": $body.snapshots.0.version_id/] +// TESTRESPONSE[s/"version": /"version": $body.snapshots.0.version/] +// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.129Z"/"start_time": $body.snapshots.0.start_time/] +// TESTRESPONSE[s/"start_time_in_millis": 1593093628850/"start_time_in_millis": $body.snapshots.0.start_time_in_millis/] +// TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.129Z"/"end_time": $body.snapshots.0.end_time/] +// TESTRESPONSE[s/"end_time_in_millis": 1593094752018/"end_time_in_millis": $body.snapshots.0.end_time_in_millis/] +// TESTRESPONSE[s/"duration_in_millis": 0/"duration_in_millis": $body.snapshots.0.duration_in_millis/] + +The following request returns information for all snapshots with prefix `snapshot` in the `my_repository` repository, +limiting the response size to 2 and sorting by snapshot name. 
+ +[source,console] +---- +GET /_snapshot/my_repository/snapshot*?size=2&sort=name +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "snapshots": [ + { + "snapshot": "snapshot_1", + "uuid": "dKb54xw67gvdRctLCxSket", + "repository": "my_repository", + "version_id": , + "version": , + "indices": [], + "data_streams": [], + "feature_states": [], + "include_global_state": true, + "state": "SUCCESS", + "start_time": "2020-07-06T21:55:18.129Z", + "start_time_in_millis": 1593093628850, + "end_time": "2020-07-06T21:55:18.129Z", + "end_time_in_millis": 1593094752018, + "duration_in_millis": 0, + "failures": [], + "shards": { + "total": 0, + "failed": 0, + "successful": 0 + } + }, + { + "snapshot": "snapshot_2", + "uuid": "vdRctLCxSketdKb54xw67g", + "repository": "my_repository", + "version_id": , + "version": , + "indices": [], + "data_streams": [], + "feature_states": [], + "include_global_state": true, + "state": "SUCCESS", + "start_time": "2020-07-06T21:55:18.130Z", + "start_time_in_millis": 1593093628851, + "end_time": "2020-07-06T21:55:18.130Z", + "end_time_in_millis": 1593094752019, + "duration_in_millis": 1, + "failures": [], + "shards": { + "total": 0, + "failed": 0, + "successful": 0 + }, + } + ], + "next": "c25hcHNob3RfMixteV9yZXBvc2l0b3J5LHNuYXBzaG90XzI=", + "total": 3, + "remaining": 1 +} +---- +// TESTRESPONSE[s/"uuid": "dKb54xw67gvdRctLCxSket"/"uuid": $body.snapshots.0.uuid/] +// TESTRESPONSE[s/"uuid": "vdRctLCxSketdKb54xw67g"/"uuid": $body.snapshots.1.uuid/] +// TESTRESPONSE[s/"version_id": /"version_id": $body.snapshots.0.version_id/] +// TESTRESPONSE[s/"version": /"version": $body.snapshots.0.version/] +// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.129Z"/"start_time": $body.snapshots.0.start_time/] +// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.130Z"/"start_time": $body.snapshots.1.start_time/] +// TESTRESPONSE[s/"start_time_in_millis": 1593093628850/"start_time_in_millis": $body.snapshots.0.start_time_in_millis/] +// TESTRESPONSE[s/"start_time_in_millis": 1593093628851/"start_time_in_millis": $body.snapshots.1.start_time_in_millis/] +// TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.129Z"/"end_time": $body.snapshots.0.end_time/] +// TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.130Z"/"end_time": $body.snapshots.1.end_time/] +// TESTRESPONSE[s/"end_time_in_millis": 1593094752018/"end_time_in_millis": $body.snapshots.0.end_time_in_millis/] +// TESTRESPONSE[s/"end_time_in_millis": 1593094752019/"end_time_in_millis": $body.snapshots.1.end_time_in_millis/] +// TESTRESPONSE[s/"duration_in_millis": 0/"duration_in_millis": $body.snapshots.0.duration_in_millis/] +// TESTRESPONSE[s/"duration_in_millis": 1/"duration_in_millis": $body.snapshots.1.duration_in_millis/] + +A subsequent request for the remaining snapshots can then be made using the `next` value from the previous response as `after` parameter. 
+ +[source,console] +---- +GET /_snapshot/my_repository/snapshot*?size=2&sort=name&after=c25hcHNob3RfMixteV9yZXBvc2l0b3J5LHNuYXBzaG90XzI= +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "snapshots": [ + { + "snapshot": "snapshot_3", + "uuid": "dRctdKb54xw67gvLCxSket", + "repository": "my_repository", + "version_id": , + "version": , + "indices": [], + "data_streams": [], + "feature_states": [], + "include_global_state": true, + "state": "SUCCESS", + "start_time": "2020-07-06T21:55:18.129Z", + "start_time_in_millis": 1593093628850, + "end_time": "2020-07-06T21:55:18.129Z", + "end_time_in_millis": 1593094752018, + "duration_in_millis": 0, + "failures": [], + "shards": { + "total": 0, + "failed": 0, + "successful": 0 + } + } + ], + "total": 3, + "remaining": 0 +} +---- +// TESTRESPONSE[s/"uuid": "dRctdKb54xw67gvLCxSket"/"uuid": $body.snapshots.0.uuid/] +// TESTRESPONSE[s/"version_id": /"version_id": $body.snapshots.0.version_id/] +// TESTRESPONSE[s/"version": /"version": $body.snapshots.0.version/] +// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.129Z"/"start_time": $body.snapshots.0.start_time/] +// TESTRESPONSE[s/"start_time_in_millis": 1593093628850/"start_time_in_millis": $body.snapshots.0.start_time_in_millis/] +// TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.129Z"/"end_time": $body.snapshots.0.end_time/] +// TESTRESPONSE[s/"end_time_in_millis": 1593094752018/"end_time_in_millis": $body.snapshots.0.end_time_in_millis/] +// TESTRESPONSE[s/"duration_in_millis": 0/"duration_in_millis": $body.snapshots.0.duration_in_millis/] + +Alternatively, the same result could be retrieved by using an offset value of `2` to skip the two snapshot already seen. + +[source,console] +---- +GET /_snapshot/my_repository/snapshot*?size=2&sort=name&offset=2 +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "snapshots": [ { + "snapshot": "snapshot_3", + "uuid": "dRctdKb54xw67gvLCxSket", "repository": "my_repository", - "snapshots": [ - { - "snapshot": "snapshot_2", - "uuid": "vdRctLCxSketdKb54xw67g", - "version_id": , - "version": , - "indices": [], - "data_streams": [], - "feature_states": [], - "include_global_state": true, - "state": "SUCCESS", - "start_time": "2020-07-06T21:55:18.129Z", - "start_time_in_millis": 1593093628850, - "end_time": "2020-07-06T21:55:18.129Z", - "end_time_in_millis": 1593094752018, - "duration_in_millis": 0, - "failures": [], - "shards": { - "total": 0, - "failed": 0, - "successful": 0 - } - } - ] + "version_id": , + "version": , + "indices": [], + "data_streams": [], + "feature_states": [], + "include_global_state": true, + "state": "SUCCESS", + "start_time": "2020-07-06T21:55:18.129Z", + "start_time_in_millis": 1593093628850, + "end_time": "2020-07-06T21:55:18.129Z", + "end_time_in_millis": 1593094752018, + "duration_in_millis": 0, + "failures": [], + "shards": { + "total": 0, + "failed": 0, + "successful": 0 + } } - ] + ], + "total": 3, + "remaining": 0 } ---- -// TESTRESPONSE[s/"uuid": "vdRctLCxSketdKb54xw67g"/"uuid": $body.responses.0.snapshots.0.uuid/] -// TESTRESPONSE[s/"version_id": /"version_id": $body.responses.0.snapshots.0.version_id/] -// TESTRESPONSE[s/"version": /"version": $body.responses.0.snapshots.0.version/] -// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.129Z"/"start_time": $body.responses.0.snapshots.0.start_time/] -// TESTRESPONSE[s/"start_time_in_millis": 1593093628850/"start_time_in_millis": $body.responses.0.snapshots.0.start_time_in_millis/] -// 
TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.129Z"/"end_time": $body.responses.0.snapshots.0.end_time/] -// TESTRESPONSE[s/"end_time_in_millis": 1593094752018/"end_time_in_millis": $body.responses.0.snapshots.0.end_time_in_millis/] -// TESTRESPONSE[s/"duration_in_millis": 0/"duration_in_millis": $body.responses.0.snapshots.0.duration_in_millis/] +// TESTRESPONSE[s/"uuid": "dRctdKb54xw67gvLCxSket"/"uuid": $body.snapshots.0.uuid/] +// TESTRESPONSE[s/"version_id": /"version_id": $body.snapshots.0.version_id/] +// TESTRESPONSE[s/"version": /"version": $body.snapshots.0.version/] +// TESTRESPONSE[s/"start_time": "2020-07-06T21:55:18.129Z"/"start_time": $body.snapshots.0.start_time/] +// TESTRESPONSE[s/"start_time_in_millis": 1593093628850/"start_time_in_millis": $body.snapshots.0.start_time_in_millis/] +// TESTRESPONSE[s/"end_time": "2020-07-06T21:55:18.129Z"/"end_time": $body.snapshots.0.end_time/] +// TESTRESPONSE[s/"end_time_in_millis": 1593094752018/"end_time_in_millis": $body.snapshots.0.end_time_in_millis/] +// TESTRESPONSE[s/"duration_in_millis": 0/"duration_in_millis": $body.snapshots.0.duration_in_millis/] \ No newline at end of file diff --git a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc index fdf56d9f57d8a..ccbfa328e557e 100644 --- a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc @@ -176,11 +176,8 @@ in snapshots. Data files are not compressed. Defaults to `true`. `max_number_of_snapshots`:: (Optional, integer) -Maximum number of snapshots the repository can contain. Defaults to `500`. -+ -WARNING: We do not recommend increasing `max_number_of_snapshots`. Larger -snapshot repositories may degrade master node performance and cause stability -issues. Instead, delete older snapshots or use multiple repositories. +Maximum number of snapshots the repository can contain. +Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. `max_restore_bytes_per_sec`:: (Optional, <>) diff --git a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc index ad8be3790e4f5..6cdf65ba54e7e 100644 --- a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc +++ b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc @@ -9,7 +9,7 @@ For more information, see <>. 
[discrete] [[snapshot-restore-repo-apis]] === Snapshot repository management APIs -* <> +* <> * <> * <> * <> diff --git a/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc b/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc index 54ebdd56bd633..3e47082b6115c 100644 --- a/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc +++ b/docs/reference/snapshot-restore/monitor-snapshot-restore.asciidoc @@ -31,6 +31,8 @@ PUT /_snapshot/my_fs_backup } PUT /_snapshot/my_backup/snapshot_1?wait_for_completion=true + +PUT /_snapshot/my_backup/some_other_snapshot?wait_for_completion=true ----------------------------------- // TESTSETUP diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc index db0c6172ccee4..3020cca60e357 100644 --- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc @@ -29,7 +29,7 @@ POST /_snapshot/my_backup/snapshot_1/_restore By default, all data streams and indices in the snapshot are restored, but the cluster state is *not* restored. Use the `indices` parameter to restore only specific data streams or indices. This parameter -supports <>. To include the global cluster state, set +supports <>. To include the global cluster state, set `include_global_state` to `true` in the restore request body. Because all indices in the snapshot are restored by default, all system indices will be restored @@ -101,7 +101,7 @@ persistent settings, non-legacy index templates, ingest pipelines and the corresponding items from the snapshot. The restore operation must be performed on a functioning cluster. However, an -existing index can be only restored if it's <> and +existing index can only be restored if it's <> and has the same number of shards as the index in the snapshot. The restore operation automatically opens restored indices if they were closed and creates new indices if they didn't exist in the cluster. diff --git a/docs/reference/snapshot-restore/take-snapshot.asciidoc b/docs/reference/snapshot-restore/take-snapshot.asciidoc index 8a14a9ed28c5e..0bc7c2379c5d1 100644 --- a/docs/reference/snapshot-restore/take-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/take-snapshot.asciidoc @@ -54,7 +54,7 @@ PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true // TEST[skip:cannot complete subsequent snapshot] Use the `indices` parameter to list the data streams and indices that should be included in the snapshot. This parameter supports -<>, although the options that control the behavior of multi-index syntax +<>, although the options that control the behavior of multi-index syntax must be supplied in the body of the request, rather than as request parameters. Data stream backups include the stream's backing indices and metadata, such as diff --git a/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc b/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc new file mode 100644 index 0000000000000..60a17cebd433f --- /dev/null +++ b/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc @@ -0,0 +1,48 @@ +[role="xpack"] +[testenv="basic"] +[[clear-sql-cursor-api]] +=== Clear SQL cursor API +++++ +Clear SQL cursor +++++ + +Clears an <>. 
+ +//// +[source,console] +---- +POST _sql +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +---- +// TEST[setup:library] +//// + +[source,console] +---- +POST _sql/close +{ + "cursor": "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=" +} +---- +// TEST[continued] +// TEST[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f\/\/\/w8=/$body.cursor/] + +[[clear-sql-cursor-api-request]] +==== {api-request-title} + +`POST _sql/close` + +[[clear-sql-cursor-api-limitations]] +===== Limitations + +See <>. + +[role="child_attributes"] +[[clear-sql-cursor-api-request-body]] +==== {api-request-body-title} + +`cursor`:: +(Required, string) Cursor to clear. diff --git a/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc b/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc new file mode 100644 index 0000000000000..59293a9f3bbe0 --- /dev/null +++ b/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc @@ -0,0 +1,41 @@ +[role="xpack"] +[testenv="basic"] +[[delete-async-sql-search-api]] +=== Delete async SQL search API +++++ +Delete async SQL search +++++ + +Deletes an <> or a <>. If the search is still running, the API cancels it. + +[source,console] +---- +DELETE _sql/async/delete/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM= +---- +// TEST[skip: no access to search ID] + +[[delete-async-sql-search-api-request]] +==== {api-request-title} + +`DELETE _sql/async/delete/` + +[[delete-async-sql-search-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, only the following users can +use this API to delete a search: + +** Users with the `cancel_task` <> +** The user who first submitted the search + +[[delete-async-sql-search-api-limitations]] +===== Limitations + +See <>. + +[[delete-async-sql-search-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Identifier for the search. diff --git a/docs/reference/sql/apis/get-async-sql-search-api.asciidoc b/docs/reference/sql/apis/get-async-sql-search-api.asciidoc new file mode 100644 index 0000000000000..f9a517258b5a2 --- /dev/null +++ b/docs/reference/sql/apis/get-async-sql-search-api.asciidoc @@ -0,0 +1,64 @@ +[role="xpack"] +[testenv="basic"] +[[get-async-sql-search-api]] +=== Get async SQL search API +++++ +Get async SQL search +++++ + +Returns results for an <> or a +<>. + +[source,console] +---- +GET _sql/async/FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=?format=json +---- +// TEST[skip: no access to search ID] + +[[get-async-sql-search-api-request]] +==== {api-request-title} + +`GET _sql/async/` + +[[get-async-sql-search-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, only the user who first submitted +the SQL search can retrieve the search using this API. + +[[get-async-sql-search-api-limitations]] +===== Limitations + +See <>. + +[[get-async-sql-search-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Identifier for the search. + +[[get-async-sql-search-api-query-params]] +==== {api-query-parms-title} + +`delimiter`:: +(Optional, string) Separator for CSV results. Defaults to `,`. The API only +supports this parameter for CSV responses. + +`format`:: +(Required, string) Format for the response. You must specify a format using this +parameter or the `Accept` HTTP header. 
If you specify both, the API uses this +parameter. For valid values, see <>. + +`keep_alive`:: +(Optional, <>) Retention period for the search and its +results. Defaults to the `keep_alive` period for the original SQL search. + +`wait_for_completion_timeout`:: +(Optional, <>) Period to wait for complete results. +Defaults to no timeout, meaning the request waits for complete search results. + +[[get-async-sql-search-api-response-body]] +==== {api-response-body-title} + +The get async SQL search API returns the same response body as the +<>. diff --git a/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc b/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc new file mode 100644 index 0000000000000..7d4d3b3086927 --- /dev/null +++ b/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc @@ -0,0 +1,67 @@ +[role="xpack"] +[testenv="basic"] +[[get-async-sql-search-status-api]] +=== Get async SQL search status API +++++ +Get async SQL search status +++++ + +Returns the current status of an <> or a +<>. + +[source,console] +---- +GET _sql/async/status/FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=?format=json +---- +// TEST[skip: no access to search ID] + +[[get-async-sql-search-status-api-request]] +==== {api-request-title} + +`GET _sql/async/status/` + +[[get-async-sql-search-status-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `monitor` +<> to use this API. + +[[get-async-sql-search-status-api-limitations]] +===== Limitations + +See <>. + +[[get-async-sql-search-status-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Identifier for the search. + +[role="child_attributes"] +[[get-async-sql-search-status-api-response-body]] +==== {api-response-body-title} + +`id`:: +(string) Identifier for the search. + +`is_running`:: +(Boolean) If `true`, the search is still running. If `false`, the +search has finished. + +`is_partial`:: +(Boolean) If `true`, the response does not contain complete search results. If +`is_partial` is `true` and `is_running` is `true`, the search is still running. +If `is_partial` is `true` but `is_running` is `false`, the results are partial +due to a failure or timeout. + +`start_time_in_millis`:: +(integer) Timestamp, in milliseconds since the Unix epoch, when the search +started. The API only returns this property for running searches. + +`expiration_time_in_millis`:: +(integer) Timestamp, in milliseconds since the Unix epoch, when {es} will delete +the search and its results, even if the search is still running. + +`completion_status`:: +(integer) HTTP status code for the search. The API only returns this property +for completed searches. diff --git a/docs/reference/sql/apis/sql-apis.asciidoc b/docs/reference/sql/apis/sql-apis.asciidoc new file mode 100644 index 0000000000000..d4b68052ed523 --- /dev/null +++ b/docs/reference/sql/apis/sql-apis.asciidoc @@ -0,0 +1,26 @@ +[role="xpack"] +[testenv="basic"] +[[sql-apis]] +== SQL APIs + +{es}'s SQL APIs let you run SQL queries on {es} indices and data streams. +For an overview of {es}'s SQL features and related tutorials, see <>. 
+ +* <> +* <> +* <> +* <> +* <> +* <> + +include::clear-sql-cursor-api.asciidoc[] + +include::delete-async-sql-search-api.asciidoc[] + +include::get-async-sql-search-api.asciidoc[] + +include::get-async-sql-search-status-api.asciidoc[] + +include::sql-search-api.asciidoc[] + +include::sql-translate-api.asciidoc[] diff --git a/docs/reference/sql/apis/sql-search-api.asciidoc b/docs/reference/sql/apis/sql-search-api.asciidoc new file mode 100644 index 0000000000000..7d82cb563318f --- /dev/null +++ b/docs/reference/sql/apis/sql-search-api.asciidoc @@ -0,0 +1,180 @@ +[role="xpack"] +[testenv="basic"] +[[sql-search-api]] +=== SQL search API +++++ +SQL search +++++ + +Returns results for an <>. + +[source,console] +---- +POST _sql?format=txt +{ + "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5" +} +---- +// TEST[setup:library] + +[[sql-search-api-request]] +==== {api-request-title} + +`GET _sql` + +`POST _sql` + +[[sql-search-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `read` +<> for the data stream, index, +or alias you search. + +[[sql-search-api-limitations]] +===== Limitations + +See <>. + +[[search-api-query-params]] +==== {api-query-parms-title} + +`delimiter`:: +(Optional, string) Separator for CSV results. Defaults to `,`. The API only +supports this parameter for CSV responses. + +`format`:: +(Optional, string) Format for the response. For valid values, see +<>. ++ +You can also specify a format using the `Accept` HTTP header. If you specify +both this parameter and the `Accept` HTTP header, this parameter takes +precedence. + +[role="child_attributes"] +[[sql-search-api-request-body]] +==== {api-request-body-title} + +`columnar`:: +(Optional, Boolean) If `true`, returns results in a columnar format. Defaults to +`false`. The API only supports this parameter for CBOR, JSON, SMILE, and YAML +responses. See <>. + +`cursor`:: +(Optional, string) <> used to retrieve a set of paginated +results. If you specify a `cursor`, the API only uses the `columnar` and +`time_zone` request body parameters. It ignores other request body parameters. + +[[sql-search-api-fetch-size]] +`fetch_size`:: +(Optional, integer) Maximum number of rows to return in the response. Defaults +to `1000`. + +[[sql-search-api-field-multi-value-leniency]] +`field_multi_value_leniency`:: +(Optional, Boolean) If `false`, the API returns an error for fields containing +<>. If `true`, the API returns the first value from the +array with no guarantee of consistent results. Defaults to `false`. + +`filter`:: +(Optional, object) <> used to filter documents for the SQL +search. See <>. + +`index_include_frozen`:: +(Optional, Boolean) If `true`, the search can run on frozen indices. Defaults to +`false`. + +`keep_alive`:: +(Optional, <>) Retention period for an +<> or <>. Defaults +to `5d` (five days). + +`keep_on_completion`:: +(Optional, Boolean) If `true`, {es} <> if you also specify the `wait_for_completion_timeout` parameter. If +`false`, {es} only stores <> that don't finish before +the `wait_for_completion_timeout`. Defaults to `false`. + +`page_timeout`:: +(Optional, <>) Timeout before a +<> fails. Defaults to `45s` (45 seconds). + +`params`:: +(Optional, array) Values for parameters in the `query`. For syntax, see +<>. + +`query`:: +(Required, object) SQL query to run. For syntax, see <>. + +`request_timeout`:: +(Optional, <>) Timeout before the request fails. Defaults +to `90s` (90 seconds). 
+ +include::{es-repo-dir}/search/search.asciidoc[tag=runtime-mappings-def] + +[[sql-search-api-time-zone]] +`time_zone`:: +(Optional, string) ISO-8601 time zone ID for the search. Several +<> use this time zone. Defaults +to `Z` (UTC). + +`wait_for_completion_timeout`:: +(Optional, <>) Period to wait for complete results. +Defaults to no timeout, meaning the request waits for complete search results. +If the search doesn’t finish within this period, the search becomes +<>. ++ +To <>, you must specify this +parameter and the `keep_on_completion` parameter. + +[role="child_attributes"] +[[sql-search-api-response-body]] +==== {api-response-body-title} + +The SQL search API supports <>. Most +response formats use a tabular layout. JSON responses contain the following +properties: + +`id`:: +(string) Identifier for the search. This value is only returned for +<> and <>. For +CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP +header. + +`is_running`:: +(Boolean) If `true`, the search is still running. If `false`, the search has +finished. This value is only returned for <> and +<>. For CSV, TSV, and TXT +responses, this value is returned in the `Async-partial` HTTP header. + +`is_partial`:: +(Boolean) If `true`, the response does not contain complete search results. If +`is_partial` is `true` and `is_running` is `true`, the search is still running. +If `is_partial` is `true` but `is_running` is `false`, the results are partial +due to a failure or timeout. ++ +This value is only returned for <> and +<>. For CSV, TSV, and TXT +responses, this value is returned in the `Async-partial` HTTP header. + +`rows`:: +(array of arrays) +Values for the search results. + +`columns`:: +(array of objects) +Column headings for the search results. Each object is a column. ++ +.Properties of `columns` objects +[%collapsible%open] +==== +`name`:: +(string) Name of the column. + +`type`:: +(string) Data type for the column. +==== + +`cursor`:: +(string) <> for the next set of paginated results. For +CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. diff --git a/docs/reference/sql/apis/sql-translate-api.asciidoc b/docs/reference/sql/apis/sql-translate-api.asciidoc new file mode 100644 index 0000000000000..82ac8ef74edb9 --- /dev/null +++ b/docs/reference/sql/apis/sql-translate-api.asciidoc @@ -0,0 +1,53 @@ +[role="xpack"] +[testenv="basic"] +[[sql-translate-api]] +=== SQL translate API +++++ +SQL translate +++++ + +Translates an <> into a <> +request containing <>. See <>. + +[source,console] +---- +POST _sql/translate +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 10 +} +---- +// TEST[setup:library] + +[[sql-translate-api-request]] +==== {api-request-title} + +`GET _sql/translate` + +`POST _sql/translate` + +[[sql-translate-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `read` +<> for the data stream, index, +or alias you search. + +[[sql-translate-api-limitations]] +===== Limitations + +See <>. + +[role="child_attributes"] +[[sql-translate-api-request-body]] +==== {api-request-body-title} + +The SQL translate API accepts the same request body parameters as the +<>, excluding `cursor`. + +[role="child_attributes"] +[[sql-translate-api-response-body]] +==== {api-response-body-title} + +The SQL translate API returns the same response body as the +<>. 
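+
+[[sql-translate-api-example]]
+==== {api-examples-title}
+
+Because the request body mirrors the SQL search API, you can combine the SQL
+`query` with other documented parameters such as `filter` and `fetch_size`. The
+following sketch reuses the `library` test data from the example above; the
+`page_count` threshold is arbitrary and only for illustration:
+
+[source,console]
+----
+POST _sql/translate
+{
+  "query": "SELECT * FROM library ORDER BY page_count DESC",
+  "filter": {
+    "range": {
+      "page_count": {
+        "gte": 100
+      }
+    }
+  },
+  "fetch_size": 10
+}
+----
+// TEST[setup:library]
+
+The response is a search request body containing <> that you can
+inspect or run directly with the <>.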
diff --git a/docs/reference/sql/endpoints/odbc/configuration.asciidoc b/docs/reference/sql/endpoints/odbc/configuration.asciidoc index 9a4e3dac6a0f9..c6b0bf65e8787 100644 --- a/docs/reference/sql/endpoints/odbc/configuration.asciidoc +++ b/docs/reference/sql/endpoints/odbc/configuration.asciidoc @@ -220,8 +220,9 @@ timeout. * Max page size (rows) + The maximum number of rows that {es-sql} server should send the driver for one -page. This corresponds to {es-sql}'s request parameter `fetch_size` (see -<>). The value 0 means server default. +page. This corresponds to the SQL search API's +<> parameter. A `0` value indicates a +server default. + * Max page length (MB) + @@ -314,8 +315,9 @@ multi-value field is queried. In case this is set and the server encounters such a field, it will pick a value in the set - without any guarantees of what that will be, but typically the first in natural ascending order - and return it as the value for the column. If not set, the server will return an error. -This corresponds to {es-sql}'s request parameter `field_multi_value_leniency` -(see <>). +This corresponds to the SQL search API's +<> +parameter. + * Include frozen indices + diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index 7ca7e70774b72..1978583f4cfae 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -10,14 +10,13 @@ * <> * <> * <> -* <> +* <> [[sql-rest-overview]] === Overview -The SQL REST API accepts SQL in a JSON document, executes it, -and returns the results. -For example: +The <> accepts SQL in a JSON document, executes +it, and returns the results. For example: [source,console] -------------------------------------------------- @@ -51,6 +50,7 @@ If you are using {kibana-ref}/console-kibana.html[Kibana Console] (which is highly recommended), take advantage of the triple quotes `"""` when creating the query. This not only automatically escapes double quotes (`"`) inside the query string but also support multi-line as shown below: + image:images/sql/rest/console-triple-quotes.png[] ==== @@ -343,7 +343,7 @@ SQL may keep state in Elasticsearch to support the cursor. Unlike scroll, receiving the last page is enough to guarantee that the Elasticsearch state is cleared. -To clear the state earlier, you can use the clear cursor command: +To clear the state earlier, use the <>: [source,console] -------------------------------------------------- @@ -561,69 +561,175 @@ Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z|TUESDAY ---- // TESTRESPONSE[non_json] -[[sql-rest-fields]] -=== Supported REST parameters +[[sql-async]] +=== Run an async SQL search + +By default, SQL searches are synchronous. They wait for complete results before +returning a response. However, results can take longer for searches across large +data sets or <>. -In addition to the `query` and `fetch_size`, a request a number of user-defined fields for specifying -the request time-outs or localization information (such as timezone). +To avoid long waits, run an async SQL search. Set `wait_for_completion_timeout` +to a duration you’d like to wait for synchronous results. 
+ +[source,console] +---- +POST _sql?format=json +{ + "wait_for_completion_timeout": "2s", + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TEST[setup:library] +// TEST[s/"wait_for_completion_timeout": "2s"/"wait_for_completion_timeout": "0"/] -The table below lists the supported parameters: +If the search doesn’t finish within this period, the search becomes async. The +API returns: -[cols=">. + +[source,console] +---- +GET _sql/async/status/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU= +---- +// TEST[skip: no access to search ID] + +If `is_running` and `is_partial` are `false`, the async search has finished with +complete results. + +[source,console-result] +---- +{ + "id": "FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", + "is_running": false, + "is_partial": false, + "expiration_time_in_millis": 1611690295000, + "completion_status": 200 +} +---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TESTRESPONSE[s/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=/$body.id/] +// TESTRESPONSE[s/"expiration_time_in_millis": 1611690295000/"expiration_time_in_millis": $body.expiration_time_in_millis/] + +To get the results, use the search ID with the <>. If the search is still running, specify how long you’d +like to wait using `wait_for_completion_timeout`. You can also specify the +response `format`. -|query -|Mandatory -|SQL query to execute +[source,console] +---- +GET _sql/async/FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=?wait_for_completion_timeout=2s&format=json +---- +// TEST[skip: no access to search ID] -|fetch_size -|1000 -|The maximum number of rows (or entries) to return in one response +[discrete] +[[sql-async-retention]] +==== Change the search retention period -|filter -|none -|Optional {es} Query DSL for additional <>. +By default, {es} stores async SQL searches for five days. After this period, +{es} deletes the search and its results, even if the search is still running. To +change this retention period, use the `keep_alive` parameter. -|request_timeout -|90s -|The timeout before the request fails. +[source,console] +---- +POST _sql?format=json +{ + "keep_alive": "2d", + "wait_for_completion_timeout": "2s", + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TEST[setup:library] -|page_timeout -|45s -|The timeout before a pagination request fails. +You can use the get async SQL search API's `keep_alive` parameter to later +change the retention period. The new period starts after the request runs. -|[[sql-rest-fields-timezone]]time_zone -|`Z` (or `UTC`) -|Time-zone in ISO 8601 used for executing the query on the server. -More information available https://docs.oracle.com/javase/8/docs/api/java/time/ZoneId.html[here]. +[source,console] +---- +GET _sql/async/FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=?keep_alive=5d&wait_for_completion_timeout=2s&format=json +---- +// TEST[skip: no access to search ID] -|columnar -|false -|Return the results in a columnar fashion, rather than row-based fashion. Valid for `json`, `yaml`, `cbor` and `smile`. +Use the <> to delete an +async search before the `keep_alive` period ends. If the search is still +running, {es} cancels it. 
-|field_multi_value_leniency -|false -|Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order). +[source,console] +---- +DELETE _sql/async/delete/FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI= +---- +// TEST[skip: no access to search ID] -|index_include_frozen -|false -|Whether to include <> in the query execution or not (default). +[discrete] +[[sql-store-searches]] +==== Store synchronous SQL searches -|params -|none -|Optional list of parameters to replace question mark (`?`) placeholders inside the query. +By default, {es} only stores async SQL searches. To save a synchronous search, +specify `wait_for_completion_timeout` and set `keep_on_completion` to `true`. -|runtime_mappings -|none -|Defines one or more <> in the search -request. These fields take precedence over mapped fields with the same name. +[source,console] +---- +POST _sql?format=json +{ + "keep_on_completion": true, + "wait_for_completion_timeout": "2s", + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +---- +// TEST[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TEST[setup:library] -|=== +If `is_partial` and `is_running` are `false`, the search was synchronous and +returned complete results. -Do note that most parameters (outside the timeout and `columnar` ones) make sense only during the initial query - any follow-up pagination request only requires the `cursor` parameter as explained in the <> chapter. -That's because the query has already been executed and the calls are simply about returning the found results - thus the parameters are simply ignored. +[source,console-result] +---- +{ + "id": "Fnc5UllQdUVWU0NxRFNMbWxNYXplaFEaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQTo0NzA=", + "is_partial": false, + "is_running": false, + "rows": ..., + "columns": ..., + "cursor": ... +} +---- +// TESTRESPONSE[skip:waiting on https://github.com/elastic/elasticsearch/issues/75069] +// TESTRESPONSE[s/Fnc5UllQdUVWU0NxRFNMbWxNYXplaFEaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQTo0NzA=/$body.id/] +// TESTRESPONSE[s/"rows": \.\.\./"rows": $body.rows/] +// TESTRESPONSE[s/"columns": \.\.\./"columns": $body.columns/] +// TESTRESPONSE[s/"cursor": \.\.\./"cursor": $body.cursor/] + +You can get the same results later using the search ID with the +<>. + +Saved synchronous searches are still subject to the `keep_alive` retention +period. When this period ends, {es} deletes the search results. You can also +delete saved searches using the <>. diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc index 41f0d98805380..bc4dbd7ade5f0 100644 --- a/docs/reference/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -55,5 +55,5 @@ In this case, SQL will use the <> API. If the result contained an aggregation then SQL would use the normal <> API. -The request body accepts all of the <> that -the <> accepts except `cursor`. +The request body accepts the same <> as +the <>, excluding `cursor`. diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc index c90ec1c6f8b3e..1e44cfbee981b 100644 --- a/docs/reference/sql/functions/aggs.asciidoc +++ b/docs/reference/sql/functions/aggs.asciidoc @@ -21,7 +21,8 @@ AVG(numeric_field) <1> *Input*: -<1> numeric field +<1> numeric field. 
If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -48,16 +49,14 @@ COUNT(expression) <1> *Input*: -<1> a field name, wildcard (`*`) or any numeric value +<1> a field name, wildcard (`*`) or any numeric value. For `COUNT(*)` or +`COUNT()`, all values are considered, including `null` or missing +ones. For `COUNT()`, `null` values are not considered. *Output*: numeric value *Description*: Returns the total number (count) of input values. -In case of `COUNT(*)` or `COUNT()`, _all_ values are considered (including `null` or missing ones). - -In case of `COUNT()` `null` values are not considered. - ["source","sql",subs="attributes,macros"] -------------------------------------------------- @@ -76,7 +75,8 @@ COUNT(ALL field_name) <1> *Input*: -<1> a field name +<1> a field name. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: numeric value @@ -105,7 +105,8 @@ COUNT(DISTINCT field_name) <1> <1> a field name -*Output*: numeric value +*Output*: numeric value. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Description*: Returns the total number of _distinct non-null_ values in input values. @@ -137,7 +138,7 @@ FIRST( *Output*: same type as the input -*Description*: Returns the first **non-NULL** value (if such exists) of the `field_name` input column sorted by +*Description*: Returns the first non-`null` value (if such exists) of the `field_name` input column sorted by the `ordering_field_name` column. If `ordering_field_name` is not provided, only the `field_name` column is used for the sorting. E.g.: @@ -237,7 +238,7 @@ LAST( *Output*: same type as the input -*Description*: It's the inverse of <>. Returns the last **non-NULL** value (if such exists) of the +*Description*: It's the inverse of <>. Returns the last non-`null` value (if such exists) of the `field_name` input column sorted descending by the `ordering_field_name` column. If `ordering_field_name` is not provided, only the `field_name` column is used for the sorting. E.g.: @@ -330,7 +331,8 @@ MAX(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: same type as the input @@ -361,7 +363,8 @@ MIN(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: same type as the input @@ -387,7 +390,8 @@ SUM(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `bigint` for integer input, `double` for floating points @@ -418,7 +422,8 @@ KURTOSIS(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -452,7 +457,8 @@ MAD(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. 
Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -485,8 +491,10 @@ PERCENTILE( *Input*: -<1> a numeric field -<2> a numeric expression (must be a constant and not based on a field) +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. +<2> a numeric expression (must be a constant and not based on a field). If +`null`, the function returns `null`. <3> optional string literal for the <>. Possible values: `tdigest` or `hdr`. Defaults to `tdigest`. <4> optional numeric literal that configures the <>. Configures `compression` for `tdigest` or `number_of_significant_value_digits` for `hdr`. The default is the same as that of the backing algorithm. @@ -527,8 +535,10 @@ PERCENTILE_RANK( *Input*: -<1> a numeric field -<2> a numeric expression (must be a constant and not based on a field) +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. +<2> a numeric expression (must be a constant and not based on a field). If +`null`, the function returns `null`. <3> optional string literal for the <>. Possible values: `tdigest` or `hdr`. Defaults to `tdigest`. <4> optional numeric literal that configures the <>. Configures `compression` for `tdigest` or `number_of_significant_value_digits` for `hdr`. The default is the same as that of the backing algorithm. @@ -566,7 +576,8 @@ SKEWNESS(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -600,7 +611,8 @@ STDDEV_POP(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -629,7 +641,8 @@ STDDEV_SAMP(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -658,7 +671,8 @@ SUM_OF_SQUARES(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -687,7 +701,8 @@ VAR_POP(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. *Output*: `double` numeric value @@ -717,7 +732,8 @@ VAR_SAMP(field_name) <1> *Input*: -<1> a numeric field +<1> a numeric field. If this field contains only `null` values, the function +returns `null`. Otherwise, the function ignores `null` values in this field. 
*Output*: `double` numeric value diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index f0e1e59b1e665..c43d926514fe7 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -256,15 +256,17 @@ DATE_ADD( *Input*: -<1> string expression denoting the date/time unit to add to the date/datetime -<2> integer expression denoting how many times the above unit should be added to/from the date/datetime, -if a negative value is used it results to a subtraction from the date/datetime -<3> date/datetime expression +<1> string expression denoting the date/time unit to add to the date/datetime. +If `null`, the function returns `null`. +<2> integer expression denoting how many times the above unit should be added +to/from the date/datetime, if a negative value is used it results to a +subtraction from the date/datetime. If `null`, the function returns `null`. +<3> date/datetime expression. If `null`, the function returns `null`. *Output*: datetime *Description*: Add the given number of date/time units to a date/datetime. If the number of units is negative then it's subtracted from -the date/datetime. If any of the three arguments is `null` a `null` is returned. +the date/datetime. [WARNING] If the second argument is a long there is possibility of truncation since an integer value will be extracted and @@ -332,15 +334,16 @@ DATE_DIFF( *Input*: -<1> string expression denoting the date/time unit difference between the following two date/datetime expressions -<2> start date/datetime expression -<3> end date/datetime expression +<1> string expression denoting the date/time unit difference between the +following two date/datetime expressions. If `null`, the function returns `null`. +<2> start date/datetime expression. If `null`, the function returns `null`. +<3> end date/datetime expression. If `null`, the function returns `null`. *Output*: integer *Description*: Subtract the second argument from the third argument and return their difference in multiples of the unit specified in the first argument. If the second argument (start) is greater than the third argument (end), -then negative values are returned. If any of the three arguments is `null`, a `null` is returned. +then negative values are returned. [cols="^,^"] |=== @@ -417,15 +420,15 @@ DATE_PARSE( *Input*: -<1> date expression as a string -<2> parsing pattern +<1> date expression as a string. If `null` or an empty string, the function +returns `null`. +<2> parsing pattern. If `null` or an empty string, the function returns `null`. *Output*: date *Description*: Returns a date by parsing the 1st argument using the format specified in the 2nd argument. The parsing format pattern used is the one from https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/time/format/DateTimeFormatter.html[`java.time.format.DateTimeFormatter`]. -If any of the two arguments is `null` or an empty string, then `null` is returned. [NOTE] If the parsing pattern does not contain all valid date units (e.g. 'HH:mm:ss', 'dd-MM HH:mm:ss', etc.) an error is returned @@ -439,7 +442,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dateParse1] [NOTE] ==== The resulting `date` will have the time zone specified by the user through the -<>/<> REST/driver parameters +<>/<> REST/driver parameters with no conversion applied. 
[source, sql] @@ -461,15 +464,14 @@ DATETIME_FORMAT( *Input*: -<1> date/datetime/time expression -<2> format pattern +<1> date/datetime/time expression. If `null`, the function returns `null`. +<2> format pattern. If `null` or an empty string, the function returns `null`. *Output*: string *Description*: Returns the date/datetime/time as a string using the format specified in the 2nd argument. The formatting pattern used is the one from https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/time/format/DateTimeFormatter.html[`java.time.format.DateTimeFormatter`]. -If any of the two arguments is `null` or the pattern is an empty string `null` is returned. [NOTE] If the 1st argument is of type `time`, then pattern specified by the 2nd argument cannot contain date related units @@ -503,15 +505,15 @@ DATETIME_PARSE( *Input*: -<1> datetime expression as a string -<2> parsing pattern +<1> datetime expression as a string. If `null` or an empty string, the function +returns `null`. +<2> parsing pattern. If `null` or an empty string, the function returns `null`. *Output*: datetime *Description*: Returns a datetime by parsing the 1st argument using the format specified in the 2nd argument. The parsing format pattern used is the one from https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/time/format/DateTimeFormatter.html[`java.time.format.DateTimeFormatter`]. -If any of the two arguments is `null` or an empty string `null` is returned. [NOTE] @@ -531,7 +533,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dateTimeParse2] [NOTE] ==== If timezone is not specified in the datetime string expression and the parsing pattern, the resulting `datetime` will have the -time zone specified by the user through the <>/<> REST/driver parameters +time zone specified by the user through the <>/<> REST/driver parameters with no conversion applied. [source, sql] @@ -553,15 +555,15 @@ TIME_PARSE( *Input*: -<1> time expression as a string -<2> parsing pattern +<1> time expression as a string. If `null` or an empty string, the function +returns `null`. +<2> parsing pattern. If `null` or an empty string, the function returns `null`. *Output*: time *Description*: Returns a time by parsing the 1st argument using the format specified in the 2nd argument. The parsing format pattern used is the one from https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/time/format/DateTimeFormatter.html[`java.time.format.DateTimeFormatter`]. -If any of the two arguments is `null` or an empty string `null` is returned. [NOTE] If the parsing pattern contains only date units (e.g. 'dd/MM/yyyy') an error is returned @@ -581,7 +583,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[timeParse2] ==== If timezone is not specified in the time string expression and the parsing pattern, the resulting `time` will have the offset of the time zone specified by the user through the - <>/<> REST/driver + <>/<> REST/driver parameters at the Unix epoch date (`1970-01-01`) with no conversion applied. [source, sql] @@ -603,14 +605,15 @@ DATE_PART( *Input*: -<1> string expression denoting the unit to extract from the date/datetime -<2> date/datetime expression +<1> string expression denoting the unit to extract from the date/datetime. If +`null`, the function returns `null`. +<2> date/datetime expression. If `null`, the function returns `null`. *Output*: integer -*Description*: Extract the specified unit from a date/datetime. If any of the two arguments is `null` a `null` is returned. 
-It's similar to <> but with different names and aliases for the units and -provides more options (e.g.: `TZOFFSET`). +*Description*: Extract the specified unit from a date/datetime. It's similar to +<> but with different names and aliases for the +units and provides more options (e.g.: `TZOFFSET`). [cols="^,^"] |=== @@ -691,14 +694,17 @@ DATE_TRUNC( *Input*: -<1> string expression denoting the unit to which the date/datetime/interval should be truncated to -<2> date/datetime/interval expression +<1> string expression denoting the unit to which the date/datetime/interval +should be truncated to. If `null`, the function returns `null`. +<2> date/datetime/interval expression. If `null`, the function returns `null`. *Output*: datetime/interval -*Description*: Truncate the date/datetime/interval to the specified unit by setting all fields that are less significant than the specified -one to zero (or one, for day, day of week and month). If any of the two arguments is `null` a `null` is returned. -If the first argument is `week` and the second argument is of `interval` type, an error is thrown since the `interval` data type doesn't support a `week` time unit. +*Description*: Truncate the date/datetime/interval to the specified unit by +setting all fields that are less significant than the specified one to zero (or +one, for day, day of week and month). If the first argument is `week` and the +second argument is of `interval` type, an error is thrown since the `interval` +data type doesn't support a `week` time unit. [cols="^,^"] |=== @@ -778,16 +784,16 @@ FORMAT( *Input*: -<1> date/datetime/time expression -<2> format pattern +<1> date/datetime/time expression. If `null`, the function returns `null`. +<2> format pattern. If `null` or an empty string, the function returns `null`. *Output*: string *Description*: Returns the date/datetime/time as a string using the -https://docs.microsoft.com/en-us/sql/t-sql/functions/format-transact-sql#arguments[format] specified in the 2nd argument. The formatting -pattern used is the one from -https://docs.microsoft.com/en-us/dotnet/standard/base-types/custom-date-and-time-format-strings[Microsoft SQL Server Format Specification]. -If any of the two arguments is `null` or the pattern is an empty string `null` is returned. +https://docs.microsoft.com/en-us/sql/t-sql/functions/format-transact-sql#arguments[format] +specified in the 2nd argument. The formatting pattern used is the one from +https://docs.microsoft.com/en-us/dotnet/standard/base-types/custom-date-and-time-format-strings[Microsoft +SQL Server Format Specification]. [NOTE] If the 1st argument is of type `time`, then pattern specified by the 2nd argument cannot contain date related units @@ -829,15 +835,15 @@ TO_CHAR( *Input*: -<1> date/datetime/time expression -<2> format pattern +<1> date/datetime/time expression. If `null`, the function returns `null`. +<2> format pattern. If `null` or an empty string, the function returns `null`. *Output*: string -*Description*: Returns the date/datetime/time as a string using the format specified in the 2nd argument. The formatting -pattern conforms to -https://www.postgresql.org/docs/13/functions-formatting.html[PostgreSQL Template Patterns for Date/Time Formatting]. -If any of the two arguments is `null` or the pattern is an empty string `null` is returned. +*Description*: Returns the date/datetime/time as a string using the format +specified in the 2nd argument. 
The formatting pattern conforms to +https://www.postgresql.org/docs/13/functions-formatting.html[PostgreSQL Template +Patterns for Date/Time Formatting]. [NOTE] If the 1st argument is of type `time`, then the pattern specified by the 2nd argument cannot contain date related units @@ -875,7 +881,7 @@ DAY_OF_MONTH(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -897,7 +903,7 @@ DAY_OF_WEEK(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -919,7 +925,7 @@ DAY_OF_YEAR(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -941,7 +947,7 @@ DAY_NAME(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: string @@ -963,7 +969,7 @@ HOUR_OF_DAY(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -985,7 +991,7 @@ ISO_DAY_OF_WEEK(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1008,7 +1014,7 @@ ISO_WEEK_OF_YEAR(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1031,7 +1037,7 @@ MINUTE_OF_DAY(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1053,7 +1059,7 @@ MINUTE_OF_HOUR(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1075,7 +1081,7 @@ MONTH(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1097,7 +1103,7 @@ MONTH_NAME(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: string @@ -1149,7 +1155,7 @@ SECOND_OF_MINUTE(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1171,7 +1177,7 @@ QUARTER(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1223,7 +1229,7 @@ WEEK_OF_YEAR(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer @@ -1245,7 +1251,7 @@ YEAR(datetime_exp) <1> *Input*: -<1> date/datetime expression +<1> date/datetime expression. If `null`, the function returns `null`. *Output*: integer diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index fe1c2f28bdf9a..4f4b0f8022931 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -35,7 +35,7 @@ ST_AsWKT( *Input*: -<1> geometry +<1> geometry. If `null`, the function returns `null`. *Output*: string @@ -60,7 +60,8 @@ ST_WKTToSQL( *Input*: -<1> string WKT representation of geometry +<1> string WKT representation of geometry. If `null`, the function returns +`null`. 
*Output*: geometry @@ -86,7 +87,7 @@ ST_GeometryType( *Input*: -<1> geometry +<1> geometry. If `null`, the function returns `null`. *Output*: string @@ -110,7 +111,7 @@ ST_X( *Input*: -<1> geometry +<1> geometry. If `null`, the function returns `null`. *Output*: double @@ -134,7 +135,7 @@ ST_Y( *Input*: -<1> geometry +<1> geometry. If `null`, the function returns `null`. *Output*: double @@ -158,7 +159,7 @@ ST_Z( *Input*: -<1> geometry +<1> geometry. If `null`, the function returns `null`. *Output*: double @@ -183,8 +184,8 @@ ST_Distance( *Input*: -<1> source geometry -<2> target geometry +<1> source geometry. If `null`, the function returns `null`. +<2> target geometry. If `null`, the function returns `null`. *Output*: Double diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 1f5b67504838d..b31637f140c69 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -23,10 +23,15 @@ HISTOGRAM( *Input*: -<1> numeric expression (typically a field) -<2> numeric interval -<3> date/time expression (typically a field) -<4> date/time <> +<1> numeric expression (typically a field). If this field contains only `null` +values, the function returns `null`. Otherwise, the function ignores `null` +values in this field. +<2> numeric interval. If `null`, the function returns `null`. +<3> date/time expression (typically a field). If this field contains only `null` +values, the function returns `null`. Otherwise, the function ignores `null` +values in this field. +<4> date/time <>. If `null`, the +function returns `null`. *Output*: non-empty buckets or groups of the given expression divided according to the given interval @@ -38,7 +43,7 @@ bucket_key = Math.floor(value / interval) * interval ---- [NOTE] -The histogram in SQL does *NOT* return empty buckets for missing intervals as the traditional <> and <>. Such behavior does not fit conceptually in SQL which treats all missing values as `NULL`; as such the histogram places all missing values in the `NULL` group. +The histogram in SQL does *NOT* return empty buckets for missing intervals as the traditional <> and <>. Such behavior does not fit conceptually in SQL which treats all missing values as `null`; as such the histogram places all missing values in the `null` group. `Histogram` can be applied on either numeric fields: diff --git a/docs/reference/sql/functions/math.asciidoc b/docs/reference/sql/functions/math.asciidoc index 25b233d697e06..194d3f700c795 100644 --- a/docs/reference/sql/functions/math.asciidoc +++ b/docs/reference/sql/functions/math.asciidoc @@ -21,7 +21,7 @@ ABS(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: numeric @@ -43,7 +43,7 @@ CBRT(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -65,7 +65,7 @@ CEIL(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: integer or long numeric value @@ -107,7 +107,7 @@ EXP(numeric_exp) <1> *Input*: -<1> float numeric expression +<1> float numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -129,7 +129,7 @@ EXPM1(numeric_exp) <1> *Input*: -<1> float numeric expression +<1> float numeric expression. If `null`, the function returns `null`. 
*Output*: double numeric value @@ -151,7 +151,7 @@ FLOOR(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: integer or long numeric value @@ -173,7 +173,7 @@ LOG(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -195,7 +195,7 @@ LOG10(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -239,8 +239,8 @@ POWER( *Input*: -<1> numeric expression -<2> integer expression +<1> numeric expression. If `null`, the function returns `null`. +<2> integer expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -267,7 +267,7 @@ RANDOM(seed) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -290,8 +290,8 @@ ROUND( ---- *Input*: -<1> numeric expression -<2> integer expression; optional +<1> numeric expression. If `null`, the function returns `null`. +<2> integer expression; optional. If `null`, the function returns `null`. *Output*: numeric @@ -321,7 +321,7 @@ SIGN(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: [-1, 0, 1] @@ -344,7 +344,7 @@ SQRT(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -367,8 +367,8 @@ TRUNCATE( ---- *Input*: -<1> numeric expression -<2> integer expression; optional +<1> numeric expression. If `null`, the function returns `null`. +<2> integer expression; optional. If `null`, the function returns `null`. *Output*: numeric @@ -402,7 +402,7 @@ ACOS(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -424,7 +424,7 @@ ASIN(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -446,7 +446,7 @@ ATAN(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -470,8 +470,8 @@ ATAN2( *Input*: -<1> numeric expression -<2> numeric expression +<1> numeric expression. If `null`, the function returns `null`. +<2> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -493,7 +493,7 @@ COS(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -515,7 +515,7 @@ COSH(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -537,7 +537,7 @@ COT(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -559,7 +559,7 @@ DEGREES(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -582,7 +582,7 @@ RADIANS(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -605,7 +605,7 @@ SIN(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. 
If `null`, the function returns `null`. *Output*: double numeric value @@ -627,7 +627,7 @@ SINH(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value @@ -649,7 +649,7 @@ TAN(numeric_exp) <1> *Input*: -<1> numeric expression +<1> numeric expression. If `null`, the function returns `null`. *Output*: double numeric value diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc index 2f4f44698a9fc..8ae46e776bf74 100644 --- a/docs/reference/sql/functions/type-conversion.asciidoc +++ b/docs/reference/sql/functions/type-conversion.asciidoc @@ -16,7 +16,7 @@ CAST( AS data_type) <2> ---- -<1> Expression to cast +<1> Expression to cast. If `null`, the function returns `null`. <2> Target data type to cast to *Description*: Casts the result of the given expression to the target <>. @@ -55,7 +55,7 @@ CONVERT( data_type) <2> ---- -<1> Expression to convert +<1> Expression to convert. If `null`, the function returns `null`. <2> Target data type to convert to *Description*: Works exactly like <> with slightly different syntax. diff --git a/docs/reference/sql/getting-started.asciidoc b/docs/reference/sql/getting-started.asciidoc index 4f711f4fd73b7..2bb7d83614a73 100644 --- a/docs/reference/sql/getting-started.asciidoc +++ b/docs/reference/sql/getting-started.asciidoc @@ -17,7 +17,7 @@ PUT /library/_bulk?refresh {"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604} -------------------------------------------------- -And now you can execute SQL using the <> right away: +And now you can execute SQL using the <>: [source,console] -------------------------------------------------- diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index d2ccf210f8681..9b7561f50987b 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[xpack-sql]] -= SQL access += SQL :sql-tests: {xes-repo-dir}/../../plugin/sql/qa/ :sql-specs: {sql-tests}server/src/main/resources/ diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index c275f7ed92bd9..6180f5b1c07ca 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -9,7 +9,7 @@ [discrete] ==== {es} multi-index -The {es} notation for enumerating, including or excluding <> +The {es} notation for enumerating, including or excluding <> is supported _as long_ as it is quoted or escaped as a table identifier. For example: @@ -92,7 +92,7 @@ By default, {es-sql} doesn't search <>. To search frozen indices, use one of the following features: dedicated configuration parameter:: -Set to `true` properties `index_include_frozen` in the <> or `index.include.frozen` in the drivers to include frozen indices. +Set to `true` properties `index_include_frozen` in the <> or `index.include.frozen` in the drivers to include frozen indices. 
dedicated keyword:: Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the `FROM` clause or `INCLUDE FROZEN` in the `SHOW` commands: diff --git a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc index 663ec46b6df70..660fc7cf66f93 100644 --- a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc @@ -27,7 +27,7 @@ patterns. include-tagged::{sql-specs}/docs/docs.csv-spec[showTables] ---- -Match multiple indices by using {es} <> +Match multiple indices by using {es} <> notation: [source, sql] diff --git a/docs/reference/tab-widgets/cpu-usage-widget.asciidoc b/docs/reference/tab-widgets/cpu-usage-widget.asciidoc new file mode 100644 index 0000000000000..a57d45790d518 --- /dev/null +++ b/docs/reference/tab-widgets/cpu-usage-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
+
+
+
+
+
+++++
+
+include::cpu-usage.asciidoc[tag=cloud]
+
+++++
+
+
+
+++++ diff --git a/docs/reference/tab-widgets/cpu-usage.asciidoc b/docs/reference/tab-widgets/cpu-usage.asciidoc new file mode 100644 index 0000000000000..1c4913cc0f6d4 --- /dev/null +++ b/docs/reference/tab-widgets/cpu-usage.asciidoc @@ -0,0 +1,30 @@ +// tag::cloud[] +From your deployment menu, click **Performance**. The page's **CPU Usage** chart +shows your deployment's CPU usage as a percentage. + +High CPU usage can also deplete your CPU credits. CPU credits let {ess} provide +smaller clusters with a performance boost when needed. The **CPU credits** +chart shows your remaining CPU credits, measured in seconds of CPU time. + +You can also use the <> to get the current CPU usage +for each node. + +// tag::cpu-usage-cat-nodes[] +[source,console] +---- +GET _cat/nodes?v=true&s=cpu:desc +---- + +The response's `cpu` column contains the current CPU usage as a percentage. The +`node` column contains the node's name. +// end::cpu-usage-cat-nodes[] + +// end::cloud[] + +// tag::self-managed[] + +Use the <> to get the current CPU usage for each node. + +include::cpu-usage.asciidoc[tag=cpu-usage-cat-nodes] + +// end::self-managed[] diff --git a/docs/reference/tab-widgets/data-tiers.asciidoc b/docs/reference/tab-widgets/data-tiers.asciidoc index 83038153abb2d..318acdeb8f4f6 100644 --- a/docs/reference/tab-widgets/data-tiers.asciidoc +++ b/docs/reference/tab-widgets/data-tiers.asciidoc @@ -23,6 +23,9 @@ a <>. [source,yaml] ---- +# Content tier +node.roles: [ data_content ] + # Hot tier node.roles: [ data_hot ] @@ -41,7 +44,7 @@ assign other nodes to more than one tier. [source,yaml] ---- -node.roles: [ data_hot, data_warm ] +node.roles: [ data_content, data_hot, data_warm ] ---- Assign your nodes any other roles needed for your cluster. For example, a small diff --git a/docs/reference/transform/examples.asciidoc b/docs/reference/transform/examples.asciidoc index c32be466f60d1..246f4be74af49 100644 --- a/docs/reference/transform/examples.asciidoc +++ b/docs/reference/transform/examples.asciidoc @@ -247,9 +247,9 @@ PUT _transform/suspicious_client_ips "filter": { "term": { "response" : "404"}} }, - "error503" : { - "filter": { - "term": { "response" : "503"}} + "error5xx" : { + "filter": { + "range": { "response" : { "gte": 500, "lt": 600}}} }, "timestamp.min": { "min": { "field": "timestamp" }}, "timestamp.max": { "max": { "field": "timestamp" }}, @@ -273,9 +273,10 @@ PUT _transform/suspicious_client_ips field to synchronize the source and destination indices. The worst case ingestion delay is 60 seconds. <3> The data is grouped by the `clientip` field. -<4> Filter aggregation that counts the occurrences of successful (`200`) -responses in the `response` field. The following two aggregations (`error404` -and `error503`) count the error responses by error codes. +<4> Filter aggregation that counts the occurrences of successful (`200`) +responses in the `response` field. The following two aggregations (`error404` +and `error5xx`) count the error responses by error codes, matching an exact +value or a range of response codes. <5> This `bucket_script` calculates the duration of the `clientip` access based on the results of the aggregation. diff --git a/docs/reference/upgrade/reindex_upgrade.asciidoc b/docs/reference/upgrade/reindex_upgrade.asciidoc index e7662cb51ca93..59157439da139 100644 --- a/docs/reference/upgrade/reindex_upgrade.asciidoc +++ b/docs/reference/upgrade/reindex_upgrade.asciidoc @@ -158,7 +158,9 @@ cluster and remove nodes from the old one. faster reindexing. .. 
Use the <> to pull documents from the - remote index into the new {version} index: +remote index into the new {version} index. ++ +include::{es-ref-dir}/docs/reindex.asciidoc[tag=remote-reindex-slicing] + -- [source,console] diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index ed19318388cca..a211486a47110 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.1.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=a9e356a21595348b6f04b024ed0b08ac8aea6b2ac37e6c0ef58e51549cd7b9cb +distributionSha256Sum=9bb8bc05f562f2d42bdf1ba8db62f6b6fa1c3bf6c392228802cc7cb0578fe7e0 diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java index 52aeb1b7224a8..3aa999ae11a97 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java @@ -86,9 +86,7 @@ public final int main(String[] args, Terminal terminal) throws Exception { if (e.exitCode == ExitCodes.USAGE) { printHelp(terminal, true); } - if (e.getMessage() != null) { - terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); - } + printUserException(terminal, e); return e.exitCode; } return ExitCodes.OK; @@ -133,6 +131,13 @@ private void printHelp(Terminal terminal, boolean toStdError) throws IOException /** Prints additional help information, specific to the command */ protected void printAdditionalHelp(Terminal terminal) {} + protected void printUserException(Terminal terminal, UserException e) { + if (e.getMessage() != null) { + terminal.errorPrintln(""); + terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + } + } + @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") protected static void exit(int status) { System.exit(status); diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java b/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java index 7edb6bade44f0..2f66a858f4e72 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java @@ -13,6 +13,7 @@ */ public class ExitCodes { public static final int OK = 0; + public static final int NOOP = 63; /* nothing to do */ public static final int USAGE = 64; /* command line usage error */ public static final int DATA_ERROR = 65; /* data format error */ public static final int NO_INPUT = 66; /* cannot open input */ diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java b/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java index 5fcc7de8f5402..acda0c70c592e 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java @@ -12,6 +12,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; import joptsimple.util.KeyValuePair; + import org.elasticsearch.core.internal.io.IOUtils; import java.io.IOException; @@ -19,6 +20,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.function.Consumer; /** * A cli tool which is made up of multiple subcommands. 
@@ -44,15 +46,28 @@ public MultiCommand(final String description, final Runnable beforeMain) { @Override protected void printAdditionalHelp(Terminal terminal) { + printSubCommandList(terminal::println); + } + + @Override + protected void printUserException(Terminal terminal, UserException e) { + super.printUserException(terminal, e); + if (e instanceof MissingCommandException) { + terminal.errorPrintln(""); + printSubCommandList(terminal::errorPrintln); + } + } + + private void printSubCommandList(Consumer println) { if (subcommands.isEmpty()) { throw new IllegalStateException("No subcommands configured"); } - terminal.println("Commands"); - terminal.println("--------"); + println.accept("Commands"); + println.accept("--------"); for (Map.Entry subcommand : subcommands.entrySet()) { - terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description); + println.accept(subcommand.getKey() + " - " + subcommand.getValue().description); } - terminal.println(""); + println.accept(""); } @Override @@ -64,7 +79,7 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { // .values(...) returns an unmodifiable list final List args = new ArrayList<>(arguments.values(options)); if (args.isEmpty()) { - throw new UserException(ExitCodes.USAGE, "Missing command"); + throw new MissingCommandException(); } String subcommandName = args.remove(0); @@ -85,4 +100,9 @@ public void close() throws IOException { IOUtils.close(subcommands.values()); } + static final class MissingCommandException extends UserException { + MissingCommandException() { + super(ExitCodes.USAGE, "Missing required command"); + } + } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/Types.java b/libs/core/src/main/java/org/elasticsearch/core/Types.java new file mode 100644 index 0000000000000..e1cf533d4eab4 --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/Types.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.core; + +/** + * Contains utilities for working with Java types. + */ +public abstract class Types { + + /** + * There are some situations where we cannot appease javac's type checking, and we + * need to forcibly cast an object's type. Please don't use this method unless you + * have no choice. 
+ * @param argument the object to cast + * @param the inferred type to which to cast the argument + * @return a cast version of the argument + */ + @SuppressWarnings("unchecked") + public static T forciblyCast(Object argument) { + return (T) argument; + } +} diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index 2cff00b11b2d2..625a8b3536283 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -79,11 +79,7 @@ private Grok(Map patternBank, String grokPattern, boolean namedC this.namedCaptures = namedCaptures; this.matcherWatchdog = matcherWatchdog; - for (Map.Entry entry : patternBank.entrySet()) { - String name = entry.getKey(); - String pattern = entry.getValue(); - forbidCircularReferences(name, new ArrayList<>(), pattern); - } + forbidCircularReferences(); String expression = toRegex(grokPattern); byte[] expressionBytes = expression.getBytes(StandardCharsets.UTF_8); @@ -104,8 +100,27 @@ private Grok(Map patternBank, String grokPattern, boolean namedC * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. */ - private void forbidCircularReferences(String patternName, List path, String pattern) { - if (pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":")) { + private void forbidCircularReferences() { + + // first ensure that the pattern bank contains no simple circular references (i.e., any pattern + // containing an immediate reference to itself) as those can cause the remainder of this algorithm + // to recurse infinitely + for (Map.Entry entry : patternBank.entrySet()) { + if (patternReferencesItself(entry.getValue(), entry.getKey())) { + throw new IllegalArgumentException("circular reference in pattern [" + entry.getKey() + "][" + entry.getValue() + "]"); + } + } + + // next, recursively check any other pattern names referenced in each pattern + for (Map.Entry entry : patternBank.entrySet()) { + String name = entry.getKey(); + String pattern = entry.getValue(); + innerForbidCircularReferences(name, new ArrayList<>(), pattern); + } + } + + private void innerForbidCircularReferences(String patternName, List path, String pattern) { + if (patternReferencesItself(pattern, patternName)) { String message; if (path.isEmpty()) { message = "circular reference in pattern [" + patternName + "][" + pattern + "]"; @@ -120,26 +135,31 @@ private void forbidCircularReferences(String patternName, List path, Str throw new IllegalArgumentException(message); } + // next check any other pattern names found in the pattern for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) { int begin = i + 2; - int brackedIndex = pattern.indexOf('}', begin); + int bracketIndex = pattern.indexOf('}', begin); int columnIndex = pattern.indexOf(':', begin); int end; - if (brackedIndex != -1 && columnIndex == -1) { - end = brackedIndex; - } else if (columnIndex != -1 && brackedIndex == -1) { + if (bracketIndex != -1 && columnIndex == -1) { + end = bracketIndex; + } else if (columnIndex != -1 && bracketIndex == -1) { end = columnIndex; - } else if (brackedIndex != -1 && columnIndex != -1) { - end = Math.min(brackedIndex, columnIndex); + } else if (bracketIndex != -1 && columnIndex != -1) { + end = Math.min(bracketIndex, columnIndex); } else { throw new IllegalArgumentException("pattern [" + pattern + "] has circular references to 
other pattern definitions"); } String otherPatternName = pattern.substring(begin, end); path.add(otherPatternName); - forbidCircularReferences(patternName, path, patternBank.get(otherPatternName)); + innerForbidCircularReferences(patternName, path, patternBank.get(otherPatternName)); } } + private static boolean patternReferencesItself(String pattern, String patternName) { + return pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":"); + } + private String groupMatch(String name, Region region, String pattern) { int number = GROK_PATTERN_REGEX.nameToBackrefNumber(name.getBytes(StandardCharsets.UTF_8), 0, name.getBytes(StandardCharsets.UTF_8).length, region); diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java index 1d0f253beea18..df43b84b23dfb 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java @@ -320,8 +320,7 @@ public void testCircularReference() { String pattern = "%{NAME1}"; new Grok(bank, pattern, false, logger::warn); }); - assertEquals("circular reference in pattern [NAME3][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2]", - e.getMessage()); + assertEquals("circular reference in pattern [NAME3][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2]", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { Map bank = new TreeMap<>(); @@ -331,10 +330,23 @@ public void testCircularReference() { bank.put("NAME4", "!!!%{NAME5}!!!"); bank.put("NAME5", "!!!%{NAME1}!!!"); String pattern = "%{NAME1}"; - new Grok(bank, pattern, false, logger::warn ); + new Grok(bank, pattern, false, logger::warn); + }); + assertEquals( + "circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2=>NAME3=>NAME4]", + e.getMessage() + ); + } + + public void testCircularSelfReference() { + Exception e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new HashMap<>(); + bank.put("ANOTHER", "%{INT}"); + bank.put("INT", "%{INT}"); + String pattern = "does_not_matter"; + new Grok(bank, pattern, false, logger::warn); }); - assertEquals("circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] " + - "via patterns [NAME2=>NAME3=>NAME4]", e.getMessage()); + assertEquals("circular reference in pattern [INT][%{INT}]", e.getMessage()); } public void testBooleanCaptures() { diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/CompositeTrustConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/CompositeTrustConfig.java new file mode 100644 index 0000000000000..f27fca593a111 --- /dev/null +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/CompositeTrustConfig.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.ssl; + +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509ExtendedTrustManager; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * A TrustConfiguration that merges trust anchors from a number of other trust configs to produce a single {@link X509ExtendedTrustManager}. + */ +public class CompositeTrustConfig implements SslTrustConfig { + private final List configs; + + CompositeTrustConfig(List configs) { + this.configs = List.copyOf(configs); + } + + @Override + public Collection getDependentFiles() { + return configs.stream().map(SslTrustConfig::getDependentFiles).flatMap(Collection::stream).collect(Collectors.toUnmodifiableSet()); + } + + @Override + public boolean isSystemDefault() { + return configs.stream().allMatch(SslTrustConfig::isSystemDefault); + } + + @Override + public X509ExtendedTrustManager createTrustManager() { + try { + Collection trustedIssuers = configs.stream() + .map(c -> c.createTrustManager()) + .map(tm -> tm.getAcceptedIssuers()) + .flatMap(Arrays::stream) + .collect(Collectors.toSet()); + final KeyStore store = KeyStoreUtil.buildTrustStore(trustedIssuers); + return KeyStoreUtil.createTrustManager(store, TrustManagerFactory.getDefaultAlgorithm()); + } catch (GeneralSecurityException e) { + throw new SslConfigException("Cannot combine trust configurations [" + + configs.stream().map(SslTrustConfig::toString).collect(Collectors.joining(",")) + + "]", + e); + } + } + + @Override + public Collection getConfiguredCertificates() { + return configs.stream().map(SslTrustConfig::getConfiguredCertificates) + .flatMap(Collection::stream) + .collect(Collectors.toUnmodifiableList()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CompositeTrustConfig that = (CompositeTrustConfig) o; + return configs.equals(that.configs); + } + + @Override + public int hashCode() { + return Objects.hash(configs); + } + + @Override + public String toString() { + return "Composite-Trust{" + configs.stream().map(SslTrustConfig::toString).collect(Collectors.joining(",")) + '}'; + } +} diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DefaultJdkTrustConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DefaultJdkTrustConfig.java index 36b086c500df5..7ea8f1240c95c 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DefaultJdkTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DefaultJdkTrustConfig.java @@ -18,13 +18,15 @@ import java.security.KeyStore; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.function.BiFunction; /** * This class represents a trust configuration that corresponds to the default trusted CAs of the JDK */ -final class DefaultJdkTrustConfig implements SslTrustConfig { +public final class DefaultJdkTrustConfig implements SslTrustConfig { + + public static final DefaultJdkTrustConfig DEFAULT_INSTANCE = new DefaultJdkTrustConfig(); private final BiFunction systemProperties; private final char[] trustStorePassword; @@ -51,6 +53,11 @@ final class DefaultJdkTrustConfig implements SslTrustConfig { 
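Note: CompositeTrustConfig (added above) merges trust by collecting the accepted issuers of each delegate trust manager into a single in-memory truststore. The sketch below restates that merge in plain JSSE terms; it only approximates what createTrustManager() does via KeyStoreUtil, and the delegate trust managers are assumed to come from elsewhere.

```java
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
import java.security.KeyStore;
import java.security.cert.X509Certificate;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Illustrative merge of several trust managers' accepted issuers into one
// truststore, approximating CompositeTrustConfig#createTrustManager.
public final class TrustMergeSketch {

    static X509TrustManager merge(List<X509TrustManager> delegates) throws Exception {
        Set<X509Certificate> issuers = new LinkedHashSet<>();
        for (X509TrustManager tm : delegates) {
            issuers.addAll(Arrays.asList(tm.getAcceptedIssuers()));
        }
        KeyStore store = KeyStore.getInstance(KeyStore.getDefaultType());
        store.load(null, null);                        // empty, in-memory truststore
        int counter = 0;
        for (X509Certificate cert : issuers) {
            store.setCertificateEntry("cert-" + counter++, cert);
        }
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(store);
        return Arrays.stream(tmf.getTrustManagers())
            .filter(X509TrustManager.class::isInstance)
            .map(X509TrustManager.class::cast)
            .findFirst()
            .orElseThrow(() -> new IllegalStateException("no X509TrustManager available"));
    }
}
```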
this.trustStorePassword = trustStorePassword; } + @Override + public boolean isSystemDefault() { + return true; + } + @Override public X509ExtendedTrustManager createTrustManager() { try { @@ -90,7 +97,12 @@ private static char[] getSystemTrustStorePassword(BiFunction getDependentFiles() { - return Collections.emptyList(); + return List.of(); + } + + @Override + public Collection getConfiguredCertificates() { + return List.of(); } @Override diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java index dd4f92f21920f..a188636b1c9fa 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java @@ -31,7 +31,7 @@ * Based on https://github.com/groovenauts/jmeter_oauth_plugin/blob/master/jmeter/src/ * main/java/org/apache/jmeter/protocol/oauth/sampler/PrivateKeyReader.java */ -final class DerParser { +public final class DerParser { // Constructed Flag private static final int CONSTRUCTED = 0x20; @@ -55,12 +55,12 @@ final class DerParser { private InputStream derInputStream; private int maxAsnObjectLength; - DerParser(byte[] bytes) { + public DerParser(byte[] bytes) { this.derInputStream = new ByteArrayInputStream(bytes); this.maxAsnObjectLength = bytes.length; } - Asn1Object readAsn1Object() throws IOException { + public Asn1Object readAsn1Object() throws IOException { int tag = derInputStream.read(); if (tag == -1) { throw new IOException("Invalid DER: stream too short, missing tag"); @@ -133,7 +133,7 @@ private int getLength() throws IOException { * * @author zhang */ - static class Asn1Object { + public static class Asn1Object { protected final int type; protected final int length; diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/EmptyKeyConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/EmptyKeyConfig.java index cbd355b555abb..363b47dbdc4bd 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/EmptyKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/EmptyKeyConfig.java @@ -8,17 +8,22 @@ package org.elasticsearch.common.ssl; -import javax.net.ssl.X509ExtendedKeyManager; +import org.elasticsearch.core.Tuple; + import java.nio.file.Path; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; import java.util.Collection; -import java.util.Collections; +import java.util.List; + +import javax.net.ssl.X509ExtendedKeyManager; /** * A {@link SslKeyConfig} that does nothing (provides a null key manager) */ -final class EmptyKeyConfig implements SslKeyConfig { +public final class EmptyKeyConfig implements SslKeyConfig { - static final EmptyKeyConfig INSTANCE = new EmptyKeyConfig(); + public static final EmptyKeyConfig INSTANCE = new EmptyKeyConfig(); private EmptyKeyConfig() { // Enforce a single instance @@ -26,7 +31,22 @@ private EmptyKeyConfig() { @Override public Collection getDependentFiles() { - return Collections.emptyList(); + return List.of(); + } + + @Override + public List> getKeys() { + return List.of(); + } + + @Override + public Collection getConfiguredCertificates() { + return List.of(); + } + + @Override + public boolean hasKeyMaterial() { + return false; } @Override diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java index 1be2c94985209..bbf895aa2e9d8 
100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java @@ -21,18 +21,26 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.GeneralSecurityException; +import java.security.Key; import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.PrivateKey; import java.security.cert.Certificate; +import java.security.cert.X509Certificate; import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Locale; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A variety of utility methods for working with or constructing {@link KeyStore} instances. */ -final class KeyStoreUtil { +public final class KeyStoreUtil { private KeyStoreUtil() { throw new IllegalStateException("Utility class should not be instantiated"); @@ -42,8 +50,8 @@ private KeyStoreUtil() { * Make a best guess about the "type" (see {@link KeyStore#getType()}) of the keystore file located at the given {@code Path}. * This method only references the file name of the keystore, it does not look at its contents. */ - static String inferKeyStoreType(Path path) { - String name = path == null ? "" : path.toString().toLowerCase(Locale.ROOT); + public static String inferKeyStoreType(String path) { + String name = path == null ? "" : path.toLowerCase(Locale.ROOT); if (name.endsWith(".p12") || name.endsWith(".pfx") || name.endsWith(".pkcs12")) { return "PKCS12"; } else { @@ -57,43 +65,48 @@ static String inferKeyStoreType(Path path) { * @throws SslConfigException If there is a problem reading from the provided path * @throws GeneralSecurityException If there is a problem with the keystore contents */ - static KeyStore readKeyStore(Path path, String type, char[] password) throws GeneralSecurityException { - if (Files.notExists(path)) { - throw new SslConfigException("cannot read a [" + type + "] keystore from [" + path.toAbsolutePath() - + "] because the file does not exist"); - } - try { - KeyStore keyStore = KeyStore.getInstance(type); + public static KeyStore readKeyStore(Path path, String ksType, char[] password) throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(ksType); + if (path != null) { try (InputStream in = Files.newInputStream(path)) { keyStore.load(in, password); } - return keyStore; - } catch (IOException e) { - throw new SslConfigException("cannot read a [" + type + "] keystore from [" + path.toAbsolutePath() + "] - " + e.getMessage(), - e); } + return keyStore; } /** * Construct an in-memory keystore with a single key entry. 
- * @param certificateChain A certificate chain (ordered from subject to issuer) - * @param privateKey The private key that corresponds to the subject certificate (index 0 of {@code certificateChain}) - * @param password The password for the private key * + * @param certificateChain A certificate chain (ordered from subject to issuer) + * @param privateKey The private key that corresponds to the subject certificate (index 0 of {@code certificateChain}) + * @param password The password for the private key * @throws GeneralSecurityException If there is a problem with the provided certificates/key */ - static KeyStore buildKeyStore(Collection certificateChain, PrivateKey privateKey, char[] password) + public static KeyStore buildKeyStore(Collection certificateChain, PrivateKey privateKey, char[] password) throws GeneralSecurityException { KeyStore keyStore = buildNewKeyStore(); keyStore.setKeyEntry("key", privateKey, password, certificateChain.toArray(new Certificate[0])); return keyStore; } + /** + * Filters a keystore using a predicate. + * The provided keystore is modified in place. + */ + public static KeyStore filter(KeyStore store, Predicate filter) { + stream(store, e -> new SslConfigException("Failed to apply filter to existing keystore", e)) + .filter(filter.negate()) + .forEach(e -> e.delete()); + return store; + } + /** * Construct an in-memory keystore with multiple trusted cert entries. + * * @param certificates The root certificates to trust */ - static KeyStore buildTrustStore(Iterable certificates) throws GeneralSecurityException { + public static KeyStore buildTrustStore(Iterable certificates) throws GeneralSecurityException { assert certificates != null : "Cannot create keystore with null certificates"; KeyStore store = buildNewKeyStore(); int counter = 0; @@ -115,10 +128,20 @@ private static KeyStore buildNewKeyStore() throws GeneralSecurityException { return keyStore; } + /** + * Returns a {@link X509ExtendedKeyManager} that is built from the provided private key and certificate chain + */ + public static X509ExtendedKeyManager createKeyManager(Certificate[] certificateChain, PrivateKey privateKey, char[] password) + throws GeneralSecurityException, IOException { + KeyStore keyStore = buildKeyStore(List.of(certificateChain), privateKey, password); + return createKeyManager(keyStore, password, KeyManagerFactory.getDefaultAlgorithm()); + } + /** * Creates a {@link X509ExtendedKeyManager} based on the key material in the provided {@link KeyStore} */ - static X509ExtendedKeyManager createKeyManager(KeyStore keyStore, char[] password, String algorithm) throws GeneralSecurityException { + public static X509ExtendedKeyManager createKeyManager(KeyStore keyStore, char[] password, + String algorithm) throws GeneralSecurityException { KeyManagerFactory kmf = KeyManagerFactory.getInstance(algorithm); kmf.init(keyStore, password); KeyManager[] keyManagers = kmf.getKeyManagers(); @@ -134,7 +157,7 @@ static X509ExtendedKeyManager createKeyManager(KeyStore keyStore, char[] passwor /** * Creates a {@link X509ExtendedTrustManager} based on the trust material in the provided {@link KeyStore} */ - static X509ExtendedTrustManager createTrustManager(@Nullable KeyStore trustStore, String algorithm) + public static X509ExtendedTrustManager createTrustManager(@Nullable KeyStore trustStore, String algorithm) throws NoSuchAlgorithmException, KeyStoreException { TrustManagerFactory tmf = TrustManagerFactory.getInstance(algorithm); tmf.init(trustStore); @@ -148,5 +171,129 @@ static 
X509ExtendedTrustManager createTrustManager(@Nullable KeyStore trustStore + "] and truststore [" + trustStore + "]"); } + /** + * Creates a {@link X509ExtendedTrustManager} based on the provided certificates + * + * @param certificates the certificates to trust + * @return a trust manager that trusts the provided certificates + */ + public static X509ExtendedTrustManager createTrustManager(Collection certificates) throws GeneralSecurityException { + KeyStore store = buildTrustStore(certificates); + return createTrustManager(store, TrustManagerFactory.getDefaultAlgorithm()); + } + + public static Stream stream(KeyStore keyStore, + Function exceptionHandler) { + try { + return Collections.list(keyStore.aliases()).stream().map(a -> new KeyStoreEntry(keyStore, a, exceptionHandler)); + } catch (KeyStoreException e) { + throw exceptionHandler.apply(e); + } + } + + public static class KeyStoreEntry { + private final KeyStore store; + private final String alias; + private final Function exceptionHandler; + + KeyStoreEntry(KeyStore store, String alias, Function exceptionHandler) { + this.store = store; + this.alias = alias; + this.exceptionHandler = exceptionHandler; + } + + public String getAlias() { + return alias; + } + + /** + * If this entry is a private key entry (see {@link #isKeyEntry()}), + * and the entry includes a certificate chain, + * and the leaf (first) element of that chain is an X.509 certificate, + * then that leaf certificate is returned. + * + * If this entry is a trusted certificate entry + * and the trusted certificate is an X.509 certificate, + * then the trusted certificate is returned. + * + * In all other cases, returns {@code null}. + * + * @see KeyStore#getCertificate(String) + */ + public X509Certificate getX509Certificate() { + try { + final Certificate c = store.getCertificate(alias); + if (c instanceof X509Certificate) { + return (X509Certificate) c; + } else { + return null; + } + } catch (KeyStoreException e) { + throw exceptionHandler.apply(e); + } + } + + /** + * @see KeyStore#isKeyEntry(String) + */ + public boolean isKeyEntry() { + try { + return store.isKeyEntry(alias); + } catch (KeyStoreException e) { + throw exceptionHandler.apply(e); + } + } + + /** + * If the current entry stores a private key, returns that key. + * Otherwise returns {@code null}. + * + * @see KeyStore#getKey(String, char[]) + */ + public PrivateKey getKey(char[] password) { + try { + final Key key = store.getKey(alias, password); + if (key instanceof PrivateKey) { + return (PrivateKey) key; + } + return null; + } catch (GeneralSecurityException e) { + throw exceptionHandler.apply(e); + } + } + + /** + * If this entry is a private key entry (see {@link #isKeyEntry()}), returns the certificate chain that is stored in the entry. + * If the entry contains any certificates that are not X.509 certificates, they are ignored. + * If the entry is not a private key entry, or it does not contain any X.509 certificates, then an empty list is returned. 
+ */ + public List getX509CertificateChain() { + try { + final Certificate[] certificates = store.getCertificateChain(alias); + if (certificates == null || certificates.length == 0) { + return List.of(); + } + return Stream.of(certificates) + .filter(c -> c instanceof X509Certificate) + .map(X509Certificate.class::cast) + .collect(Collectors.toUnmodifiableList()); + } catch (KeyStoreException e) { + throw exceptionHandler.apply(e); + } + } + + /** + * Remove this entry from the underlying keystore + */ + public void delete() { + try { + store.deleteEntry(alias); + } catch (KeyStoreException e) { + throw exceptionHandler.apply(e); + } + } + + } + } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemKeyConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemKeyConfig.java index 6081c6191b7ac..1a6608f78bd40 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemKeyConfig.java @@ -8,84 +8,148 @@ package org.elasticsearch.common.ssl; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.X509ExtendedKeyManager; -import java.io.FileNotFoundException; +import org.elasticsearch.core.Tuple; + import java.io.IOException; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.security.AccessControlException; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.PrivateKey; import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Objects; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.X509ExtendedKeyManager; + + /** * A {@link SslKeyConfig} that reads from PEM formatted paths. 
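Note: the new KeyStoreUtil.stream(...) and KeyStoreEntry wrapper above let callers iterate a keystore's entries without repeating KeyStoreException handling at every call site. The standalone sketch below does the equivalent with plain JDK calls: it loads a PKCS#12 keystore and prints the subject of every X.509 certificate entry. The path and password are placeholders, not values taken from this change.

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.KeyStore;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.util.Collections;

// Illustrative only: enumerates keystore aliases the same way
// KeyStoreUtil.stream(...) does, via Collections.list(keyStore.aliases()).
public class ListKeystoreCertificates {
    public static void main(String[] args) throws Exception {
        Path path = Path.of("config/certs/http.p12");   // hypothetical keystore path
        char[] password = "changeit".toCharArray();     // hypothetical password
        KeyStore store = KeyStore.getInstance("PKCS12");
        try (InputStream in = Files.newInputStream(path)) {
            store.load(in, password);
        }
        for (String alias : Collections.list(store.aliases())) {
            Certificate cert = store.getCertificate(alias);
            if (cert instanceof X509Certificate) {
                System.out.println(alias + " -> " + ((X509Certificate) cert).getSubjectX500Principal());
            }
        }
    }
}
```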
*/ public final class PemKeyConfig implements SslKeyConfig { - private final Path certificate; - private final Path key; + + private static final String KEY_FILE_TYPE = "PEM private key"; + private static final String CERT_FILE_TYPE = "PEM certificate"; + + private final String certificate; + private final String key; private final char[] keyPassword; + private final Path configBasePath; - public PemKeyConfig(Path certificate, Path key, char[] keyPassword) { - this.certificate = Objects.requireNonNull(certificate, "Certificate cannot be null"); - this.key = Objects.requireNonNull(key, "Key cannot be null"); + /** + * @param certificatePath Path to the PEM formatted certificate + * @param keyPath Path to the PEM formatted private key for {@code certificate} + * @param keyPassword Password for the private key (or empty is the key is not encrypted) + * @param configBasePath The base directory from which config files should be read (used for diagnostic exceptions) + */ + public PemKeyConfig(String certificatePath, String keyPath, char[] keyPassword, Path configBasePath) { + this.certificate = Objects.requireNonNull(certificatePath, "Certificate path cannot be null"); + this.key = Objects.requireNonNull(keyPath, "Key path cannot be null"); this.keyPassword = Objects.requireNonNull(keyPassword, "Key password cannot be null (but may be empty)"); + this.configBasePath = Objects.requireNonNull(configBasePath, "Config base path cannot be null"); + } + + @Override + public boolean hasKeyMaterial() { + return true; } @Override public Collection getDependentFiles() { - return Arrays.asList(certificate, key); + return Arrays.asList(resolve(certificate), resolve(key)); + } + + private Path resolve(String fileName) { + return configBasePath.resolve(fileName); + } + + @Override + public Collection getConfiguredCertificates() { + final List certificates = getCertificates(resolve(this.certificate)); + final List info = new ArrayList<>(certificates.size()); + boolean first = true; + for (Certificate cert : certificates) { + if (cert instanceof X509Certificate) { + info.add(new StoredCertificate((X509Certificate) cert, this.certificate, "PEM", null, first)); + } + first = false; + } + return info; } @Override public X509ExtendedKeyManager createKeyManager() { - PrivateKey privateKey = getPrivateKey(); - List certificates = getCertificates(); + final Path keyPath = resolve(key); + final PrivateKey privateKey = getPrivateKey(keyPath); + final Path certPath = resolve(this.certificate); + final List certificates = getCertificates(certPath); try { final KeyStore keyStore = KeyStoreUtil.buildKeyStore(certificates, privateKey, keyPassword); return KeyStoreUtil.createKeyManager(keyStore, keyPassword, KeyManagerFactory.getDefaultAlgorithm()); } catch (GeneralSecurityException e) { - throw new SslConfigException("failed to load a KeyManager for certificate/key pair [" + certificate + "], [" + key + "]", e); + throw new SslConfigException( + "failed to load a KeyManager for certificate/key pair [" + certPath + "], [" + keyPath + "]", e); } } - private PrivateKey getPrivateKey() { + @Override + public List> getKeys() { + final Path keyPath = resolve(key); + final Path certPath = resolve(this.certificate); + final List certificates = getCertificates(certPath); + if (certificates.isEmpty()) { + return List.of(); + } + final Certificate leafCertificate = certificates.get(0); + if (leafCertificate instanceof X509Certificate) { + return List.of(Tuple.tuple(getPrivateKey(keyPath), (X509Certificate) leafCertificate)); + } else { 
+ return List.of(); + } + } + + @Override + public SslTrustConfig asTrustConfig() { + return new PemTrustConfig(List.of(certificate), configBasePath); + } + + private PrivateKey getPrivateKey(Path path) { try { - final PrivateKey privateKey = PemUtils.readPrivateKey(key, () -> keyPassword); + final PrivateKey privateKey = PemUtils.parsePrivateKey(path, () -> keyPassword); if (privateKey == null) { - throw new SslConfigException("could not load ssl private key file [" + key + "]"); + throw new SslConfigException("could not load ssl private key file [" + path + "]"); } return privateKey; - } catch (FileNotFoundException | NoSuchFileException e) { - throw new SslConfigException("the configured ssl private key file [" + key.toAbsolutePath() + "] does not exist", e); + } catch (AccessControlException e) { + throw SslFileUtil.accessControlFailure(KEY_FILE_TYPE, List.of(path), e, configBasePath); } catch (IOException e) { - throw new SslConfigException("the configured ssl private key file [" + key.toAbsolutePath() + "] cannot be read", e); + throw SslFileUtil.ioException(KEY_FILE_TYPE, List.of(path), e); } catch (GeneralSecurityException e) { - throw new SslConfigException("cannot load ssl private key file [" + key.toAbsolutePath() + "]", e); + throw SslFileUtil.securityException(KEY_FILE_TYPE, List.of(path), e); } } - private List getCertificates() { + private List getCertificates(Path path) { try { - return PemUtils.readCertificates(Collections.singleton(certificate)); - } catch (FileNotFoundException | NoSuchFileException e) { - throw new SslConfigException("the configured ssl certificate file [" + certificate.toAbsolutePath() + "] does not exist", e); + return PemUtils.readCertificates(Collections.singleton(path)); + } catch (AccessControlException e) { + throw SslFileUtil.accessControlFailure(CERT_FILE_TYPE, List.of(path), e, configBasePath); } catch (IOException e) { - throw new SslConfigException("the configured ssl certificate file [" + certificate .toAbsolutePath()+ "] cannot be read", e); + throw SslFileUtil.ioException(CERT_FILE_TYPE, List.of(path), e); } catch (GeneralSecurityException e) { - throw new SslConfigException("cannot load ssl certificate from [" + certificate.toAbsolutePath() + "]", e); + throw SslFileUtil.securityException(CERT_FILE_TYPE, List.of(path), e); } } @Override public String toString() { - return "PEM-key-config{cert=" + certificate.toAbsolutePath() + " key=" + key.toAbsolutePath() + "}"; + return "PEM-key-config{cert=" + certificate + " key=" + key + "}"; } @Override diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemTrustConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemTrustConfig.java index 044ef433748b7..d34bea5130225 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemTrustConfig.java @@ -10,15 +10,15 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.security.AccessControlException; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.cert.Certificate; -import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import 
java.util.List; @@ -32,10 +32,13 @@ * {@link java.security.cert.CertificateFactory#generateCertificate(InputStream)}. */ public final class PemTrustConfig implements SslTrustConfig { - private final List certificateAuthorities; + + private static final String CA_FILE_TYPE = "PEM " + SslConfigurationKeys.CERTIFICATE_AUTHORITIES; + private final List certificateAuthorities; + private final Path basePath; /** - * Construct a new trust config for the provided paths. + * Construct a new trust config for the provided paths (which will be resolved relative to the basePath). * The paths are stored as-is, and are not read until {@link #createTrustManager()} is called. * This means that *
    @@ -47,41 +50,67 @@ public final class PemTrustConfig implements SslTrustConfig { * *
*/ - public PemTrustConfig(List certificateAuthorities) { + public PemTrustConfig(List certificateAuthorities, Path basePath) { this.certificateAuthorities = Collections.unmodifiableList(certificateAuthorities); + this.basePath = basePath; } @Override public Collection getDependentFiles() { - return certificateAuthorities; + return resolveFiles(); + } + + @Override + public Collection getConfiguredCertificates() { + final List info = new ArrayList<>(certificateAuthorities.size()); + for (String caPath : certificateAuthorities) { + for (Certificate cert : readCertificates(List.of(resolveFile(caPath)))) { + if (cert instanceof X509Certificate) { + info.add(new StoredCertificate((X509Certificate) cert, caPath, "PEM", null, false)); + } + } + } + return info; } @Override public X509ExtendedTrustManager createTrustManager() { + final List paths = resolveFiles(); try { - final List certificates = loadCertificates(); - KeyStore store = KeyStoreUtil.buildTrustStore(certificates); + final List certificates = readCertificates(paths); + final KeyStore store = KeyStoreUtil.buildTrustStore(certificates); return KeyStoreUtil.createTrustManager(store, TrustManagerFactory.getDefaultAlgorithm()); } catch (GeneralSecurityException e) { - throw new SslConfigException("cannot create trust using PEM certificates [" + caPathsAsString() + "]", e); + throw new SslConfigException( + "cannot create trust using PEM certificates [" + SslFileUtil.pathsToString(paths) + "]", e); } } - private List loadCertificates() throws CertificateException { + private List resolveFiles() { + return this.certificateAuthorities.stream().map(this::resolveFile).collect(Collectors.toUnmodifiableList()); + } + + private Path resolveFile(String other) { + return basePath.resolve(other); + } + + private List readCertificates(List paths) { try { - return PemUtils.readCertificates(this.certificateAuthorities); - } catch (FileNotFoundException | NoSuchFileException e) { - throw new SslConfigException("cannot configure trust using PEM certificates [" + caPathsAsString() - + "] because one or more files do not exist", e); + return PemUtils.readCertificates(paths); + } catch (AccessControlException e) { + throw SslFileUtil.accessControlFailure(CA_FILE_TYPE, paths, e, basePath); } catch (IOException e) { - throw new SslConfigException("cannot configure trust using PEM certificates [" + caPathsAsString() - + "] because one or more files cannot be read", e); + throw SslFileUtil.ioException(CA_FILE_TYPE, paths, e); + } catch (GeneralSecurityException e) { + throw SslFileUtil.securityException(CA_FILE_TYPE, paths, e); + } catch (SslConfigException e) { + throw SslFileUtil.configException(CA_FILE_TYPE, paths, e); } } @Override public String toString() { - return "PEM-trust{" + caPathsAsString() + "}"; + return "PEM-trust{" + SslFileUtil.pathsToString(resolveFiles()) + "}"; } @Override @@ -101,11 +130,4 @@ public int hashCode() { return Objects.hash(certificateAuthorities); } - private String caPathsAsString() { - return certificateAuthorities.stream() - .map(Path::toAbsolutePath) - .map(Object::toString) - .collect(Collectors.joining(",")); - } - } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java index 46deade3b3d22..eaccf729cc13b 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java @@ -18,14 +18,13 @@ import 
javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; import java.io.BufferedReader; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.security.AccessControlException; import java.security.GeneralSecurityException; import java.security.KeyFactory; import java.security.KeyPairGenerator; @@ -73,6 +72,29 @@ private PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); } + /** + * Creates a {@link PrivateKey} from the contents of a file and handles any exceptions + * + * @param path the path for the key file + * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key + * @return a private key from the contents of the file + */ + public static PrivateKey readPrivateKey(Path path, Supplier passwordSupplier) throws IOException, GeneralSecurityException { + try { + final PrivateKey privateKey = PemUtils.parsePrivateKey(path, passwordSupplier); + if (privateKey == null) { + throw new SslConfigException("could not load ssl private key file [" + path + "]"); + } + return privateKey; + } catch (AccessControlException e) { + throw SslFileUtil.accessControlFailure("PEM private key", List.of(path), e, null); + } catch (IOException e) { + throw SslFileUtil.ioException("PEM private key", List.of(path), e); + } catch (GeneralSecurityException e) { + throw SslFileUtil.securityException("PEM private key", List.of(path), e); + } + } + /** * Creates a {@link PrivateKey} from the contents of a file. Supports PKCS#1, PKCS#8 * encoded formats of encrypted and plaintext RSA, DSA and EC(secp256r1) keys @@ -81,7 +103,7 @@ private PemUtils() { * @param passwordSupplier A password supplier for the potentially encrypted (password protected) key * @return a private key from the contents of the file */ - public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordSupplier) throws IOException, GeneralSecurityException { + static PrivateKey parsePrivateKey(Path keyPath, Supplier passwordSupplier) throws IOException, GeneralSecurityException { try (BufferedReader bReader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { String line = bReader.readLine(); while (null != line && line.startsWith(HEADER) == false) { @@ -109,13 +131,9 @@ public static PrivateKey readPrivateKey(Path keyPath, Supplier passwordS } else if (OPENSSL_EC_PARAMS_HEADER.equals(line.trim())) { return parseOpenSslEC(removeECHeaders(bReader), passwordSupplier); } else { - throw new SslConfigException("error parsing Private Key [" + keyPath.toAbsolutePath() - + "], file does not contain a supported key format"); + throw new SslConfigException("cannot read PEM private key [" + keyPath.toAbsolutePath() + + "] because the file does not contain a supported key format"); } - } catch (FileNotFoundException | NoSuchFileException e) { - throw new SslConfigException("private key file [" + keyPath.toAbsolutePath() + "] does not exist", e); - } catch (IOException | GeneralSecurityException e) { - throw new SslConfigException("private key file [" + keyPath.toAbsolutePath() + "] cannot be parsed", e); } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java index 63dea86bb6d74..ec06f629edd65 100644 --- 
a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java @@ -14,6 +14,7 @@ import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -56,6 +57,7 @@ public class SslConfiguration { ORDERED_PROTOCOL_ALGORITHM_MAP = Collections.unmodifiableMap(protocolAlgorithmMap); } + private final boolean explicitlyConfigured; private final SslTrustConfig trustConfig; private final SslKeyConfig keyConfig; private final SslVerificationMode verificationMode; @@ -63,8 +65,10 @@ public class SslConfiguration { private final List ciphers; private final List supportedProtocols; - public SslConfiguration(SslTrustConfig trustConfig, SslKeyConfig keyConfig, SslVerificationMode verificationMode, - SslClientAuthenticationMode clientAuth, List ciphers, List supportedProtocols) { + public SslConfiguration(boolean explicitlyConfigured, SslTrustConfig trustConfig, SslKeyConfig keyConfig, + SslVerificationMode verificationMode, SslClientAuthenticationMode clientAuth, + List ciphers, List supportedProtocols) { + this.explicitlyConfigured = explicitlyConfigured; if (ciphers == null || ciphers.isEmpty()) { throw new SslConfigException("cannot configure SSL/TLS without any supported cipher suites"); } @@ -114,6 +118,18 @@ public Collection getDependentFiles() { return paths; } + /** + * @return A collection of {@link StoredCertificate certificates} that are used by this SSL configuration. + * This includes certificates used for identity (with a private key) and those used for trust, but excludes + * certificates that are provided by the JRE. + */ + public Collection getConfiguredCertificates() { + List certificates = new ArrayList<>(); + certificates.addAll(keyConfig.getConfiguredCertificates()); + certificates.addAll(trustConfig.getConfiguredCertificates()); + return certificates; + } + /** * Dynamically create a new SSL context based on the current state of the configuration. * Because the {@link #getKeyConfig() key config} and {@link #getTrustConfig() trust config} may change based on the @@ -178,4 +194,8 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(trustConfig, keyConfig, verificationMode, clientAuth, ciphers, supportedProtocols); } + + public boolean isExplicitlyConfigured() { + return explicitlyConfigured; + } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index 9a1021d0df0be..2935666440d4e 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -13,6 +13,7 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; import java.nio.file.Path; +import java.security.KeyStore; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -166,6 +167,8 @@ public abstract class SslConfigurationLoader { private List defaultCiphers; private List defaultProtocols; + private Function keyStoreFilter; + /** * Construct a new loader with the "standard" default values. 
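Note: further down in this change, SslConfigurationLoader#buildTrustConfig resolves where trust comes from in a fixed order: explicit certificate_authorities and a truststore.path are mutually exclusive, verification mode "none" short-circuits to trust-everything, and when nothing is configured the key material (if any) is combined with the default JDK trust via CompositeTrustConfig. The snippet below is a condensed, illustrative restatement of that precedence; the method and string names are placeholders, not the real API.

```java
import java.util.List;

// Illustrative restatement of the trust-source precedence in
// SslConfigurationLoader#buildTrustConfig. Return values are placeholders
// for the real SslTrustConfig implementations.
final class TrustSourcePrecedence {

    static String chooseTrustSource(List<String> certificateAuthorities, String trustStorePath, boolean verificationModeNone) {
        if (certificateAuthorities != null && trustStorePath != null) {
            throw new IllegalArgumentException("cannot specify both certificate_authorities and truststore.path");
        }
        if (verificationModeNone) {
            return "trust-everything";          // TrustEverythingConfig.TRUST_EVERYTHING
        }
        if (certificateAuthorities != null) {
            return "pem-trust";                 // PemTrustConfig
        }
        if (trustStorePath != null) {
            return "store-trust";               // StoreTrustConfig
        }
        return "jdk-default-plus-key-material"; // default trust, possibly composed with the key config's trust
    }

    public static void main(String[] args) {
        System.out.println(chooseTrustSource(List.of("ca.crt"), null, false)); // pem-trust
        System.out.println(chooseTrustSource(null, null, false));              // jdk-default-plus-key-material
    }
}
```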
* @@ -235,6 +238,21 @@ public void setDefaultProtocols(List defaultProtocols) { this.defaultProtocols = defaultProtocols; } + + /** + * Apply a filter function to any keystore that is loaded. + * @see StoreKeyConfig + */ + public void setKeyStoreFilter(Function keyStoreFilter) { + this.keyStoreFilter = keyStoreFilter; + } + + /** + * Clients of this class should implement this method to determine whether there are any settings for a given prefix. + * This is used to populate {@link SslConfiguration#isExplicitlyConfigured()}. + */ + protected abstract boolean hasSettings(String prefix); + /** * Clients of this class should implement this method to load a fully-qualified key from the preferred settings source. * This method will be called for basic string settings (see {@link SslConfigurationKeys#getStringKeys()}). @@ -281,8 +299,8 @@ public SslConfiguration load(Path basePath) { final SslVerificationMode verificationMode = resolveSetting(VERIFICATION_MODE, SslVerificationMode::parse, defaultVerificationMode); final SslClientAuthenticationMode clientAuth = resolveSetting(CLIENT_AUTH, SslClientAuthenticationMode::parse, defaultClientAuth); - final SslTrustConfig trustConfig = buildTrustConfig(basePath, verificationMode); final SslKeyConfig keyConfig = buildKeyConfig(basePath); + final SslTrustConfig trustConfig = buildTrustConfig(basePath, verificationMode, keyConfig); if (protocols == null || protocols.isEmpty()) { throw new SslConfigException("no protocols configured in [" + settingPrefix + PROTOCOLS + "]"); @@ -290,12 +308,13 @@ public SslConfiguration load(Path basePath) { if (ciphers == null || ciphers.isEmpty()) { throw new SslConfigException("no cipher suites configured in [" + settingPrefix + CIPHERS + "]"); } - return new SslConfiguration(trustConfig, keyConfig, verificationMode, clientAuth, ciphers, protocols); + final boolean isExplicitlyConfigured = hasSettings(settingPrefix); + return new SslConfiguration(isExplicitlyConfigured, trustConfig, keyConfig, verificationMode, clientAuth, ciphers, protocols); } - private SslTrustConfig buildTrustConfig(Path basePath, SslVerificationMode verificationMode) { - final List certificateAuthorities = resolveListSetting(CERTIFICATE_AUTHORITIES, basePath::resolve, null); - final Path trustStorePath = resolveSetting(TRUSTSTORE_PATH, basePath::resolve, null); + protected SslTrustConfig buildTrustConfig(Path basePath, SslVerificationMode verificationMode, SslKeyConfig keyConfig) { + final List certificateAuthorities = resolveListSetting(CERTIFICATE_AUTHORITIES, Function.identity(), null); + final String trustStorePath = resolveSetting(TRUSTSTORE_PATH, Function.identity(), null); if (certificateAuthorities != null && trustStorePath != null) { throw new SslConfigException("cannot specify both [" + settingPrefix + CERTIFICATE_AUTHORITIES + "] and [" + @@ -305,21 +324,30 @@ private SslTrustConfig buildTrustConfig(Path basePath, SslVerificationMode verif return TrustEverythingConfig.TRUST_EVERYTHING; } if (certificateAuthorities != null) { - return new PemTrustConfig(certificateAuthorities); + return new PemTrustConfig(certificateAuthorities, basePath); } if (trustStorePath != null) { final char[] password = resolvePasswordSetting(TRUSTSTORE_SECURE_PASSWORD, TRUSTSTORE_LEGACY_PASSWORD); final String storeType = resolveSetting(TRUSTSTORE_TYPE, Function.identity(), inferKeyStoreType(trustStorePath)); final String algorithm = resolveSetting(TRUSTSTORE_ALGORITHM, Function.identity(), TrustManagerFactory.getDefaultAlgorithm()); - return new 
StoreTrustConfig(trustStorePath, password, storeType, algorithm); + return new StoreTrustConfig(trustStorePath, password, storeType, algorithm, true, basePath); } - return defaultTrustConfig; + return buildDefaultTrustConfig(defaultTrustConfig, keyConfig); } - private SslKeyConfig buildKeyConfig(Path basePath) { - final Path certificatePath = resolveSetting(CERTIFICATE, basePath::resolve, null); - final Path keyPath = resolveSetting(KEY, basePath::resolve, null); - final Path keyStorePath = resolveSetting(KEYSTORE_PATH, basePath::resolve, null); + protected SslTrustConfig buildDefaultTrustConfig(SslTrustConfig defaultTrustConfig, SslKeyConfig keyConfig) { + final SslTrustConfig trust = keyConfig.asTrustConfig(); + if (trust == null) { + return defaultTrustConfig; + } else { + return new CompositeTrustConfig(List.of(defaultTrustConfig, trust)); + } + } + + public SslKeyConfig buildKeyConfig(Path basePath) { + final String certificatePath = stringSetting(CERTIFICATE); + final String keyPath = stringSetting(KEY); + final String keyStorePath = stringSetting(KEYSTORE_PATH); if (certificatePath != null && keyStorePath != null) { throw new SslConfigException("cannot specify both [" + settingPrefix + CERTIFICATE + "] and [" + @@ -336,7 +364,7 @@ private SslKeyConfig buildKeyConfig(Path basePath) { settingPrefix + CERTIFICATE + "]"); } final char[] password = resolvePasswordSetting(KEY_SECURE_PASSPHRASE, KEY_LEGACY_PASSPHRASE); - return new PemKeyConfig(certificatePath, keyPath, password); + return new PemKeyConfig(certificatePath, keyPath, password, basePath); } if (keyStorePath != null) { @@ -347,15 +375,23 @@ private SslKeyConfig buildKeyConfig(Path basePath) { } final String storeType = resolveSetting(KEYSTORE_TYPE, Function.identity(), inferKeyStoreType(keyStorePath)); final String algorithm = resolveSetting(KEYSTORE_ALGORITHM, Function.identity(), KeyManagerFactory.getDefaultAlgorithm()); - return new StoreKeyConfig(keyStorePath, storePassword, storeType, keyPassword, algorithm); + return new StoreKeyConfig(keyStorePath, storePassword, storeType, keyStoreFilter, keyPassword, algorithm, basePath); } return defaultKeyConfig; } + protected Path resolvePath(String settingKey, Path basePath) { + return resolveSetting(settingKey, basePath::resolve, null); + } + + private String expandSettingKey(String key) { + return settingPrefix + key; + } + private char[] resolvePasswordSetting(String secureSettingKey, String legacySettingKey) { final char[] securePassword = resolveSecureSetting(secureSettingKey, null); - final String legacyPassword = resolveSetting(legacySettingKey, Function.identity(), null); + final String legacyPassword = stringSetting(legacySettingKey); if (securePassword == null) { if (legacyPassword == null) { return EMPTY_PASSWORD; @@ -372,9 +408,13 @@ private char[] resolvePasswordSetting(String secureSettingKey, String legacySett } } + private String stringSetting(String key) { + return resolveSetting(key, Function.identity(), null); + } + private V resolveSetting(String key, Function parser, V defaultValue) { try { - String setting = getSettingAsString(settingPrefix + key); + String setting = getSettingAsString(expandSettingKey(key)); if (setting == null || setting.isEmpty()) { return defaultValue; } @@ -388,7 +428,7 @@ private V resolveSetting(String key, Function parser, V defaultVa private char[] resolveSecureSetting(String key, char[] defaultValue) { try { - char[] setting = getSecureSetting(settingPrefix + key); + char[] setting = getSecureSetting(expandSettingKey(key)); if 
(setting == null || setting.length == 0) { return defaultValue; } @@ -403,7 +443,7 @@ private char[] resolveSecureSetting(String key, char[] defaultValue) { private List resolveListSetting(String key, Function parser, List defaultValue) { try { - final List list = getSettingAsList(settingPrefix + key); + final List list = getSettingAsList(expandSettingKey(key)); if (list == null || list.isEmpty()) { return defaultValue; } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslDiagnostics.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslDiagnostics.java index c7a085cbe206a..a3d4ad5086781 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslDiagnostics.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslDiagnostics.java @@ -20,11 +20,14 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.stream.Collectors; +import java.util.stream.IntStream; public class SslDiagnostics { + public static List describeValidHostnames(X509Certificate certificate) { try { final Collection> names = certificate.getSubjectAlternativeNames(); @@ -140,6 +143,44 @@ boolean isSameCertificate() { } } + /** + * These names align with the values (and indices) defined by {@link X509Certificate#getKeyUsage()} + */ + private static final String[] KEY_USAGE_NAMES = new String[] { + "digitalSignature", + "nonRepudiation", + "keyEncipherment", + "dataEncipherment", + "keyAgreement", + "keyCertSign", + "cRLSign", + "encipherOnly", + "decipherOnly" }; + + private enum ExtendedKeyUsage { + serverAuth ("1.3.6.1.5.5.7.3.1"), + clientAuth ("1.3.6.1.5.5.7.3.2"), + codeSigning ("1.3.6.1.5.5.7.3.3"), + emailProtection ("1.3.6.1.5.5.7.3.4"), + timeStamping ("1.3.6.1.5.5.7.3.8"), + ocspSigning ("1.3.6.1.5.5.7.3.9"); + + private String oid; + + ExtendedKeyUsage(String oid) { + this.oid = Objects.requireNonNull(oid); + } + + public static String decodeOid(String oid) { + for (ExtendedKeyUsage e : values()) { + if (e.oid.equals(oid)) { + return e.name(); + } + } + return oid; + } + } + /** * @param contextName The descriptive name of this SSL context (e.g. 
"xpack.security.transport.ssl") * @param trustedIssuers A Map of DN to Certificate, for the issuers that were trusted in the context in which this failure occurred @@ -166,8 +207,14 @@ public static String getTrustDiagnosticFailure(X509Certificate[] chain, PeerType .append(peerType.name().toLowerCase(Locale.ROOT)) .append(" provided a certificate with subject name [") .append(peerCert.getSubjectX500Principal().getName()) - .append("] and ") - .append(fingerprintDescription(peerCert)); + .append("], ") + .append(fingerprintDescription(peerCert)) + .append(", ") + .append(keyUsageDescription(peerCert)) + .append(" and ") + .append(extendedKeyUsageDescription(peerCert)); + + addSessionDescription(session, message); if (peerType == PeerType.SERVER) { try { @@ -376,7 +423,7 @@ private static String fingerprintDescription(List certificates) private static String fingerprintDescription(X509Certificate certificate) { try { - final String fingerprint = SslUtil.calculateFingerprint(certificate); + final String fingerprint = SslUtil.calculateFingerprint(certificate, "SHA-1"); return "fingerprint [" + fingerprint + "]"; } catch (CertificateEncodingException e) { return "invalid encoding [" + e.toString() + "]"; @@ -395,4 +442,47 @@ private static boolean checkIssuer(X509Certificate certificate, X509Certificate private static boolean isSelfIssued(X509Certificate certificate) { return certificate.getIssuerX500Principal().equals(certificate.getSubjectX500Principal()); } + + private static String keyUsageDescription(X509Certificate certificate) { + boolean[] keyUsage = certificate.getKeyUsage(); + if (keyUsage == null || keyUsage.length == 0) { + return "no keyUsage"; + } + final String keyUsageDescription = IntStream.range(0, keyUsage.length) + .filter(i -> keyUsage[i]) + .mapToObj(i -> (i < KEY_USAGE_NAMES.length) ? KEY_USAGE_NAMES[i] : ("#" + i)) + .collect(Collectors.joining(", ")); + return keyUsageDescription.isEmpty() ? "no keyUsage" : ("keyUsage [" + keyUsageDescription + "]"); + } + + private static String extendedKeyUsageDescription(X509Certificate certificate) { + try { + return Optional.ofNullable(certificate.getExtendedKeyUsage()) + .flatMap(keyUsage -> generateExtendedKeyUsageDescription(keyUsage)) + .orElse("no extendedKeyUsage"); + } catch (CertificateParsingException e) { + return "invalid extendedKeyUsage [" + e + "]"; + } + } + + private static Optional generateExtendedKeyUsageDescription(List oids) { + return oids.stream() + .map(ExtendedKeyUsage::decodeOid) + .reduce((x, y) -> x + ", " + y) + .map(str -> "extendedKeyUsage [" + str + "]"); + } + + private static void addSessionDescription(SSLSession session, StringBuilder message) { + String cipherSuite = Optional.ofNullable(session) + .map(SSLSession::getCipherSuite) + .orElse(""); + String protocol = Optional.ofNullable(session) + .map(SSLSession::getProtocol) + .orElse(""); + message.append("; the session uses cipher suite [") + .append(cipherSuite) + .append("] and protocol [") + .append(protocol) + .append("]"); + } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslFileUtil.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslFileUtil.java new file mode 100644 index 0000000000000..2adf5ddc0974d --- /dev/null +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslFileUtil.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.ssl; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.AccessDeniedException; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.security.AccessControlException; +import java.security.GeneralSecurityException; +import java.security.UnrecoverableKeyException; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Utility methods for common file handling in SSL configuration + */ +final class SslFileUtil { + + static String pathsToString(List paths) { + return paths.stream() + .map(Path::toAbsolutePath) + .map(Object::toString) + .collect(Collectors.joining(",")); + } + + static SslConfigException ioException(String fileType, List paths, IOException cause) { + return ioException(fileType, paths, cause, null); + } + + static SslConfigException ioException(String fileType, List paths, IOException cause, String detail) { + if (cause instanceof FileNotFoundException || cause instanceof NoSuchFileException) { + return fileNotFound(fileType, paths, cause); + } + if (cause instanceof AccessDeniedException) { + return accessDenied(fileType, paths, (AccessDeniedException) cause); + } + String message = "cannot read configured " + fileType; + if (paths.isEmpty() == false) { + message += " [" + pathsToString(paths) + "]"; + } + + if (hasCause(UnrecoverableKeyException.class, cause)) { + message += " - this is usually caused by an incorrect password"; + } else if (cause != null && cause.getMessage() != null) { + message += " - " + cause.getMessage(); + } + + if (detail != null) { + message = message + "; " + detail; + } + return new SslConfigException(message, cause); + } + + static SslConfigException fileNotFound(String fileType, List paths, IOException cause) { + String message = "cannot read configured " + fileType + " [" + pathsToString(paths) + "] because "; + if (paths.size() == 1) { + message += "the file does not exist"; + } else { + message += "one or more files do not exist"; + } + return new SslConfigException(message, cause); + } + + static SslConfigException accessDenied(String fileType, List paths, AccessDeniedException cause) { + String message = "not permitted to read "; + if (paths.size() == 1) { + message += "the " + fileType + " file"; + } else { + message += "one of more " + fileType + "files"; + } + message += " [" + pathsToString(paths) + "]"; + return new SslConfigException(message, cause); + } + + static SslConfigException accessControlFailure(String fileType, List paths, AccessControlException cause, Path basePath) { + String message = "cannot read configured " + fileType + " [" + pathsToString(paths) + "] because "; + if (paths.size() == 1) { + message += "access to read the file is blocked"; + } else { + message += "access to read one or more files is blocked"; + } + message += "; SSL resources should be placed in the " ; + if (basePath == null) { + message += "Elasticsearch config directory"; + } else { + message += "[" + basePath + "] directory"; + } + return new SslConfigException(message, cause); + } + + public static SslConfigException securityException(String fileType, List paths, GeneralSecurityException cause) { + return securityException(fileType, paths, cause, null); + } + + public static SslConfigException 
securityException(String fileType, List paths, GeneralSecurityException cause, String detail) { + String message = "cannot load " + fileType; + if (paths.isEmpty() == false) { + message += " from [" + pathsToString(paths) + "]"; + } + message += " due to " + cause.getClass().getSimpleName(); + if (cause.getMessage() != null) { + message += " (" + cause.getMessage() + ")"; + } + if (detail != null) { + message = message + "; " + detail; + } else if (hasCause(UnrecoverableKeyException.class, cause)) { + message += "; this is usually caused by an incorrect password"; + } + + return new SslConfigException(message, cause); + } + + private static boolean hasCause(Class exceptionType, Throwable exception) { + if (exception == null) { + return false; + } + if (exceptionType.isInstance(exception)) { + return true; + } + + final Throwable cause = exception.getCause(); + if (cause == null || cause == exception) { + return false; + } + return hasCause(exceptionType, cause); + } + + public static SslConfigException configException(String fileType, List paths, SslConfigException cause) { + String message = "cannot load " + fileType; + if (paths.isEmpty() == false) { + message += " from [" + pathsToString(paths) + "]"; + } + message += " - " + cause.getMessage(); + return new SslConfigException(message, cause); + } +} diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslKeyConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslKeyConfig.java index 100f11b80e62b..210f2221089d9 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslKeyConfig.java @@ -8,9 +8,14 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.core.Tuple; + import javax.net.ssl.X509ExtendedKeyManager; import java.nio.file.Path; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; import java.util.Collection; +import java.util.List; /** * An interface for building a key manager at runtime. @@ -31,5 +36,26 @@ public interface SslKeyConfig { */ X509ExtendedKeyManager createKeyManager(); + /** + * @return A list of private keys and their associated certificates + */ + List> getKeys(); + + /** + * @return A collection of {@link StoredCertificate certificates} used by this config. 
+ */ + Collection getConfiguredCertificates(); + + default boolean hasKeyMaterial() { + return getKeys().isEmpty() == false; + } + + /** + * Create a {@link SslTrustConfig} based on the underlying file store that backs this key config + */ + default SslTrustConfig asTrustConfig() { + return null; + } + } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslTrustConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslTrustConfig.java index 6cdc488db78d4..e6b73583f09e4 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslTrustConfig.java @@ -10,6 +10,7 @@ import javax.net.ssl.X509ExtendedTrustManager; import java.nio.file.Path; +import java.security.cert.Certificate; import java.util.Collection; /** @@ -31,5 +32,16 @@ public interface SslTrustConfig { */ X509ExtendedTrustManager createTrustManager(); + /** + * @return A collection of {@link Certificate certificates} used by this config, excluding those shipped with the JDK + */ + Collection getConfiguredCertificates(); + + /** + * @return {@code true} if this trust config is based on the system default truststore + */ + default boolean isSystemDefault() { + return false; + } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslUtil.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslUtil.java index f63dc068e8ce8..841f46d13a135 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslUtil.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslUtil.java @@ -20,8 +20,8 @@ private SslUtil() { // utility class } - public static String calculateFingerprint(X509Certificate certificate) throws CertificateEncodingException { - final MessageDigest sha1 = messageDigest("SHA-1"); + public static String calculateFingerprint(X509Certificate certificate, String algorithm) throws CertificateEncodingException { + final MessageDigest sha1 = messageDigest(algorithm); sha1.update(certificate.getEncoded()); return toHexString(sha1.digest()); } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreKeyConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreKeyConfig.java index fbacc8edc986c..5258fc8e10e4a 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreKeyConfig.java @@ -8,74 +8,184 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; + import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; +import java.io.IOException; import java.nio.file.Path; +import java.security.AccessControlException; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.KeyStoreException; +import java.security.PrivateKey; import java.security.UnrecoverableKeyException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Enumeration; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; /** * A {@link SslKeyConfig} that builds a Key Manager from a keystore file. 
*/ public class StoreKeyConfig implements SslKeyConfig { - private final Path path; - private final char[] storePassword; + private final String keystorePath; private final String type; + private final char[] storePassword; + private final Function filter; private final char[] keyPassword; private final String algorithm; + private final Path configBasePath; /** * @param path The path to the keystore file * @param storePassword The password for the keystore * @param type The {@link KeyStore#getType() type} of the keystore (typically "PKCS12" or "jks"). - * See {@link KeyStoreUtil#inferKeyStoreType(Path)}. + * See {@link KeyStoreUtil#inferKeyStoreType}. + * @param filter A function to process the keystore after it is loaded. See {@link KeyStoreUtil#filter} * @param keyPassword The password for the key(s) within the keystore - * (see {@link javax.net.ssl.KeyManagerFactory#init(KeyStore, char[])}). + * (see {@link KeyManagerFactory#init(KeyStore, char[])}). * @param algorithm The algorithm to use for the Key Manager (see {@link KeyManagerFactory#getAlgorithm()}). + * @param configBasePath The base path for configuration files (used for error handling) */ - StoreKeyConfig(Path path, char[] storePassword, String type, char[] keyPassword, String algorithm) { - this.path = path; - this.storePassword = storePassword; - this.type = type; - this.keyPassword = keyPassword; - this.algorithm = algorithm; + public StoreKeyConfig(String path, char[] storePassword, String type, @Nullable Function filter, + char[] keyPassword, String algorithm, Path configBasePath) { + this.keystorePath = Objects.requireNonNull(path, "Keystore path cannot be null"); + this.storePassword = Objects.requireNonNull(storePassword, "Keystore password cannot be null (but may be empty)"); + this.type = Objects.requireNonNull(type, "Keystore type cannot be null"); + this.filter = filter; + this.keyPassword = Objects.requireNonNull(keyPassword, "Key password cannot be null (but may be empty)"); + this.algorithm = Objects.requireNonNull(algorithm, "Keystore algorithm cannot be null"); + this.configBasePath = Objects.requireNonNull(configBasePath, "Config path cannot be null"); + } + + @Override + public SslTrustConfig asTrustConfig() { + final String trustStoreAlgorithm = TrustManagerFactory.getDefaultAlgorithm(); + return new StoreTrustConfig(keystorePath, storePassword, type, trustStoreAlgorithm, false, configBasePath); } @Override public Collection getDependentFiles() { - return Collections.singleton(path); + return List.of(resolvePath()); + } + + @Override + public boolean hasKeyMaterial() { + return true; + } + + private Path resolvePath() { + return configBasePath.resolve(keystorePath); + } + + /** + * Equivalent to {@link #getKeys(boolean) getKeys(false)}. + */ + @Override + public List> getKeys() { + return getKeys(false); + } + + /** + * Return the list of keys inside the configured keystore, optionally applying the {@code filter} that was set during construction. 
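The constructor above now takes the keystore path as a String that is resolved against configBasePath, plus an optional filter (a KeyStore-to-KeyStore function) applied after the store is loaded. A hedged usage sketch; the config directory, file name, passwords and alias are illustrative values, and the filter argument may simply be null:

    Path configDir = Paths.get("/etc/elasticsearch");            // illustrative config directory
    StoreKeyConfig keyConfig = new StoreKeyConfig(
        "certs/http.p12",                                         // resolved against configDir
        "store-pass".toCharArray(),
        "PKCS12",
        ks -> KeyStoreUtil.filter(ks, e -> "http".equals(e.getAlias())),  // optional entry filter
        "key-pass".toCharArray(),
        KeyManagerFactory.getDefaultAlgorithm(),
        configDir
    );
    X509ExtendedKeyManager keyManager = keyConfig.createKeyManager();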
+ */ + public List> getKeys(boolean filterKeystore) { + final Path path = resolvePath(); + KeyStore keyStore = readKeyStore(path); + if (filterKeystore) { + keyStore = this.processKeyStore(keyStore); + } + return KeyStoreUtil.stream(keyStore, ex -> keystoreException(path, ex)) + .filter(KeyStoreUtil.KeyStoreEntry::isKeyEntry) + .map(entry -> { + final X509Certificate certificate = entry.getX509Certificate(); + if (certificate != null) { + return new Tuple<>(entry.getKey(keyPassword), certificate); + } + return null; + }) + .filter(Objects::nonNull) + .collect(Collectors.toUnmodifiableList()); + } + + @Override + public Collection getConfiguredCertificates() { + final Path path = resolvePath(); + final KeyStore keyStore = readKeyStore(path); + return KeyStoreUtil.stream(keyStore, ex -> keystoreException(path, ex)) + .flatMap(entry -> { + final List certificates = new ArrayList<>(); + boolean firstElement = true; + for (X509Certificate certificate : entry.getX509CertificateChain()) { + certificates.add(new StoredCertificate(certificate, keystorePath, type, entry.getAlias(), firstElement)); + firstElement = false; + } + return certificates.stream(); + }) + .collect(Collectors.toUnmodifiableList()); } @Override public X509ExtendedKeyManager createKeyManager() { + final Path path = resolvePath(); + return createKeyManager(path); + } + + private X509ExtendedKeyManager createKeyManager(Path path) { try { - final KeyStore keyStore = KeyStoreUtil.readKeyStore(path, type, storePassword); - checkKeyStore(keyStore); + KeyStore keyStore = readKeyStore(path); + keyStore = processKeyStore(keyStore); + checkKeyStore(keyStore, path); return KeyStoreUtil.createKeyManager(keyStore, keyPassword, algorithm); - } catch (UnrecoverableKeyException e) { - String message = "failed to load a KeyManager for keystore [" + path.toAbsolutePath() - + "], this is usually caused by an incorrect key-password"; + } catch (GeneralSecurityException e) { + throw keystoreException(path, e); + } + } + + private KeyStore processKeyStore(KeyStore keyStore) { + if (filter == null) { + return keyStore; + } + return Objects.requireNonNull(filter.apply(keyStore), "A keystore filter may not return null"); + } + + private KeyStore readKeyStore(Path path) { + try { + return KeyStoreUtil.readKeyStore(path, type, storePassword); + } catch (AccessControlException e) { + throw SslFileUtil.accessControlFailure("[" + type + "] keystore", List.of(path), e, configBasePath); + } catch (IOException e) { + throw SslFileUtil.ioException("[" + type + "] keystore", List.of(path), e); + } catch (GeneralSecurityException e) { + throw keystoreException(path, e); + } + } + + private SslConfigException keystoreException(Path path, GeneralSecurityException e) { + String extra = null; + if (e instanceof UnrecoverableKeyException) { + extra = "this is usually caused by an incorrect key-password"; if (keyPassword.length == 0) { - message += " (no key-password was provided)"; + extra += " (no key-password was provided)"; } else if (Arrays.equals(storePassword, keyPassword)) { - message += " (we tried to access the key using the same password as the keystore)"; + extra += " (we tried to access the key using the same password as the keystore)"; } - throw new SslConfigException(message, e); - } catch (GeneralSecurityException e) { - throw new SslConfigException("failed to load a KeyManager for keystore [" + path + "] of type [" + type + "]", e); } + return SslFileUtil.securityException("[" + type + "] keystore", path == null ? 
List.of() : List.of(path), e, extra); } /** * Verifies that the keystore contains at least 1 private key entry. */ - private void checkKeyStore(KeyStore keyStore) throws KeyStoreException { + private void checkKeyStore(KeyStore keyStore, Path path) throws KeyStoreException { Enumeration aliases = keyStore.aliases(); while (aliases.hasMoreElements()) { String alias = aliases.nextElement(); @@ -83,13 +193,55 @@ private void checkKeyStore(KeyStore keyStore) throws KeyStoreException { return; } } - final String message; + String message = "the " + keyStore.getType() + " keystore"; if (path != null) { - message = "the keystore [" + path + "] does not contain a private key entry"; - } else { - message = "the configured PKCS#11 token does not contain a private key entry"; + message += " [" + path + "]"; } + message += "does not contain a private key entry"; throw new SslConfigException(message); } + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(getClass().getSimpleName()); + sb.append('{'); + + String path = keystorePath; + if (path != null) { + sb.append("path=").append(path).append(", "); + } + sb.append("type=").append(type); + sb.append(", storePassword=").append(storePassword.length == 0 ? "" : ""); + sb.append(", keyPassword="); + if (keyPassword.length == 0) { + sb.append(""); + } else if (Arrays.equals(storePassword, keyPassword)) { + sb.append(""); + } else { + sb.append(""); + } + sb.append(", algorithm=").append(algorithm); + sb.append('}'); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StoreKeyConfig that = (StoreKeyConfig) o; + return this.keystorePath.equals(that.keystorePath) + && this.type.equals(that.type) + && this.algorithm.equals(that.algorithm) + && Arrays.equals(this.storePassword, that.storePassword) + && Arrays.equals(this.keyPassword, that.keyPassword); + } + + @Override + public int hashCode() { + int result = Objects.hash(keystorePath, type, algorithm); + result = 31 * result + Arrays.hashCode(storePassword); + result = 31 * result + Arrays.hashCode(keyPassword); + return result; + } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java index 4edc3b5999c0e..47c0c31218e1c 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java @@ -9,57 +9,124 @@ package org.elasticsearch.common.ssl; import javax.net.ssl.X509ExtendedTrustManager; +import java.io.IOException; import java.nio.file.Path; +import java.security.AccessControlException; import java.security.GeneralSecurityException; import java.security.KeyStore; +import java.security.cert.X509Certificate; +import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Enumeration; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; /** * A {@link SslTrustConfig} that builds a Trust Manager from a keystore file. 
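StoreTrustConfig is reworked along the same lines: the truststore path is a String resolved against a config base path, and a requireTrustAnchors flag controls whether the store must contain at least one trusted certificate entry. A hedged construction sketch (file name, password and directory are illustrative); note that StoreKeyConfig.asTrustConfig() above builds exactly this kind of trust config over the same keystore file with requireTrustAnchors set to false:

    Path configDir = Paths.get("/etc/elasticsearch");             // illustrative config directory
    StoreTrustConfig trustConfig = new StoreTrustConfig(
        "certs/ca.p12",                                            // resolved against configDir
        "trust-pass".toCharArray(),
        "PKCS12",
        TrustManagerFactory.getDefaultAlgorithm(),
        true,                                                      // fail if there is no trust anchor
        configDir
    );
    X509ExtendedTrustManager trustManager = trustConfig.createTrustManager();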
*/ -final class StoreTrustConfig implements SslTrustConfig { - private final Path path; +public final class StoreTrustConfig implements SslTrustConfig { + private final String truststorePath; private final char[] password; private final String type; private final String algorithm; + private final boolean requireTrustAnchors; + private final Path configBasePath; /** * @param path The path to the keystore file * @param password The password for the keystore * @param type The {@link KeyStore#getType() type} of the keystore (typically "PKCS12" or "jks"). - * See {@link KeyStoreUtil#inferKeyStoreType(Path)}. + * See {@link KeyStoreUtil#inferKeyStoreType}. * @param algorithm The algorithm to use for the Trust Manager (see {@link javax.net.ssl.TrustManagerFactory#getAlgorithm()}). + * @param requireTrustAnchors If true, the truststore will be checked to ensure that it contains at least one valid trust anchor. + * @param configBasePath The base path for the configuration directory */ - StoreTrustConfig(Path path, char[] password, String type, String algorithm) { - this.path = path; - this.type = type; - this.algorithm = algorithm; - this.password = password; + public StoreTrustConfig(String path, char[] password, String type, String algorithm, boolean requireTrustAnchors, Path configBasePath) { + this.truststorePath = Objects.requireNonNull(path, "Truststore path cannot be null"); + this.type = Objects.requireNonNull(type, "Truststore type cannot be null"); + this.algorithm = Objects.requireNonNull(algorithm, "Truststore algorithm cannot be null"); + this.password = Objects.requireNonNull(password, "Truststore password cannot be null (but may be empty)"); + this.requireTrustAnchors = requireTrustAnchors; + this.configBasePath = configBasePath; } @Override public Collection getDependentFiles() { - return Collections.singleton(path); + return List.of(resolvePath()); + } + + private Path resolvePath() { + return configBasePath.resolve(this.truststorePath); + } + + @Override + public Collection getConfiguredCertificates() { + final Path path = resolvePath(); + final KeyStore trustStore = readKeyStore(path); + return KeyStoreUtil.stream(trustStore, ex -> keystoreException(path, ex)) + .map(entry -> { + final X509Certificate certificate = entry.getX509Certificate(); + if (certificate != null) { + final boolean hasKey = entry.isKeyEntry(); + return new StoredCertificate(certificate, this.truststorePath, this.type, entry.getAlias(), hasKey); + } else { + return null; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toUnmodifiableList()); } @Override public X509ExtendedTrustManager createTrustManager() { + final Path path = resolvePath(); try { - final KeyStore store = KeyStoreUtil.readKeyStore(path, type, password); - checkTrustStore(store); + final KeyStore store = readKeyStore(path); + if (requireTrustAnchors) { + checkTrustStore(store, path); + } return KeyStoreUtil.createTrustManager(store, algorithm); } catch (GeneralSecurityException e) { - throw new SslConfigException("cannot create trust manager for path=[" + (path == null ? null : path.toAbsolutePath()) - + "] type=[" + type + "] password=[" + (password.length == 0 ? 
"" : "") + "]", e); + throw keystoreException(path, e); } } + private KeyStore readKeyStore(Path path) { + try { + return KeyStoreUtil.readKeyStore(path, type, password); + } catch (AccessControlException e) { + throw SslFileUtil.accessControlFailure(fileTypeForException(), List.of(path), e, configBasePath); + } catch (IOException e) { + throw SslFileUtil.ioException(fileTypeForException(), List.of(path), e, getAdditionalErrorDetails()); + } catch (GeneralSecurityException e) { + throw keystoreException(path, e); + } + } + + private SslConfigException keystoreException(Path path, GeneralSecurityException e) { + final String extra = getAdditionalErrorDetails(); + return SslFileUtil.securityException(fileTypeForException(), List.of(path), e, extra); + } + + private String getAdditionalErrorDetails() { + final String extra; + if (password.length == 0) { + extra = "(no password was provided)"; + } else { + extra = "(a keystore password was provided)"; + } + return extra; + } + + private String fileTypeForException() { + return "[" + type + "] keystore (as a truststore)"; + } + /** * Verifies that the keystore contains at least 1 trusted certificate entry. */ - private void checkTrustStore(KeyStore store) throws GeneralSecurityException { + private void checkTrustStore(KeyStore store, Path path) throws GeneralSecurityException { Enumeration aliases = store.aliases(); while (aliases.hasMoreElements()) { String alias = aliases.nextElement(); @@ -67,13 +134,35 @@ private void checkTrustStore(KeyStore store) throws GeneralSecurityException { return; } } - final String message; - if (path != null) { - message = "the truststore [" + path + "] does not contain any trusted certificate entries"; - } else { - message = "the configured PKCS#11 token does not contain any trusted certificate entries"; - } - throw new SslConfigException(message); + throw new SslConfigException("the truststore [" + path + "] does not contain any trusted certificate entries"); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StoreTrustConfig that = (StoreTrustConfig) o; + return truststorePath.equals(that.truststorePath) + && Arrays.equals(password, that.password) + && type.equals(that.type) + && algorithm.equals(that.algorithm); + } + + @Override + public int hashCode() { + int result = Objects.hash(truststorePath, type, algorithm); + result = 31 * result + Arrays.hashCode(password); + return result; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("StoreTrustConfig{"); + sb.append("path=").append(truststorePath); + sb.append(", password=").append(password.length == 0 ? "" : ""); + sb.append(", type=").append(type); + sb.append(", algorithm=").append(algorithm); + sb.append('}'); + return sb.toString(); + } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoredCertificate.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoredCertificate.java new file mode 100644 index 0000000000000..24f33a1118b5e --- /dev/null +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoredCertificate.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.ssl; + +import org.elasticsearch.core.Nullable; + +import java.security.cert.X509Certificate; +import java.util.Objects; + +/** + * Information about a certificate that is locally stored.It includes a reference to the {@link X509Certificate} itself, + * as well as information about where it was loaded from. + */ +public final class StoredCertificate { + + private final X509Certificate certificate; + + @Nullable + // Will be null in PKCS#11 + private final String path; + + private final String format; + + @Nullable + // Will be null in PEM + private final String alias; + + private final boolean hasPrivateKey; + + public StoredCertificate(X509Certificate certificate, String path, String format, String alias, boolean hasPrivateKey) { + this.certificate = Objects.requireNonNull(certificate, "Certificate may not be null"); + this.path = path; + this.format = Objects.requireNonNull(format, "Format may not be null"); + this.alias = alias; + this.hasPrivateKey = hasPrivateKey; + } + + public X509Certificate getCertificate() { + return certificate; + } + + public String getPath() { + return path; + } + + public String getFormat() { + return format; + } + + public String getAlias() { + return alias; + } + + public boolean hasPrivateKey() { + return hasPrivateKey; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StoredCertificate that = (StoredCertificate) o; + return hasPrivateKey == that.hasPrivateKey + && certificate.equals(that.certificate) + && Objects.equals(path, that.path) + && format.equals(that.format) + && Objects.equals(alias, that.alias); + } + + @Override + public int hashCode() { + return Objects.hash(certificate, path, format, alias, hasPrivateKey); + } +} diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/TrustEverythingConfig.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/TrustEverythingConfig.java index 27a770890f028..4936b3f78e8bb 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/TrustEverythingConfig.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/TrustEverythingConfig.java @@ -14,16 +14,16 @@ import java.nio.file.Path; import java.security.cert.X509Certificate; import java.util.Collection; -import java.util.Collections; +import java.util.List; /** * A {@link SslTrustConfig} that trusts all certificates. Used when {@link SslVerificationMode#isCertificateVerificationEnabled()} is * {@code false}. * This class cannot be used on FIPS-140 JVM as it has its own trust manager implementation. 
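Both SslKeyConfig and SslTrustConfig now expose getConfiguredCertificates(), and the StoredCertificate class above carries the provenance of each entry. A hedged sketch of consuming that metadata, for example when reporting the certificates a node has loaded; config stands for any key or trust config instance and the output format is illustrative:

    // List every locally configured certificate with where it came from.
    for (StoredCertificate stored : config.getConfiguredCertificates()) {
        X509Certificate cert = stored.getCertificate();
        System.out.printf("%s alias=%s path=%s format=%s hasPrivateKey=%s%n",
            cert.getSubjectX500Principal().getName(),
            stored.getAlias(),        // null for PEM entries
            stored.getPath(),         // null for PKCS#11 entries
            stored.getFormat(),       // e.g. "PEM", "PKCS12", "jks"
            stored.hasPrivateKey());
    }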
*/ -final class TrustEverythingConfig implements SslTrustConfig { +public final class TrustEverythingConfig implements SslTrustConfig { - static final TrustEverythingConfig TRUST_EVERYTHING = new TrustEverythingConfig(); + public static final TrustEverythingConfig TRUST_EVERYTHING = new TrustEverythingConfig(); private TrustEverythingConfig() { // single instances @@ -66,7 +66,12 @@ public X509Certificate[] getAcceptedIssuers() { @Override public Collection getDependentFiles() { - return Collections.emptyList(); + return List.of(); + } + + @Override + public Collection getConfiguredCertificates() { + return List.of(); } @Override diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/KeyStoreUtilTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/KeyStoreUtilTests.java new file mode 100644 index 0000000000000..c586ead12b002 --- /dev/null +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/KeyStoreUtilTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.ssl; + +import org.elasticsearch.test.ESTestCase; + +import java.nio.file.Path; +import java.security.KeyStore; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; + +public class KeyStoreUtilTests extends ESTestCase { + private static final char[] P12_PASS = "p12-pass".toCharArray(); + + public void testFilter() throws Exception { + assumeFalse("Can't use PKCS#12 keystores in a FIPS JVM", inFipsJvm()); + + final Path p12 = getDataPath("/certs/cert-all/certs.p12"); + final KeyStore original = KeyStoreUtil.readKeyStore(p12, "PKCS12", P12_PASS); + + // No-op filter + final KeyStore clone = KeyStoreUtil.filter(KeyStoreUtil.readKeyStore(p12, "PKCS12", P12_PASS), entry -> true); + assertThat(Collections.list(clone.aliases()), containsInAnyOrder("cert1", "cert2")); + assertSameEntry(original, clone, "cert1", P12_PASS); + assertSameEntry(original, clone, "cert2", P12_PASS); + + // Filter by alias + final KeyStore cert1 = KeyStoreUtil.filter( + KeyStoreUtil.readKeyStore(p12, "PKCS12", P12_PASS), + entry -> entry.getAlias().equals("cert1") + ); + assertThat(Collections.list(cert1.aliases()), containsInAnyOrder("cert1")); + assertSameEntry(original, cert1, "cert1", P12_PASS); + + // Filter by cert + final KeyStore cert2 = KeyStoreUtil.filter( + KeyStoreUtil.readKeyStore(p12, "PKCS12", P12_PASS), + entry -> entry.getX509Certificate().getSubjectX500Principal().getName().equals("CN=cert2") + ); + assertThat(Collections.list(cert2.aliases()), containsInAnyOrder("cert2")); + assertSameEntry(original, cert2, "cert2", P12_PASS); + } + + private void assertSameEntry(KeyStore ks1, KeyStore ks2, String alias, char[] keyPassword) throws Exception { + assertThat(ks1.isKeyEntry(alias), equalTo(ks2.isKeyEntry(alias))); + assertThat(ks1.isCertificateEntry(alias), equalTo(ks2.isCertificateEntry(alias))); + assertThat(ks1.getCertificate(alias), equalTo(ks2.getCertificate(alias))); + assertThat(ks1.getCertificateChain(alias), equalTo(ks2.getCertificateChain(alias))); + assertThat(ks1.getKey(alias, P12_PASS), equalTo(ks2.getKey(alias, keyPassword))); + } +} diff --git 
a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemKeyConfigTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemKeyConfigTests.java index c132642dccb58..dbb37e4521794 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemKeyConfigTests.java @@ -8,69 +8,149 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import org.junit.Before; -import javax.net.ssl.X509ExtendedKeyManager; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; import java.security.GeneralSecurityException; import java.security.PrivateKey; import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.stream.Stream; + +import javax.net.ssl.X509ExtendedKeyManager; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class PemKeyConfigTests extends ESTestCase { private static final int IP_NAME = 7; private static final int DNS_NAME = 2; + private Path configBasePath; + + @Before + public void setupPath(){ + configBasePath = getDataPath("/certs"); + } + public void testBuildKeyConfigFromPkcs1PemFilesWithoutPassword() throws Exception { - final Path cert = getDataPath("/certs/cert1/cert1.crt"); - final Path key = getDataPath("/certs/cert1/cert1.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); - assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); + final String cert = "cert1/cert1.crt"; + final String key = "cert1/cert1.key"; + final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0], configBasePath); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolve(cert, key))); assertCertificateAndKey(keyConfig, "CN=cert1"); } public void testBuildKeyConfigFromPkcs1PemFilesWithPassword() throws Exception { - final Path cert = getDataPath("/certs/cert2/cert2.crt"); - final Path key = getDataPath("/certs/cert2/cert2.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray()); - assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); + final String cert = "cert2/cert2.crt"; + final String key = "cert2/cert2.key"; + final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray(), configBasePath); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolve(cert, key))); assertCertificateAndKey(keyConfig, "CN=cert2"); } public void testBuildKeyConfigFromPkcs8PemFilesWithoutPassword() throws Exception { - final Path cert = getDataPath("/certs/cert1/cert1.crt"); - final Path key = getDataPath("/certs/cert1/cert1-pkcs8.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); - 
assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); + final String cert = "cert1/cert1.crt"; + final String key = "cert1/cert1-pkcs8.key"; + final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0], configBasePath); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolve(cert, key))); assertCertificateAndKey(keyConfig, "CN=cert1"); } public void testBuildKeyConfigFromPkcs8PemFilesWithPassword() throws Exception { assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); - final Path cert = getDataPath("/certs/cert2/cert2.crt"); - final Path key = getDataPath("/certs/cert2/cert2-pkcs8.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray()); - assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert, key)); + final String cert = "cert2/cert2.crt"; + final String key = "cert2/cert2-pkcs8.key"; + final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "c2-pass".toCharArray(), configBasePath); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolve(cert, key))); assertCertificateAndKey(keyConfig, "CN=cert2"); } + public void testBuildKeyConfigUsingCertificateChain() throws Exception { + final String ca = "ca1/ca.crt"; + final String cert = "cert1/cert1.crt"; + final String key = "cert1/cert1.key"; + + final Path chain = createTempFile("chain", ".crt"); + Files.write(chain, Files.readAllBytes(configBasePath.resolve(cert)), StandardOpenOption.APPEND); + Files.write(chain, Files.readAllBytes(configBasePath.resolve(ca)), StandardOpenOption.APPEND); + + final PemKeyConfig keyConfig = new PemKeyConfig(chain.toString(), key, new char[0], configBasePath); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(chain, configBasePath.resolve(key))); + assertCertificateAndKey(keyConfig, "CN=cert1", "CN=Test CA 1"); + final Collection certificates = keyConfig.getConfiguredCertificates(); + assertThat(certificates, Matchers.hasSize(2)); + final Iterator iterator = certificates.iterator(); + StoredCertificate c1 = iterator.next(); + StoredCertificate c2 = iterator.next(); + + assertThat(c1.getCertificate().getSubjectDN().toString(), equalTo("CN=cert1")); + assertThat(c1.hasPrivateKey(), equalTo(true)); + assertThat(c1.getAlias(), nullValue()); + assertThat(c1.getFormat(), equalTo("PEM")); + assertThat(c1.getPath(), equalTo(chain.toString())); + + assertThat(c2.getCertificate().getSubjectDN().toString(), equalTo("CN=Test CA 1")); + assertThat(c2.hasPrivateKey(), equalTo(false)); + assertThat(c2.getAlias(), nullValue()); + assertThat(c2.getFormat(), equalTo("PEM")); + assertThat(c2.getPath(), equalTo(chain.toString())); + + final List> keys = keyConfig.getKeys(); + assertThat(keys, iterableWithSize(1)); + assertThat(keys.get(0).v1(), notNullValue()); + assertThat(keys.get(0).v1().getAlgorithm(), equalTo("RSA")); + assertThat(keys.get(0).v2(), notNullValue()); + assertThat(keys.get(0).v2().getSubjectDN().toString(), equalTo("CN=cert1")); + } + + public void testInvertedCertificateChainFailsToCreateKeyManager() throws Exception { + final String ca = "ca1/ca.crt"; + final String cert = "cert1/cert1.crt"; + final String key = "cert1/cert1.key"; + + final Path chain = createTempFile("chain", ".crt"); + // This is (intentionally) the wrong order. It should be cert + ca. 
+ Files.write(chain, Files.readAllBytes(configBasePath.resolve(ca)), StandardOpenOption.APPEND); + Files.write(chain, Files.readAllBytes(configBasePath.resolve(cert)), StandardOpenOption.APPEND); + + final PemKeyConfig keyConfig = new PemKeyConfig(chain.toString(), key, new char[0], configBasePath); + final SslConfigException exception = expectThrows(SslConfigException.class, keyConfig::createKeyManager); + + assertThat(exception.getMessage(), containsString("failed to load a KeyManager")); + final Throwable cause = exception.getCause(); + assertThat(cause, notNullValue()); + if (inFipsJvm()) { + // BC FKS first checks that the key & cert match (they don't because the key is for 'cert1' not 'ca') + assertThat(cause.getMessage(), containsString("RSA keys do not have the same modulus")); + } else { + // SUN PKCS#12 first checks that the chain is correctly structured (it's not, due to the order) + assertThat(cause.getMessage(), containsString("Certificate chain is not valid")); + } + } + public void testKeyManagerFailsWithIncorrectPassword() throws Exception { final Path cert = getDataPath("/certs/cert2/cert2.crt"); final Path key = getDataPath("/certs/cert2/cert2.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, "wrong-password".toCharArray()); + final PemKeyConfig keyConfig = new PemKeyConfig(cert.toString(), key.toString(), "wrong-password".toCharArray(), configBasePath); assertPasswordIsIncorrect(keyConfig, key); } @@ -78,7 +158,7 @@ public void testMissingCertificateFailsWithMeaningfulMessage() throws Exception final Path key = getDataPath("/certs/cert1/cert1.key"); final Path cert = key.getParent().resolve("dne.crt"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); + final PemKeyConfig keyConfig = new PemKeyConfig(cert.toString(), key.toString(), new char[0], configBasePath); assertFileNotFound(keyConfig, "certificate", cert); } @@ -86,7 +166,7 @@ public void testMissingKeyFailsWithMeaningfulMessage() throws Exception { final Path cert = getDataPath("/certs/cert1/cert1.crt"); final Path key = cert.getParent().resolve("dne.key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); + final PemKeyConfig keyConfig = new PemKeyConfig(cert.toString(), key.toString(), new char[0], configBasePath); assertFileNotFound(keyConfig, "private key", key); } @@ -98,7 +178,7 @@ public void testKeyConfigReloadsFileContents() throws Exception { final Path cert = createTempFile("cert", ".crt"); final Path key = createTempFile("cert", ".key"); - final PemKeyConfig keyConfig = new PemKeyConfig(cert, key, new char[0]); + final PemKeyConfig keyConfig = new PemKeyConfig(cert.toString(), key.toString(), new char[0], configBasePath); Files.copy(cert1, cert, StandardCopyOption.REPLACE_EXISTING); Files.copy(key1, key, StandardCopyOption.REPLACE_EXISTING); @@ -116,7 +196,11 @@ public void testKeyConfigReloadsFileContents() throws Exception { assertFileNotFound(keyConfig, "certificate", cert); } - private void assertCertificateAndKey(PemKeyConfig keyConfig, String expectedDN) throws CertificateParsingException { + private Path[] resolve(String ... names) { + return Stream.of(names).map(configBasePath::resolve).toArray(Path[]::new); + } + + private void assertCertificateAndKey(PemKeyConfig keyConfig, String certDN, String... 
caDN) throws CertificateParsingException { final X509ExtendedKeyManager keyManager = keyConfig.createKeyManager(); assertThat(keyManager, notNullValue()); @@ -126,27 +210,32 @@ private void assertCertificateAndKey(PemKeyConfig keyConfig, String expectedDN) final X509Certificate[] chain = keyManager.getCertificateChain("key"); assertThat(chain, notNullValue()); - assertThat(chain, arrayWithSize(1)); + assertThat(chain, arrayWithSize(1 + caDN.length)); final X509Certificate certificate = chain[0]; assertThat(certificate.getIssuerDN().getName(), is("CN=Test CA 1")); - assertThat(certificate.getSubjectDN().getName(), is(expectedDN)); + assertThat(certificate.getSubjectDN().getName(), is(certDN)); assertThat(certificate.getSubjectAlternativeNames(), iterableWithSize(2)); assertThat(certificate.getSubjectAlternativeNames(), containsInAnyOrder( Arrays.asList(DNS_NAME, "localhost"), Arrays.asList(IP_NAME, "127.0.0.1") )); + + for (int i = 0; i < caDN.length; i++) { + final X509Certificate ca = chain[i + 1]; + assertThat(ca.getSubjectDN().getName(), is(caDN[i])); + } } private void assertPasswordIsIncorrect(PemKeyConfig keyConfig, Path key) { final SslConfigException exception = expectThrows(SslConfigException.class, keyConfig::createKeyManager); - assertThat(exception.getMessage(), containsString("private key file")); + assertThat(exception.getMessage(), containsString("PEM private key")); assertThat(exception.getMessage(), containsString(key.toAbsolutePath().toString())); assertThat(exception.getCause(), instanceOf(GeneralSecurityException.class)); } private void assertFileNotFound(PemKeyConfig keyConfig, String type, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, keyConfig::createKeyManager); - assertThat(exception.getMessage(), containsString(type + " file")); + assertThat(exception.getMessage(), containsString(type + " [")); assertThat(exception.getMessage(), containsString(file.toAbsolutePath().toString())); assertThat(exception.getMessage(), containsString("does not exist")); assertThat(exception.getCause(), instanceOf(NoSuchFileException.class)); diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemTrustConfigTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemTrustConfigTests.java index a69bc0cba2a35..eaaa89ac6a568 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemTrustConfigTests.java @@ -10,7 +10,9 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import org.junit.Before; +import javax.net.ssl.X509ExtendedTrustManager; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; @@ -21,59 +23,66 @@ import java.security.cert.X509Certificate; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; -import javax.net.ssl.X509ExtendedTrustManager; - public class PemTrustConfigTests extends ESTestCase { + private static final String CERTS_DIR = "/certs"; + private Path basePath; + + @Before + public void setupPath() { + basePath = getDataPath(CERTS_DIR); + } + public void testBuildTrustConfigFromSinglePemFile() throws Exception { - final Path cert = getDataPath("/certs/ca1/ca.crt"); - final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(cert)); - assertThat(trustConfig.getDependentFiles(), 
Matchers.containsInAnyOrder(cert)); + final String cert = "ca1/ca.crt"; + final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(cert), basePath); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolvePath(cert))); assertCertificateChain(trustConfig, "CN=Test CA 1"); } public void testBuildTrustConfigFromMultiplePemFiles() throws Exception { - final Path cert1 = getDataPath("/certs/ca1/ca.crt"); - final Path cert2 = getDataPath("/certs/ca2/ca.crt"); - final Path cert3 = getDataPath("/certs/ca3/ca.crt"); - final PemTrustConfig trustConfig = new PemTrustConfig(Arrays.asList(cert1, cert2, cert3)); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert1, cert2, cert3)); + final String cert1 = "ca1/ca.crt"; + final String cert2 = "ca2/ca.crt"; + final String cert3 = "ca3/ca.crt"; + final PemTrustConfig trustConfig = new PemTrustConfig(Arrays.asList(cert1, cert2, cert3), basePath); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolvePaths(cert1, cert2, cert3))); assertCertificateChain(trustConfig, "CN=Test CA 1", "CN=Test CA 2", "CN=Test CA 3"); } public void testBadFileFormatFails() throws Exception { final Path ca = createTempFile("ca", ".crt"); Files.write(ca, generateRandomByteArrayOfLength(128), StandardOpenOption.APPEND); - final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(ca)); + final PemTrustConfig trustConfig = new PemTrustConfig(List.of(ca.toString()), basePath); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ca)); assertInvalidFileFormat(trustConfig, ca); } public void testEmptyFileFails() throws Exception { final Path ca = createTempFile("ca", ".crt"); - final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(ca)); + final PemTrustConfig trustConfig = new PemTrustConfig(List.of(ca.toString()), basePath); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ca)); assertEmptyFile(trustConfig, ca); } public void testMissingFileFailsWithMeaningfulMessage() throws Exception { - final Path cert = getDataPath("/certs/ca1/ca.crt").getParent().resolve("dne.crt"); - final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList(cert)); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert)); - assertFileNotFound(trustConfig, cert); + final PemTrustConfig trustConfig = new PemTrustConfig(Collections.singletonList("dne.crt"), basePath); + final Path path = resolvePath("dne.crt"); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(path)); + assertFileNotFound(trustConfig, path); } public void testOneMissingFileFailsWithMeaningfulMessageEvenIfOtherFileExist() throws Exception { - final Path cert1 = getDataPath("/certs/ca1/ca.crt"); - final Path cert2 = getDataPath("/certs/ca2/ca.crt").getParent().resolve("dne.crt"); - final Path cert3 = getDataPath("/certs/ca3/ca.crt"); - final PemTrustConfig trustConfig = new PemTrustConfig(Arrays.asList(cert1, cert2, cert3)); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(cert1, cert2, cert3)); - assertFileNotFound(trustConfig, cert2); + final String cert1 = "ca1/ca.crt"; + final String cert2 = "ca2/dne.crt"; + final String cert3 = "ca3/ca.crt"; + final PemTrustConfig trustConfig = new PemTrustConfig(Arrays.asList(cert1, cert2, cert3), basePath); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolvePaths(cert1, cert2, cert3))); + 
assertFileNotFound(trustConfig, resolvePath(cert2)); } public void testTrustConfigReloadsFileContents() throws Exception { @@ -84,7 +93,7 @@ public void testTrustConfigReloadsFileContents() throws Exception { final Path ca1 = createTempFile("ca1", ".crt"); final Path ca2 = createTempFile("ca2", ".crt"); - final PemTrustConfig trustConfig = new PemTrustConfig(Arrays.asList(ca1, ca2)); + final PemTrustConfig trustConfig = new PemTrustConfig(Arrays.asList(ca1.toString(), ca2.toString()), basePath); Files.copy(cert1, ca1, StandardCopyOption.REPLACE_EXISTING); Files.copy(cert2, ca2, StandardCopyOption.REPLACE_EXISTING); @@ -126,19 +135,27 @@ private void assertInvalidFileFormat(PemTrustConfig trustConfig, Path file) { if (inFipsJvm() && exception.getMessage().contains("failed to parse any certificates")) { return; } - assertThat(exception.getMessage(), Matchers.containsString("cannot create trust")); - assertThat(exception.getMessage(), Matchers.containsString("PEM")); + assertThat(exception.getMessage(), Matchers.containsString("cannot load")); + assertThat(exception.getMessage(), Matchers.containsString("PEM certificate")); assertThat(exception.getCause(), Matchers.instanceOf(GeneralSecurityException.class)); } private void assertFileNotFound(PemTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); - assertThat(exception.getMessage(), Matchers.containsString("files do not exist")); + assertThat(exception.getMessage(), Matchers.containsString("not exist")); assertThat(exception.getMessage(), Matchers.containsString("PEM")); assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); assertThat(exception.getCause(), Matchers.instanceOf(NoSuchFileException.class)); } + private Path resolvePath(String relativeName) { + return getDataPath(CERTS_DIR).resolve(relativeName); + } + + private Path[] resolvePaths(String... 
names) { + return Stream.of(names).map(this::resolvePath).toArray(Path[]::new); + } + private byte[] generateRandomByteArrayOfLength(int length) { byte[] bytes = randomByteArrayOfLength(length); /* diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java index cf2b20ff529d0..72f456daaa557 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -36,7 +37,7 @@ public void testReadPKCS8RsaKey() throws Exception { Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/rsa_key_pkcs8_plain.pem"), EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/rsa_key_pkcs8_plain.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } @@ -45,7 +46,7 @@ public void testReadPKCS8RsaKeyWithBagAttrs() throws Exception { Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/testnode_with_bagattrs.pem"), EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/testnode_with_bagattrs.pem"), EMPTY_PASSWORD); assertThat(privateKey, equalTo(key)); } @@ -53,14 +54,14 @@ public void testReadPKCS8DsaKey() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/dsa_key_pkcs8_plain.pem"), EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/dsa_key_pkcs8_plain.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } public void testReadEcKeyCurves() throws Exception { String curve = randomFrom("secp256r1", "secp384r1", "secp521r1"); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/private_" + curve + ".pem"), ""::toCharArray); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/private_" + curve + ".pem"), ""::toCharArray); assertThat(privateKey, instanceOf(ECPrivateKey.class)); ECParameterSpec parameterSpec = ((ECPrivateKey) privateKey).getParams(); ECGenParameterSpec algorithmParameterSpec = new ECGenParameterSpec(curve); @@ -73,7 +74,7 @@ public void testReadPKCS8EcKey() throws Exception { Key key = getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_pkcs8_plain.pem"), EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/ec_key_pkcs8_plain.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } @@ -83,7 +84,7 @@ public void testReadEncryptedPKCS8Key() throws Exception { Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); assertThat(key, 
instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath ("/certs/pem-utils/key_pkcs8_encrypted.pem"), TESTNODE_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -93,7 +94,7 @@ public void testReadDESEncryptedPKCS1Key() throws Exception { Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/testnode.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/testnode.pem"), TESTNODE_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } @@ -103,7 +104,7 @@ public void testReadAESEncryptedPKCS1Key() throws Exception { assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); String bits = randomFrom("128", "192", "256"); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/testnode-aes" + bits + ".pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/testnode-aes" + bits + ".pem"), TESTNODE_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -113,7 +114,7 @@ public void testReadPKCS1RsaKey() throws Exception { Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/testnode-unprotected.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/testnode-unprotected.pem"), TESTNODE_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -123,7 +124,7 @@ public void testReadOpenSslDsaKey() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_plain.pem"), EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_plain.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -133,7 +134,7 @@ public void testReadOpenSslDsaKeyWithParams() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_plain_with_params.pem"), + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_plain_with_params.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); @@ -144,7 +145,7 @@ public void testReadEncryptedOpenSslDsaKey() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -154,7 +155,7 @@ public void testReadOpenSslEcKey() throws Exception { Key key = 
getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain.pem"), EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -164,7 +165,7 @@ public void testReadOpenSslEcKeyWithParams() throws Exception { Key key = getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain_with_params.pem"), + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain_with_params.pem"), EMPTY_PASSWORD); assertThat(privateKey, notNullValue()); @@ -175,7 +176,7 @@ public void testReadEncryptedOpenSslEcKey() throws Exception { Key key = getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.readPrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_encrypted.pem"), TESTNODE_PASSWORD); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -183,30 +184,27 @@ public void testReadEncryptedOpenSslEcKey() throws Exception { public void testReadUnsupportedKey() { final Path path = getDataPath("/certs/pem-utils/key_unsupported.pem"); - SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); + SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.parsePrivateKey(path, TESTNODE_PASSWORD)); assertThat(e.getMessage(), containsString("file does not contain a supported key format")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); } - public void testReadPemCertificateAsKey() { + public void testErrorWhenReadingPemCertificateAsKey() { final Path path = getDataPath("/certs/pem-utils/testnode.crt"); - SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); + SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.parsePrivateKey(path, TESTNODE_PASSWORD)); assertThat(e.getMessage(), containsString("file does not contain a supported key format")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); } public void testReadCorruptedKey() { final Path path = getDataPath("/certs/pem-utils/corrupted_key_pkcs8_plain.pem"); - SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); - assertThat(e.getMessage(), containsString("private key")); - assertThat(e.getMessage(), containsString("cannot be parsed")); - assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); - assertThat(e.getCause().getMessage(), containsString("PEM footer is invalid or missing")); + IOException e = expectThrows(IOException.class, () -> PemUtils.parsePrivateKey(path, TESTNODE_PASSWORD)); + assertThat(e.getMessage(), containsString("PEM footer is invalid or missing")); } public void testReadEmptyFile() { final Path path = getDataPath("/certs/pem-utils/empty.pem"); - SslConfigException e = 
expectThrows(SslConfigException.class, () -> PemUtils.readPrivateKey(path, TESTNODE_PASSWORD)); + SslConfigException e = expectThrows(SslConfigException.class, () -> PemUtils.parsePrivateKey(path, TESTNODE_PASSWORD)); assertThat(e.getMessage(), containsString("file is empty")); assertThat(e.getMessage(), containsString(path.toAbsolutePath().toString())); } diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java index b14b67a9a5b9d..19ffb029974b6 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java @@ -36,6 +36,11 @@ public class SslConfigurationLoaderTests extends ESTestCase { private Settings settings; private MockSecureSettings secureSettings = new MockSecureSettings(); private SslConfigurationLoader loader = new SslConfigurationLoader("test.ssl.") { + @Override + protected boolean hasSettings(String prefix) { + return true; + } + @Override protected String getSettingAsString(String key) throws Exception { return settings.get(key); diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationTests.java index d42b55b1eb1a3..bb6c4de02fcf3 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationTests.java @@ -38,7 +38,7 @@ public void testBasicConstruction() { final List ciphers = randomSubsetOf(randomIntBetween(1, DEFAULT_CIPHERS.size()), DEFAULT_CIPHERS); final List protocols = randomSubsetOf(randomIntBetween(1, 4), VALID_PROTOCOLS); final SslConfiguration configuration = - new SslConfiguration(trustConfig, keyConfig, verificationMode, clientAuth, ciphers, protocols); + new SslConfiguration(true, trustConfig, keyConfig, verificationMode, clientAuth, ciphers, protocols); assertThat(configuration.getTrustConfig(), is(trustConfig)); assertThat(configuration.getKeyConfig(), is(keyConfig)); @@ -63,27 +63,27 @@ public void testEqualsAndHashCode() { final List ciphers = randomSubsetOf(randomIntBetween(1, DEFAULT_CIPHERS.size() - 1), DEFAULT_CIPHERS); final List protocols = randomSubsetOf(randomIntBetween(1, VALID_PROTOCOLS.length - 1), VALID_PROTOCOLS); final SslConfiguration configuration = - new SslConfiguration(trustConfig, keyConfig, verificationMode, clientAuth, ciphers, protocols); + new SslConfiguration(true, trustConfig, keyConfig, verificationMode, clientAuth, ciphers, protocols); EqualsHashCodeTestUtils.checkEqualsAndHashCode(configuration, - orig -> new SslConfiguration(orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), orig.getClientAuth(), + orig -> new SslConfiguration(true, orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), orig.getClientAuth(), orig.getCipherSuites(), orig.getSupportedProtocols()), orig -> { switch (randomIntBetween(1, 4)) { case 1: - return new SslConfiguration(orig.getTrustConfig(), orig.getKeyConfig(), + return new SslConfiguration(true, orig.getTrustConfig(), orig.getKeyConfig(), randomValueOtherThan(orig.getVerificationMode(), () -> randomFrom(SslVerificationMode.values())), orig.getClientAuth(), orig.getCipherSuites(), orig.getSupportedProtocols()); case 2: - return new 
SslConfiguration(orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), + return new SslConfiguration(true, orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), randomValueOtherThan(orig.getClientAuth(), () -> randomFrom(SslClientAuthenticationMode.values())), orig.getCipherSuites(), orig.getSupportedProtocols()); case 3: - return new SslConfiguration(orig.getTrustConfig(), orig.getKeyConfig(), + return new SslConfiguration(true, orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), orig.getClientAuth(), DEFAULT_CIPHERS, orig.getSupportedProtocols()); case 4: default: - return new SslConfiguration(orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), + return new SslConfiguration(true, orig.getTrustConfig(), orig.getKeyConfig(), orig.getVerificationMode(), orig.getClientAuth(), orig.getCipherSuites(), Arrays.asList(VALID_PROTOCOLS)); } }); @@ -92,7 +92,7 @@ public void testEqualsAndHashCode() { public void testDependentFiles() { final SslTrustConfig trustConfig = Mockito.mock(SslTrustConfig.class); final SslKeyConfig keyConfig = Mockito.mock(SslKeyConfig.class); - final SslConfiguration configuration = new SslConfiguration(trustConfig, keyConfig, + final SslConfiguration configuration = new SslConfiguration(true, trustConfig, keyConfig, randomFrom(SslVerificationMode.values()), randomFrom(SslClientAuthenticationMode.values()), DEFAULT_CIPHERS, SslConfigurationLoader.DEFAULT_PROTOCOLS); @@ -112,7 +112,7 @@ public void testBuildSslContext() { final SslTrustConfig trustConfig = Mockito.mock(SslTrustConfig.class); final SslKeyConfig keyConfig = Mockito.mock(SslKeyConfig.class); final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS); - final SslConfiguration configuration = new SslConfiguration(trustConfig, keyConfig, + final SslConfiguration configuration = new SslConfiguration(true, trustConfig, keyConfig, randomFrom(SslVerificationMode.values()), randomFrom(SslClientAuthenticationMode.values()), DEFAULT_CIPHERS, Collections.singletonList(protocol)); diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslDiagnosticsTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslDiagnosticsTests.java index a1dfc1018a055..9ff1e50485efa 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslDiagnosticsTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslDiagnosticsTests.java @@ -13,8 +13,6 @@ import org.hamcrest.Matchers; import org.mockito.Mockito; -import javax.net.ssl.SSLSession; -import javax.security.auth.x500.X500Principal; import java.io.IOException; import java.nio.file.Path; import java.security.PublicKey; @@ -32,6 +30,8 @@ import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.net.ssl.SSLSession; +import javax.security.auth.x500.X500Principal; public class SslDiagnosticsTests extends ESTestCase { @@ -52,7 +52,9 @@ public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsTrusted() final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint 
[3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by" + @@ -67,7 +69,9 @@ public void testDiagnosticMessageWhenServerProvidesAFullCertChainThatIsntTrusted final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" + @@ -81,7 +85,9 @@ public void testDiagnosticMessageWithPartialChainAndUnknownTrustedIssuers() thro final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain")); @@ -95,7 +101,9 @@ public void testDiagnosticMessageWithFullChainAndUnknownTrustedIssuers() throws final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" + @@ -109,7 +117,9 @@ public void testDiagnosticMessageWhenServerFullCertChainIsntTrustedButMimicIssue final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, 
Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1] fingerprint [2b7b0416391bdf86502505c23149022d2213dadc])" + @@ -125,7 +135,9 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyAndTheCertA final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" + @@ -140,7 +152,9 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyButTheCertA final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" + @@ -160,7 +174,9 @@ public void testDiagnosticMessageWhenServerTrustsManyCAs() throws Exception { final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.CLIENT, session, "xpack.security.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with client at [192.168.1.2];" + - " the client provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the client provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate is issued by [CN=Test CA 1]" + " but the client did not provide a copy of the issuing certificate in the certificate chain;" + " this ssl context 
([xpack.security.http.ssl]) is not configured to trust that issuer" + @@ -174,7 +190,9 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateOnlyWithMimicIs final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" + @@ -190,7 +208,9 @@ public void testDiagnosticMessageWhenServerProvidesEndCertificateWithMultipleMim final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.9];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1]" + " but the server did not provide a copy of the issuing certificate in the certificate chain;" + @@ -217,7 +237,9 @@ public void testDiagnosticMessageWhenServerProvidePartialChainFromTrustedCA() th "xpack.security.authc.realms.ldap.ldap1.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.5];" + " the server provided a certificate with subject name [CN=elastic1,OU=windows,DC=example,DC=com]" + - " and fingerprint [" + MOCK_FINGERPRINT_4 + "];" + + ", fingerprint [" + MOCK_FINGERPRINT_4 + "]," + + " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate does not have any subject alternative names;" + " the certificate is issued by [CN=ca,OU=windows,DC=example,DC=com];" + " the certificate is" + @@ -246,7 +268,9 @@ public void testDiagnosticMessageWhenServerProvidePartialChainFromUntrustedCA() "xpack.security.authc.realms.ldap.ldap1.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.6];" + " the server provided a certificate with subject name [CN=elastic1,OU=windows,DC=example,DC=com]" + - " and fingerprint [" + MOCK_FINGERPRINT_4 + "];" + + ", fingerprint [" + MOCK_FINGERPRINT_4 + "]," + + " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate does not have any subject alternative names;" + " the certificate is issued 
by [CN=ca,OU=windows,DC=example,DC=com];" + " the certificate is" + @@ -264,7 +288,8 @@ public void testDiagnosticMessageWhenServerProvidesASelfSignedCertThatIsDirectly "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + " the server provided a certificate with subject name [CN=Test CA 1]" + - " and fingerprint [2b7b0416391bdf86502505c23149022d2213dadc];" + + ", fingerprint [2b7b0416391bdf86502505c23149022d2213dadc], no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate does not have any subject alternative names;" + " the certificate is self-issued; the [CN=Test CA 1] certificate is trusted in this ssl context ([xpack.http.ssl])")); } @@ -277,7 +302,8 @@ public void testDiagnosticMessageWhenServerProvidesASelfSignedCertThatIsNotTrust "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.10.10];" + " the server provided a certificate with subject name [CN=Test CA 1]" + - " and fingerprint [2b7b0416391bdf86502505c23149022d2213dadc];" + + ", fingerprint [2b7b0416391bdf86502505c23149022d2213dadc], no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate does not have any subject alternative names;" + " the certificate is self-issued; the [CN=Test CA 1] certificate is not trusted in this ssl context ([xpack.http.ssl])")); } @@ -290,7 +316,8 @@ public void testDiagnosticMessageWhenServerProvidesASelfSignedCertWithMimicName( "xpack.http.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.1];" + " the server provided a certificate with subject name [CN=Test CA 1]" + - " and fingerprint [2b7b0416391bdf86502505c23149022d2213dadc];" + + ", fingerprint [2b7b0416391bdf86502505c23149022d2213dadc], no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate does not have any subject alternative names;" + " the certificate is self-issued; the [CN=Test CA 1] certificate is not trusted in this ssl context ([xpack.http.ssl]);" + " this ssl context does trust a certificate with subject [CN=Test CA 1]" + @@ -319,7 +346,136 @@ public void testDiagnosticMessageWhenServerProvidesAnEmailSubjAltName() throws E "xpack.monitoring.exporters.elastic-cloud.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.3];" + " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]" + - " and fingerprint [" + MOCK_FINGERPRINT_1 + "];" + + ", fingerprint [" + MOCK_FINGERPRINT_1 + "]," + + " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + + " the certificate does not have any DNS/IP subject alternative names;" + + " the certificate is self-issued;" + + " the [CN=foo,DC=example,DC=com] certificate is trusted in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])")); + } + + public void testDiagnosticMessageWhenServerCertificateHasNoKeyUsage() throws Exception { + final String subjectName = "CN=foo,DC=example,DC=com"; + + final X509Certificate certificate = mockCertificateWithIssuer( + subjectName, + MOCK_ENCODING_1, + 
Collections.singletonList(List.of(1, "foo@example.com")), + null, + null, + null + ); + X509Certificate[] chain = new X509Certificate[] { certificate }; + + final String peerHost = "192.168.1." + randomIntBetween(1, 128); + final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS); + final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS); + final SSLSession session = session(peerHost, cipherSuite, protocol); + final Map> trustIssuers = trust(certificate); + final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, + "xpack.monitoring.exporters.elastic-cloud.ssl", trustIssuers); + + assertThat(message, Matchers.equalTo("failed to establish trust with server at [" + peerHost + "];" + + " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]" + + ", fingerprint [" + MOCK_FINGERPRINT_1 + "], no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [" + cipherSuite + "] and protocol [" + protocol + "];" + + " the certificate does not have any DNS/IP subject alternative names;" + + " the certificate is self-issued;" + + " the [CN=foo,DC=example,DC=com] certificate is trusted in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])")); + } + + public void testDiagnosticMessageWhenServerCertificateHasKeyUsageAndNoExtendedKeyUsage() throws Exception { + final String subjectName = "CN=foo,DC=example,DC=com"; + + final boolean[] keyUsage = {true, false, true, true, true, false, false, false, false, false}; + final X509Certificate certificate = mockCertificateWithIssuer( + subjectName, + MOCK_ENCODING_1, + Collections.singletonList(List.of(1, "foo@example.com")), + null, + keyUsage, + null + ); + X509Certificate[] chain = new X509Certificate[] { certificate }; + + final String peerHost = "192.168.1." 
+ randomIntBetween(1, 128); + final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS); + final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS); + final SSLSession session = session(peerHost, cipherSuite, protocol); + final Map> trustIssuers = trust(certificate); + final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, + "xpack.monitoring.exporters.elastic-cloud.ssl", trustIssuers); + + assertThat(message, Matchers.equalTo("failed to establish trust with server at [" + peerHost + "];" + + " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]" + + ", fingerprint [" + MOCK_FINGERPRINT_1 + "]," + + " keyUsage [digitalSignature, keyEncipherment, dataEncipherment, keyAgreement]" + + " and no extendedKeyUsage;" + + " the session uses cipher suite [" + cipherSuite + "] and protocol [" + protocol + "];" + + " the certificate does not have any DNS/IP subject alternative names;" + + " the certificate is self-issued;" + + " the [CN=foo,DC=example,DC=com] certificate is trusted in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])")); + } + + public void testDiagnosticMessageWhenServerCertificateHasKeyUsageAndExtendedKeyUsage() throws Exception { + final String subjectName = "CN=foo,DC=example,DC=com"; + + final boolean[] keyUsage = {false, false, false, false, false, false, false, true, false}; + final X509Certificate certificate = mockCertificateWithIssuer( + subjectName, + MOCK_ENCODING_1, + Collections.singletonList(List.of(1, "foo@example.com")), + null, + keyUsage, + List.of("1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.2") + ); + X509Certificate[] chain = new X509Certificate[] { certificate }; + + final String peerHost = "192.168.1." 
+ randomIntBetween(1, 128); + final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS); + final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS); + final SSLSession session = session(peerHost, cipherSuite, protocol); + final Map> trustIssuers = trust(certificate); + final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, + "xpack.monitoring.exporters.elastic-cloud.ssl", trustIssuers); + + assertThat(message, Matchers.equalTo("failed to establish trust with server at [" + peerHost + "];" + + " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]" + + ", fingerprint [" + MOCK_FINGERPRINT_1 + "]," + + " keyUsage [encipherOnly] and extendedKeyUsage [serverAuth, clientAuth];" + + " the session uses cipher suite [" + cipherSuite + "] and protocol [" + protocol + "];" + + " the certificate does not have any DNS/IP subject alternative names;" + + " the certificate is self-issued;" + + " the [CN=foo,DC=example,DC=com] certificate is trusted in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])")); + } + + public void testDiagnosticMessageWhenServerCertificateHasOversizedKeyUsageAndUnrecognisedExtendedKeyUsage() throws Exception { + final String subjectName = "CN=foo,DC=example,DC=com"; + + final boolean[] keyUsage = { false, false, false, false, false, true, false, false, false, /* extra --> */ true, false, true }; + final X509Certificate certificate = mockCertificateWithIssuer( + subjectName, + MOCK_ENCODING_1, + Collections.singletonList(List.of(1, "foo@example.com")), + null, + keyUsage, + List.of("1.3.6.1.5.5.7.3.8", "1.3.6.1.5.5.7.3.12") + ); + X509Certificate[] chain = new X509Certificate[] { certificate }; + + final String peerHost = "192.168.1." 
+ randomIntBetween(1, 128); + final String cipherSuite = randomFrom(SslConfigurationLoader.DEFAULT_CIPHERS); + final String protocol = randomFrom(SslConfigurationLoader.DEFAULT_PROTOCOLS); + final SSLSession session = session(peerHost, cipherSuite, protocol); + final Map> trustIssuers = trust(certificate); + final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, + "xpack.monitoring.exporters.elastic-cloud.ssl", trustIssuers); + + assertThat(message, Matchers.equalTo("failed to establish trust with server at [" + peerHost + "];" + + " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]" + + ", fingerprint [" + MOCK_FINGERPRINT_1 + "]," + + " keyUsage [keyCertSign, #9, #11] and extendedKeyUsage [timeStamping, 1.3.6.1.5.5.7.3.12];" + + " the session uses cipher suite [" + cipherSuite + "] and protocol [" + protocol + "];" + " the certificate does not have any DNS/IP subject alternative names;" + " the certificate is self-issued;" + " the [CN=foo,DC=example,DC=com] certificate is trusted in this ssl context ([xpack.monitoring.exporters.elastic-cloud.ssl])")); @@ -336,8 +492,10 @@ public void testDiagnosticMessageWhenACertificateHasAnInvalidEncoding() throws E final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.security.transport.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.6];" + - " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]" + - " and invalid encoding [java.security.cert.CertificateEncodingException: MOCK INVALID ENCODING];" + + " the server provided a certificate with subject name [CN=foo,DC=example,DC=com]," + + " invalid encoding [java.security.cert.CertificateEncodingException: MOCK INVALID ENCODING]," + + " keyUsage [digitalSignature, nonRepudiation] and extendedKeyUsage [serverAuth, codeSigning];" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate does not have any subject alternative names;" + " the certificate is self-issued;" + " the [CN=foo,DC=example,DC=com] certificate is not trusted in this ssl context ([xpack.security.transport.ssl])")); @@ -352,7 +510,8 @@ public void testDiagnosticMessageForClientCertificate() throws Exception { "xpack.security.transport.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with client at [192.168.1.7];" + " the client provided a certificate with subject name [CN=cert1]" + - " and fingerprint [3bebe388a66362784afd6c51a9000961a4e10050];" + + ", fingerprint [3bebe388a66362784afd6c51a9000961a4e10050], no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate is issued by [CN=Test CA 1]" + " but the client did not provide a copy of the issuing certificate in the certificate chain;" + " the issuing certificate with fingerprint [2b7b0416391bdf86502505c23149022d2213dadc]" + @@ -375,7 +534,9 @@ public void testDiagnosticMessageWhenCaHasNewIssuingCertificate() throws Excepti final String message = SslDiagnostics.getTrustDiagnosticFailure(chain, SslDiagnostics.PeerType.SERVER, session, "xpack.security.authc.realms.saml.saml1.ssl", trustIssuers); assertThat(message, Matchers.equalTo("failed to establish trust with server at [192.168.1.4];" + - " the server provided a certificate with subject name [CN=cert1] and fingerprint 
[3bebe388a66362784afd6c51a9000961a4e10050];" + + " the server provided a certificate with subject name [CN=cert1], fingerprint [3bebe388a66362784afd6c51a9000961a4e10050]," + + " no keyUsage and no extendedKeyUsage;" + + " the session uses cipher suite [TLS_ECDHE_RSA_WITH_RC4_128_SHA] and protocol [SSLv3];" + " the certificate has subject alternative names [DNS:localhost,IP:127.0.0.1];" + " the certificate is issued by [CN=Test CA 1];" + " the certificate is signed by (subject [CN=Test CA 1]" + @@ -398,8 +559,21 @@ public X509Certificate cloneCertificateAsMock(X509Certificate clone) throws Cert } public X509Certificate mockCertificateWithIssuer(String principal, byte[] encoding, List> subjAltNames, - @Nullable X509Certificate issuer) throws CertificateException { + @Nullable X509Certificate issuer) throws CertificateException { + + final List extendedKeyUsage = List.of("1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.3"); + final boolean[] keyUsage = {true, true, false, false, false, false, false, false, false}; + + return mockCertificateWithIssuer(principal, encoding, subjAltNames, issuer, keyUsage, extendedKeyUsage); + } + private X509Certificate mockCertificateWithIssuer( + String principal, + byte[] encoding, + List> subjAltNames, + X509Certificate issuer, + boolean[] keyUsage, List extendedKeyUsage + ) throws CertificateParsingException, CertificateEncodingException { final X509Certificate cert = Mockito.mock(X509Certificate.class); final X500Principal x500Principal = new X500Principal(principal); final PublicKey key = Mockito.mock(PublicKey.class); @@ -410,7 +584,8 @@ public X509Certificate mockCertificateWithIssuer(String principal, byte[] encodi Mockito.when(cert.getIssuerX500Principal()).thenReturn(issuerPrincipal); Mockito.when(cert.getPublicKey()).thenReturn(key); Mockito.when(cert.getEncoded()).thenReturn(encoding); - + Mockito.when(cert.getExtendedKeyUsage()).thenReturn(extendedKeyUsage); + Mockito.when(cert.getKeyUsage()).thenReturn(keyUsage); return cert; } @@ -452,8 +627,14 @@ private Map> trust(Collection KeyStoreUtil.filter(ks, entry -> entry.getAlias().equals("cert1")) + ); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(p12)); + assertKeysLoaded(keyConfig, "cert1"); + } + public void testLoadMultipleKeyJksWithSeparateKeyPassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path jks = getDataPath("/certs/cert-all/certs.jks"); - final StoreKeyConfig keyConfig = new StoreKeyConfig(jks, JKS_PASS, "jks", "key-pass".toCharArray(), - KeyManagerFactory.getDefaultAlgorithm()); - assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(jks)); + final String jks = "cert-all/certs.jks"; + final StoreKeyConfig keyConfig = new StoreKeyConfig(jks, JKS_PASS, "jks", null, KEY_PASS, KEY_MGR_ALGORITHM, configBasePath); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(configBasePath.resolve(jks))); assertKeysLoaded(keyConfig, "cert1", "cert2"); } public void testKeyManagerFailsWithIncorrectStorePassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path jks = getDataPath("/certs/cert-all/certs.jks"); - final StoreKeyConfig keyConfig = new StoreKeyConfig(jks, P12_PASS, "jks", "key-pass".toCharArray(), - KeyManagerFactory.getDefaultAlgorithm()); - assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(jks)); - assertPasswordIsIncorrect(keyConfig, jks); + final String jks = "cert-all/certs.jks"; + final StoreKeyConfig 
keyConfig = new StoreKeyConfig(jks, P12_PASS, "jks", null, KEY_PASS, KEY_MGR_ALGORITHM, configBasePath); + final Path path = configBasePath.resolve(jks); + assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(path)); + assertPasswordIsIncorrect(keyConfig, path); } public void testKeyManagerFailsWithIncorrectKeyPassword() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path jks = getDataPath("/certs/cert-all/certs.jks"); - final StoreKeyConfig keyConfig = new StoreKeyConfig(jks, JKS_PASS, "jks", JKS_PASS, KeyManagerFactory.getDefaultAlgorithm()); + final StoreKeyConfig keyConfig = config(jks, JKS_PASS, "jks"); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(jks)); assertPasswordIsIncorrect(keyConfig, jks); } @@ -85,7 +113,7 @@ public void testKeyManagerFailsWithIncorrectKeyPassword() throws Exception { public void testKeyManagerFailsWithMissingKeystoreFile() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path path = getDataPath("/certs/cert-all/certs.jks").getParent().resolve("dne.jks"); - final StoreKeyConfig keyConfig = new StoreKeyConfig(path, JKS_PASS, "jks", JKS_PASS, KeyManagerFactory.getDefaultAlgorithm()); + final StoreKeyConfig keyConfig = config(path, JKS_PASS, "jks"); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(path)); assertFileNotFound(keyConfig, path); } @@ -104,7 +132,7 @@ public void testMissingKeyEntriesFailsWithMeaningfulMessage() throws Exception { ks = getDataPath("/certs/ca-all/ca.jks"); password = JKS_PASS; } - final StoreKeyConfig keyConfig = new StoreKeyConfig(ks, password, type, password, KeyManagerFactory.getDefaultAlgorithm()); + final StoreKeyConfig keyConfig = config(ks, password, type); assertThat(keyConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertNoPrivateKeyEntries(keyConfig, ks); } @@ -117,7 +145,7 @@ public void testKeyConfigReloadsFileContents() throws Exception { final Path p12 = createTempFile("cert", ".p12"); - final StoreKeyConfig keyConfig = new StoreKeyConfig(p12, P12_PASS, "PKCS12", P12_PASS, KeyManagerFactory.getDefaultAlgorithm()); + final StoreKeyConfig keyConfig = config(p12, P12_PASS, "PKCS12"); Files.copy(cert1, p12, StandardCopyOption.REPLACE_EXISTING); assertKeysLoaded(keyConfig, "cert1"); @@ -135,6 +163,15 @@ public void testKeyConfigReloadsFileContents() throws Exception { assertFileNotFound(keyConfig, p12); } + private StoreKeyConfig config(Path path, char[] password, String type) { + return config(path, password, type, null); + } + + private StoreKeyConfig config(Path path, char[] password, String type, Function filter) { + final String pathName = path == null ? null : path.toString(); + return new StoreKeyConfig(pathName, password, type, filter, password, KeyManagerFactory.getDefaultAlgorithm(), configBasePath); + } + private void assertKeysLoaded(StoreKeyConfig keyConfig, String... names) throws CertificateParsingException { final X509ExtendedKeyManager keyManager = keyConfig.createKeyManager(); assertThat(keyManager, notNullValue()); @@ -156,6 +193,17 @@ private void assertKeysLoaded(StoreKeyConfig keyConfig, String... 
names) throws Arrays.asList(IP_NAME, "127.0.0.1") )); } + + final List> keys = keyConfig.getKeys(true); + assertThat(keys, iterableWithSize(names.length)); + for (Tuple tup : keys) { + PrivateKey privateKey = tup.v1(); + assertThat(privateKey, notNullValue()); + assertThat(privateKey.getAlgorithm(), is("RSA")); + + final X509Certificate certificate = tup.v2(); + assertThat(certificate.getIssuerDN().getName(), is("CN=Test CA 1")); + } } private void assertKeysNotLoaded(StoreKeyConfig keyConfig, String... names) throws CertificateParsingException { @@ -192,7 +240,7 @@ private void assertFileNotFound(StoreKeyConfig keyConfig, Path file) { assertThat(exception.getMessage(), containsString("keystore")); assertThat(exception.getMessage(), containsString(file.toAbsolutePath().toString())); assertThat(exception.getMessage(), containsString("does not exist")); - assertThat(exception.getCause(), nullValue()); + assertThat(exception.getCause(), instanceOf(NoSuchFileException.class)); } private void assertNoPrivateKeyEntries(StoreKeyConfig keyConfig, Path file) { diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/StoreTrustConfigTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/StoreTrustConfigTests.java index bbe55688f35d5..91ab353758c9a 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/StoreTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/StoreTrustConfigTests.java @@ -10,11 +10,13 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import org.junit.Before; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; import java.io.IOException; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; @@ -25,7 +27,7 @@ import java.util.stream.Stream; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.instanceOf; public class StoreTrustConfigTests extends ESTestCase { @@ -33,19 +35,26 @@ public class StoreTrustConfigTests extends ESTestCase { private static final char[] JKS_PASS = "jks-pass".toCharArray(); private static final String DEFAULT_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); + private Path configBasePath; + + @Before + public void setupPath() { + configBasePath = getDataPath("/certs"); + } + public void testBuildTrustConfigFromPKCS12() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks = getDataPath("/certs/ca1/ca.p12"); - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); + final String ks = "ca1/ca.p12"; + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM, true, configBasePath); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolve(ks))); assertCertificateChain(trustConfig, "CN=Test CA 1"); } public void testBuildTrustConfigFromJKS() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks = getDataPath("/certs/ca-all/ca.jks"); - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, JKS_PASS, "jks", DEFAULT_ALGORITHM); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); + 
final String ks = "ca-all/ca.jks"; + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, JKS_PASS, "jks", DEFAULT_ALGORITHM, true, configBasePath); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(resolve(ks))); assertCertificateChain(trustConfig, "CN=Test CA 1", "CN=Test CA 2", "CN=Test CA 3"); } @@ -53,44 +62,50 @@ public void testBadKeyStoreFormatFails() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); final Path ks = createTempFile("ca", ".p12"); Files.write(ks, randomByteArrayOfLength(128), StandardOpenOption.APPEND); - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], randomFrom("PKCS12", "jks"), DEFAULT_ALGORITHM); + final String type = randomFrom("PKCS12", "jks"); + final String fileName = ks.toString(); + final StoreTrustConfig trustConfig = new StoreTrustConfig(fileName, new char[0], type, DEFAULT_ALGORITHM, true, configBasePath); assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); assertInvalidFileFormat(trustConfig, ks); } public void testMissingKeyStoreFailsWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks = getDataPath("/certs/ca-all/ca.p12").getParent().resolve("keystore.dne"); - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], randomFrom("PKCS12", "jks"), DEFAULT_ALGORITHM); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); - assertFileNotFound(trustConfig, ks); + final String ks = "ca-all/keystore.dne"; + final String type = randomFrom("PKCS12", "jks"); + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], type, DEFAULT_ALGORITHM, true, configBasePath); + final Path path = resolve(ks); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(path)); + assertFileNotFound(trustConfig, path); } public void testIncorrectPasswordFailsWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks = getDataPath("/certs/ca1/ca.p12"); - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], "PKCS12", DEFAULT_ALGORITHM); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); - assertPasswordIsIncorrect(trustConfig, ks); + final String ks = "ca1/ca.p12"; + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, new char[0], "PKCS12", DEFAULT_ALGORITHM, true, configBasePath); + final Path path = resolve(ks); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(path)); + assertPasswordIsIncorrect(trustConfig, path); } public void testMissingTrustEntriesFailsWithMeaningfulMessage() throws Exception { assumeFalse("Can't use JKS/PKCS12 keystores in a FIPS JVM", inFipsJvm()); - final Path ks; + final String ks; final char[] password; final String type; if (randomBoolean()) { type = "PKCS12"; - ks = getDataPath("/certs/cert-all/certs.p12"); + ks = "cert-all/certs.p12"; password = P12_PASS; } else { type = "jks"; - ks = getDataPath("/certs/cert-all/certs.jks"); + ks = "cert-all/certs.jks"; password = JKS_PASS; } - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, password, type, DEFAULT_ALGORITHM); - assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(ks)); - assertNoCertificateEntries(trustConfig, ks); + final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, password, type, DEFAULT_ALGORITHM, true, 
configBasePath); + final Path path = resolve(ks); + assertThat(trustConfig.getDependentFiles(), Matchers.containsInAnyOrder(path)); + assertNoCertificateEntries(trustConfig, path); } public void testTrustConfigReloadsKeysStoreContents() throws Exception { @@ -100,7 +115,8 @@ public void testTrustConfigReloadsKeysStoreContents() throws Exception { final Path ks = createTempFile("ca", "p12"); - final StoreTrustConfig trustConfig = new StoreTrustConfig(ks, P12_PASS, "PKCS12", DEFAULT_ALGORITHM); + final String fileName = ks.toString(); + final StoreTrustConfig trustConfig = new StoreTrustConfig(fileName, P12_PASS, "PKCS12", DEFAULT_ALGORITHM, true, configBasePath); Files.copy(ks1, ks, StandardCopyOption.REPLACE_EXISTING); assertCertificateChain(trustConfig, "CN=Test CA 1"); @@ -115,6 +131,10 @@ public void testTrustConfigReloadsKeysStoreContents() throws Exception { assertCertificateChain(trustConfig, "CN=Test CA 1", "CN=Test CA 2", "CN=Test CA 3"); } + private Path resolve(String name) { + return configBasePath.resolve(name); + } + private void assertCertificateChain(StoreTrustConfig trustConfig, String... caNames) { final X509ExtendedTrustManager trustManager = trustConfig.createTrustManager(); final X509Certificate[] issuers = trustManager.getAcceptedIssuers(); @@ -128,18 +148,18 @@ private void assertCertificateChain(StoreTrustConfig trustConfig, String... caNa private void assertInvalidFileFormat(StoreTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); - assertThat(exception.getMessage(), Matchers.containsString("cannot read")); - assertThat(exception.getMessage(), Matchers.containsString("keystore")); - assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); + assertThat(exception.getMessage(), containsString("cannot read")); + assertThat(exception.getMessage(), containsString("keystore")); + assertThat(exception.getMessage(), containsString(file.toAbsolutePath().toString())); assertThat(exception.getCause(), Matchers.instanceOf(IOException.class)); } private void assertFileNotFound(StoreTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); - assertThat(exception.getMessage(), Matchers.containsString("file does not exist")); - assertThat(exception.getMessage(), Matchers.containsString("keystore")); - assertThat(exception.getMessage(), Matchers.containsString(file.toAbsolutePath().toString())); - assertThat(exception.getCause(), nullValue()); + assertThat(exception.getMessage(), containsString("file does not exist")); + assertThat(exception.getMessage(), containsString("keystore")); + assertThat(exception.getMessage(), containsString(file.toAbsolutePath().toString())); + assertThat(exception.getCause(), instanceOf(NoSuchFileException.class)); } private void assertPasswordIsIncorrect(StoreTrustConfig trustConfig, Path key) { @@ -151,9 +171,9 @@ private void assertPasswordIsIncorrect(StoreTrustConfig trustConfig, Path key) { private void assertNoCertificateEntries(StoreTrustConfig trustConfig, Path file) { final SslConfigException exception = expectThrows(SslConfigException.class, trustConfig::createTrustManager); - assertThat(exception.getMessage(), Matchers.containsString("does not contain any trusted certificate entries")); - assertThat(exception.getMessage(), Matchers.containsString("truststore")); - assertThat(exception.getMessage(), 
Matchers.containsString(file.toAbsolutePath().toString())); + assertThat(exception.getMessage(), containsString("does not contain any trusted certificate entries")); + assertThat(exception.getMessage(), containsString("truststore")); + assertThat(exception.getMessage(), containsString(file.toAbsolutePath().toString())); } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/FilterXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/FilterXContentParser.java index fb9932fe3fbda..f52e7da9e14ce 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/FilterXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/FilterXContentParser.java @@ -33,6 +33,11 @@ public XContentType contentType() { return in.contentType(); } + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + in.allowDuplicateKeys(allowDuplicateKeys); + } + @Override public Token nextToken() throws IOException { return in.nextToken(); diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java index 5f5f59a21db24..6e6e40223761a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java @@ -12,12 +12,11 @@ import org.elasticsearch.core.RestApiVersion; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -43,92 +42,82 @@ public static class Entry { /** A name for the entry which is unique within the {@link #categoryClass}. */ public final ParseField name; + public final Function restApiCompatibility; + /** A parser capability of parser the entry's class. */ private final ContextParser parser; - /** Creates a new entry which can be stored by the registry. */ + /** + * Creates a new entry which can be stored by the registry. + */ public Entry(Class categoryClass, ParseField name, CheckedFunction parser) { - this.categoryClass = Objects.requireNonNull(categoryClass); - this.name = Objects.requireNonNull(name); - this.parser = Objects.requireNonNull((p, c) -> parser.apply(p)); + this(categoryClass, name, (p, c) -> parser.apply(p), name.getForRestApiVersion()); + } + + public Entry(Class categoryClass, ParseField name, CheckedFunction parser, + Function restApiCompatibility) { + this(categoryClass, name, (p, c) -> parser.apply(p), restApiCompatibility); } /** * Creates a new entry which can be stored by the registry. * Prefer {@link Entry#Entry(Class, ParseField, CheckedFunction)} unless you need a context to carry around while parsing. 
*/ public Entry(Class categoryClass, ParseField name, ContextParser parser) { + this(categoryClass, name, parser, name.getForRestApiVersion()); + } + + public Entry(Class categoryClass, ParseField name, ContextParser parser, + Function restApiCompatibility) { this.categoryClass = Objects.requireNonNull(categoryClass); this.name = Objects.requireNonNull(name); this.parser = Objects.requireNonNull(parser); + this.restApiCompatibility = restApiCompatibility; } } - private final Map, Map> registry; - private final Map, Map> compatibleRegistry; + private final Map, Map>> registry; - public NamedXContentRegistry(List entries){ - this(entries, Collections.emptyList()); - } - public NamedXContentRegistry(List entries, List compatibleEntries) { - this.registry = unmodifiableMap(getRegistry(entries)); - this.compatibleRegistry = unmodifiableMap(getCompatibleRegistry(compatibleEntries)); + public NamedXContentRegistry(List entries) { + this.registry = unmodifiableMap(createRegistry(entries)); } - private Map, Map> getCompatibleRegistry(List compatibleEntries) { - Map, Map> compatibleRegistry = new HashMap<>(registry); - List unseenEntries = new ArrayList<>(); - compatibleEntries.forEach(entry -> { - Map parsers = compatibleRegistry.get(entry.categoryClass); - if (parsers == null) { - unseenEntries.add(entry); - } else { - Map parsersCopy = new HashMap<>(parsers); - for (String name : entry.name.getAllNamesIncludedDeprecated()) { - parsersCopy.put(name, entry); //override the parser for the given name - } - compatibleRegistry.put(entry.categoryClass, parsersCopy); - } - } - ); - compatibleRegistry.putAll(getRegistry(unseenEntries)); - return compatibleRegistry; - } - private Map, Map> getRegistry(List entries){ + private Map, Map>> createRegistry(List entries){ if (entries.isEmpty()) { return emptyMap(); } - entries = new ArrayList<>(entries); - entries.sort((e1, e2) -> e1.categoryClass.getName().compareTo(e2.categoryClass.getName())); - Map, Map> registry = new HashMap<>(); - Map parsers = null; - Class currentCategory = null; + Map, Map>> registry = new HashMap<>(); for (Entry entry : entries) { - if (currentCategory != entry.categoryClass) { - if (currentCategory != null) { - // we've seen the last of this category, put it into the big map - registry.put(currentCategory, unmodifiableMap(parsers)); - } - parsers = new HashMap<>(); - currentCategory = entry.categoryClass; - } - for (String name : entry.name.getAllNamesIncludedDeprecated()) { - Object old = parsers.put(name, entry); - if (old != null) { - throw new IllegalArgumentException("NamedXContent [" + currentCategory.getName() + "][" + entry.name + "]" + - " is already registered for [" + old.getClass().getName() + "]," + - " cannot register [" + entry.parser.getClass().getName() + "]"); + if (RestApiVersion.minimumSupported().matches(entry.restApiCompatibility)) { + registerParsers(registry, entry, name, RestApiVersion.minimumSupported()); + } + if (RestApiVersion.current().matches(entry.restApiCompatibility)) { + registerParsers(registry, entry, name, RestApiVersion.current()); } } } - // handle the last category - registry.put(currentCategory, unmodifiableMap(parsers)); return registry; } + private void registerParsers(Map, Map>> registry, + Entry entry, + String name, + RestApiVersion restApiVersion) { + final Map, Map> classRegistry = + registry.computeIfAbsent(restApiVersion, (v) -> new HashMap<>()); + final Map parsers = + classRegistry.computeIfAbsent(entry.categoryClass, (v) -> new HashMap<>()); + Object old = parsers.put(name, 
entry); + if (old != null) { + throw new IllegalArgumentException("NamedXContent [" + entry.categoryClass.getName() + "][" + entry.name + "]" + + " is already registered for [" + old.getClass().getName() + "]," + + " cannot register [" + entry.parser.getClass().getName() + "]"); + } + } + /** * Parse a named object, throwing an exception if the parser isn't found. Throws an {@link NamedObjectNotFoundException} if the * {@code categoryClass} isn't registered because this is almost always a bug. Throws an {@link NamedObjectNotFoundException} if the @@ -137,9 +126,14 @@ private Map, Map> getRegistry(List entries){ * @throws NamedObjectNotFoundException if the categoryClass or name is not registered */ public T parseNamedObject(Class categoryClass, String name, XContentParser parser, C context) throws IOException { + Entry entry = lookupParser(categoryClass, name, parser); + return categoryClass.cast(entry.parser.parse(parser, context)); + } - Map parsers = parser.getRestApiVersion() == RestApiVersion.minimumSupported() ? - compatibleRegistry.get(categoryClass) : registry.get(categoryClass); + //scope for testing + public Entry lookupParser(Class categoryClass, String name, XContentParser parser) { + Map parsers = registry.getOrDefault(parser.getRestApiVersion(), emptyMap()) + .get(categoryClass); if (parsers == null) { if (registry.isEmpty()) { // The "empty" registry will never work so we throw a better exception as a hint. @@ -157,7 +151,7 @@ public T parseNamedObject(Class categoryClass, String name, XContentPa throw new XContentParseException(parser.getTokenLocation(), "unable to parse " + categoryClass.getSimpleName() + " with name [" + name + "]: parser didn't match"); } - return categoryClass.cast(entry.parser.parse(parser, context)); + return entry; } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 6dca3b7c94cb8..7094c77a771a0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -112,6 +112,8 @@ enum NumberType { XContentType contentType(); + void allowDuplicateKeys(boolean allowDuplicateKeys); + Token nextToken() throws IOException; void skipChildren() throws IOException; diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index 76759a57a6040..4927717ab8dba 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -43,6 +43,11 @@ public XContentType contentType() { return parser.contentType(); } + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + parser.allowDuplicateKeys(allowDuplicateKeys); + } + @Override public Token nextToken() throws IOException { if (level > 0) { diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index 0fd9e85fa48e3..baac5d6dc2bfe 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -22,6 +22,7 @@ import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -82,13 +83,13 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data) throws IOException { - return new CborXContentParser(xContentRegistry, deprecationHandler, cborFactory.createParser(data)); + return createParser(xContentRegistry, deprecationHandler, data, 0, data.length); } @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length) throws IOException { - return new CborXContentParser(xContentRegistry, deprecationHandler, cborFactory.createParser(data, offset, length)); + return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } @Override @@ -109,7 +110,12 @@ public XContentParser createParserForCompatibility(NamedXContentRegistry xConten public XContentParser createParserForCompatibility(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length, RestApiVersion restApiVersion) throws IOException { - return new CborXContentParser(xContentRegistry, deprecationHandler, cborFactory.createParser(data, offset, length), restApiVersion); + return new CborXContentParser( + xContentRegistry, + deprecationHandler, + cborFactory.createParser(new ByteArrayInputStream(data, offset, length)), + restApiVersion + ); } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java index fed34b83a4028..d6de473a35aa4 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java @@ -32,4 +32,9 @@ public CborXContentParser(NamedXContentRegistry xContentRegistry, public XContentType contentType() { return XContentType.CBOR; } + + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + throw new UnsupportedOperationException("Allowing duplicate keys after the parser has been created is not possible for CBOR"); + } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java index f7f4c098c772c..7e145b2b49a32 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -83,13 +84,13 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data) throws IOException { - return new JsonXContentParser(xContentRegistry, deprecationHandler, 
jsonFactory.createParser(data)); + return createParser(xContentRegistry, deprecationHandler, data, 0, data.length); } @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length) throws IOException { - return new JsonXContentParser(xContentRegistry, deprecationHandler, jsonFactory.createParser(data, offset, length)); + return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } @Override @@ -109,7 +110,12 @@ public XContentParser createParserForCompatibility(NamedXContentRegistry xConten public XContentParser createParserForCompatibility(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length, RestApiVersion restApiVersion) throws IOException { - return new JsonXContentParser(xContentRegistry, deprecationHandler, jsonFactory.createParser(data, offset, length), restApiVersion); + return new JsonXContentParser( + xContentRegistry, + deprecationHandler, + jsonFactory.createParser(new ByteArrayInputStream(data, offset, length)), + restApiVersion + ); } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index 1cc0e9cbd6b13..9b73847b4db0c 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -44,6 +44,11 @@ public XContentType contentType() { return XContentType.JSON; } + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + parser.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, allowDuplicateKeys == false); + } + @Override public Token nextToken() throws IOException { return convertToken(parser.nextToken()); diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index 0faa82f002b20..5d826dea77e03 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -84,13 +85,13 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data) throws IOException { - return new SmileXContentParser(xContentRegistry, deprecationHandler, smileFactory.createParser(data)); + return createParser(xContentRegistry, deprecationHandler, data, 0, data.length); } @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length) throws IOException { - return new SmileXContentParser(xContentRegistry, deprecationHandler, smileFactory.createParser(data, offset, length)); + return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } @Override @@ -110,7 
+111,11 @@ public XContentParser createParserForCompatibility(NamedXContentRegistry xConten public XContentParser createParserForCompatibility(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length, RestApiVersion restApiVersion) throws IOException { - return new SmileXContentParser(xContentRegistry, deprecationHandler, smileFactory.createParser(data, offset, length), - restApiVersion); + return new SmileXContentParser( + xContentRegistry, + deprecationHandler, + smileFactory.createParser(new ByteArrayInputStream(data, offset, length)), + restApiVersion + ); } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java index f78064a7d5309..c743849cbca46 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java @@ -32,4 +32,9 @@ public SmileXContentParser(NamedXContentRegistry xContentRegistry, public XContentType contentType() { return XContentType.SMILE; } + + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + throw new UnsupportedOperationException("Allowing duplicate keys after the parser has been created is not possible for Smile"); + } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java index b87fe9c5c9b86..fa967d8663e85 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java @@ -90,6 +90,11 @@ public XContentType contentType() { return xContentType; } + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + throw new UnsupportedOperationException("Allowing duplicate keys is not possible for maps"); + } + @Override public Token nextToken() throws IOException { if (iterator == null) { @@ -158,7 +163,7 @@ public Object objectBytes() throws IOException { @Override public boolean hasTextCharacters() { - throw new UnsupportedOperationException("use text() instead"); + return false; } @Override diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java index 92fdbfcd43967..4c8e7f0f73345 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -77,13 +78,13 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data) throws IOException { - return new YamlXContentParser(xContentRegistry, deprecationHandler, yamlFactory.createParser(data)); + return createParser(xContentRegistry, deprecationHandler, data, 0, data.length); } @Override public 
XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length) throws IOException { - return new YamlXContentParser(xContentRegistry, deprecationHandler, yamlFactory.createParser(data, offset, length)); + return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } @Override @@ -103,8 +104,12 @@ public XContentParser createParserForCompatibility(NamedXContentRegistry xConten public XContentParser createParserForCompatibility(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length, RestApiVersion restApiVersion) throws IOException { - return new YamlXContentParser(xContentRegistry, deprecationHandler, yamlFactory.createParser(data, offset, length), - restApiVersion); + return new YamlXContentParser( + xContentRegistry, + deprecationHandler, + yamlFactory.createParser(new ByteArrayInputStream(data, offset, length)), + restApiVersion + ); } diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java index 49071b968f29a..afae071adc83e 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.MapXContentParser; import org.elasticsearch.test.ESTestCase; @@ -57,19 +58,51 @@ public void testSimpleMap() throws IOException { }); } - public void testRandomObject() throws IOException { compareTokens(builder -> generateRandomObject(builder, randomIntBetween(0, 10))); } - public void compareTokens(CheckedConsumer consumer) throws IOException { + /** + * Assert that {@link XContentParser#hasTextCharacters()} returns false because + * we don't support {@link XContentParser#textCharacters()}. 
+ */ + public void testHasTextCharacters() throws IOException { + assertFalse( + new MapXContentParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + Map.of("a", "b"), + randomFrom(XContentType.values()) + ).hasTextCharacters() + ); + } + + public void testCopyCurrentStructure() throws IOException { + try ( + XContentParser parser = new MapXContentParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + Map.of("a", "b"), + randomFrom(XContentType.values()) + ) + ) { + try ( + XContentBuilder builder = JsonXContent.contentBuilder().copyCurrentStructure(parser); + XContentParser copied = createParser(builder) + ) { + assertEquals(copied.map(), Map.of("a", "b")); + } + } + } + + private void compareTokens(CheckedConsumer consumer) throws IOException { for (XContentType xContentType : EnumSet.allOf(XContentType.class)) { logger.info("--> testing with xcontent type: {}", xContentType); compareTokens(consumer, xContentType); } } - public void compareTokens(CheckedConsumer consumer, XContentType xContentType) throws IOException { + private void compareTokens(CheckedConsumer consumer, XContentType xContentType) throws IOException { try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { consumer.accept(builder); final Map map; diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 4b4cdfd4d75a4..a174356eaa533 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -25,11 +25,16 @@ dependencies { compileOnly project(':modules:lang-painless') } +def v7compatibilityNotSupportedTests = { + return [ + //marked as not needing compatible api + 'indices.analyze/10_analyze/htmlStrip_deprecated', // Cleanup versioned deprecations in analysis #41560 + 'analysis-common/40_token_filters/delimited_payload_filter_error', //Remove preconfigured delimited_payload_filter #43686 + 'analysis-common/20_analyzers/standard_html_strip', // Cleanup versioned deprecations in analysis #41560 + 'search.query/50_queries_with_synonyms/Test common terms query with stacked tokens', // #42654 - `common` query throws an exception + ] +} + tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'search.query/50_queries_with_synonyms/Test common terms query with stacked tokens', - 'indices.analyze/10_analyze/htmlStrip_deprecated', - 'analysis-common/40_token_filters/delimited_payload_filter_error', - 'analysis-common/20_analyzers/standard_html_strip' - ].join(',') + systemProperty 'tests.rest.blacklist', v7compatibilityNotSupportedTests().join(',') } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 97b6f29a91324..6f9f8a7f480cc 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -44,10 +44,6 @@ tasks.named("thirdPartyAudit").configure { ) } -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'ingest/80_foreach/Test foreach Processor', - 'ingest/230_change_target_index/Test Change Target Index with Explicit Pipeline', - 'ingest/230_change_target_index/Test Change Target Index with Default Pipeline' - ].join(',') -} +tasks.named("transformV7RestTests").configure({ task -> + task.addAllowedWarningRegex("\\[types removal\\].*") +}) diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java index 
fcadde8b47f3d..557cf214cc453 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java @@ -13,6 +13,7 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.ValueSource; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; @@ -71,10 +72,15 @@ public AppendProcessor create(Map registry, String pr String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value"); boolean allowDuplicates = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "allow_duplicates", true); - TemplateScript.Factory compiledTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, - "field", field, scriptService); - return new AppendProcessor(processorTag, description, compiledTemplate, ValueSource.wrap(value, scriptService), - allowDuplicates); + TemplateScript.Factory compiledTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", field, scriptService); + String mediaType = ConfigurationUtils.readMediaTypeProperty(TYPE, processorTag, config, "media_type", "application/json"); + return new AppendProcessor( + processorTag, + description, + compiledTemplate, + ValueSource.wrap(value, scriptService, Map.of(Script.CONTENT_TYPE_OPTION, mediaType)), + allowDuplicates + ); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java index 562017d38694d..76fdfbf03747e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java @@ -13,6 +13,7 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; +import java.util.ArrayList; import java.util.Map; public final class DotExpanderProcessor extends AbstractProcessor { @@ -21,11 +22,17 @@ public final class DotExpanderProcessor extends AbstractProcessor { private final String path; private final String field; + private final boolean override; DotExpanderProcessor(String tag, String description, String path, String field) { + this(tag, description, path, field, false); + } + + DotExpanderProcessor(String tag, String description, String path, String field, boolean override) { super(tag, description); this.path = path; this.field = field; + this.override = override; } @Override @@ -41,10 +48,29 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { map = ingestDocument.getSourceAndMetadata(); } + if (this.field.equals("*")) { + for (String key : new ArrayList<>(map.keySet())) { + if (key.indexOf('.') > 0) { + path = this.path != null ? this.path + "." 
+ key : key; + expandDot(ingestDocument, path, key, map); + } + } + } else { + expandDot(ingestDocument, path, field, map); + } + + return ingestDocument; + } + + private void expandDot(IngestDocument ingestDocument, String path, String field, Map map) { if (map.containsKey(field)) { if (ingestDocument.hasField(path)) { Object value = map.remove(field); - ingestDocument.appendFieldValue(path, value); + if (override) { + ingestDocument.setFieldValue(path, value); + } else { + ingestDocument.appendFieldValue(path, value); + } } else { // check whether we actually can expand the field in question into an object field. // part of the path may already exist and if part of it would be a value field (string, integer etc.) @@ -66,7 +92,6 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { ingestDocument.setFieldValue(path, value); } } - return ingestDocument; } @Override @@ -88,9 +113,9 @@ public static final class Factory implements Processor.Factory { public Processor create(Map processorFactories, String tag, String description, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field"); - if (field.contains(".") == false) { + if (field.contains(".") == false && field.equals("*") == false) { throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field", - "field does not contain a dot"); + "field does not contain a dot and is not a wildcard"); } if (field.indexOf('.') == 0 || field.lastIndexOf('.') == field.length() - 1) { throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field", @@ -106,7 +131,8 @@ public Processor create(Map processorFactories, Strin } String path = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "path"); - return new DotExpanderProcessor(tag, null, path, field); + boolean override = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, "override", false); + return new DotExpanderProcessor(tag, null, path, field, override); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index 2786b77ad5868..9bb5a0c3fa430 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -70,7 +70,7 @@ public Map getProcessors(Processor.Parameters paramet entry(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService)), entry(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory()), entry(JsonProcessor.TYPE, new JsonProcessor.Factory()), - entry(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()), + entry(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory(parameters.scriptService)), entry(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()), entry(BytesProcessor.TYPE, new BytesProcessor.Factory()), entry(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)), diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index bd84e10fa8fca..68c1db0023fa7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ 
-21,6 +21,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; @@ -36,12 +37,17 @@ public final class JsonProcessor extends AbstractProcessor { private final String field; private final String targetField; private final boolean addToRoot; + private final ConflictStrategy addToRootConflictStrategy; + private final boolean allowDuplicateKeys; - JsonProcessor(String tag, String description, String field, String targetField, boolean addToRoot) { + JsonProcessor(String tag, String description, String field, String targetField, boolean addToRoot, + ConflictStrategy addToRootConflictStrategy, boolean allowDuplicateKeys) { super(tag, description); this.field = field; this.targetField = targetField; this.addToRoot = addToRoot; + this.addToRootConflictStrategy = addToRootConflictStrategy; + this.allowDuplicateKeys = allowDuplicateKeys; } public String getField() { @@ -56,11 +62,16 @@ boolean isAddToRoot() { return addToRoot; } - public static Object apply(Object fieldValue) { + public ConflictStrategy getAddToRootConflictStrategy() { + return addToRootConflictStrategy; + } + + public static Object apply(Object fieldValue, boolean allowDuplicateKeys) { BytesReference bytesRef = fieldValue == null ? new BytesArray("null") : new BytesArray(fieldValue.toString()); try (InputStream stream = bytesRef.streamInput(); XContentParser parser = JsonXContent.jsonXContent .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) { + parser.allowDuplicateKeys(allowDuplicateKeys); XContentParser.Token token = parser.nextToken(); Object value = null; if (token == XContentParser.Token.VALUE_NULL) { @@ -84,23 +95,47 @@ public static Object apply(Object fieldValue) { } } - public static void apply(Map ctx, String fieldName) { - Object value = apply(ctx.get(fieldName)); + public static void apply(Map ctx, String fieldName, boolean allowDuplicateKeys, ConflictStrategy conflictStrategy) { + Object value = apply(ctx.get(fieldName), allowDuplicateKeys); if (value instanceof Map) { @SuppressWarnings("unchecked") - Map map = (Map) value; + Map map = (Map) value; + if (conflictStrategy == ConflictStrategy.MERGE) { + recursiveMerge(ctx, map); + } else { ctx.putAll(map); + } } else { throw new IllegalArgumentException("cannot add non-map fields to root of document"); } } + public static void recursiveMerge(Map target, Map from) { + for (String key : from.keySet()) { + if (target.containsKey(key)) { + Object targetValue = target.get(key); + Object fromValue = from.get(key); + if (targetValue instanceof Map && fromValue instanceof Map) { + @SuppressWarnings("unchecked") + Map targetMap = (Map) targetValue; + @SuppressWarnings("unchecked") + Map fromMap = (Map) fromValue; + recursiveMerge(targetMap, fromMap); + } else { + target.put(key, fromValue); + } + } else { + target.put(key, from.get(key)); + } + } + } + @Override public IngestDocument execute(IngestDocument document) throws Exception { if (addToRoot) { - apply(document.getSourceAndMetadata(), field); + apply(document.getSourceAndMetadata(), field, allowDuplicateKeys, addToRootConflictStrategy); } else { - document.setFieldValue(targetField, apply(document.getFieldValue(field, Object.class))); + document.setFieldValue(targetField, apply(document.getFieldValue(field, Object.class), allowDuplicateKeys)); } return document; } @@ -110,24 +145,58 @@ public String getType() { return TYPE; } + public 
enum ConflictStrategy { + REPLACE, + MERGE; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + + public static ConflictStrategy fromString(String conflictStrategy) { + return ConflictStrategy.valueOf(conflictStrategy.toUpperCase(Locale.ROOT)); + } + } + public static final class Factory implements Processor.Factory { + @Override public JsonProcessor create(Map registry, String processorTag, String description, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "target_field"); boolean addToRoot = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "add_to_root", false); + boolean allowDuplicateKeys = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "allow_duplicate_keys", false); + String conflictStrategyString = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, + "add_to_root_conflict_strategy"); + boolean hasConflictStrategy = conflictStrategyString != null; + if (conflictStrategyString == null) { + conflictStrategyString = ConflictStrategy.REPLACE.name(); + } + ConflictStrategy addToRootConflictStrategy; + try { + addToRootConflictStrategy = ConflictStrategy.fromString(conflictStrategyString); + } catch (IllegalArgumentException e) { + throw newConfigurationException(TYPE, processorTag, "add_to_root_conflict_strategy", "conflict strategy [" + + conflictStrategyString + "] not supported, cannot convert field."); + } if (addToRoot && targetField != null) { throw newConfigurationException(TYPE, processorTag, "target_field", "Cannot set a target field while also setting `add_to_root` to true"); } + if (addToRoot == false && hasConflictStrategy) { + throw newConfigurationException(TYPE, processorTag, "add_to_root_conflict_strategy", + "Cannot set `add_to_root_conflict_strategy` if `add_to_root` is false"); + } if (targetField == null) { targetField = field; } - return new JsonProcessor(processorTag, description, field, targetField, addToRoot); + return new JsonProcessor(processorTag, description, field, targetField, addToRoot, addToRootConflictStrategy, + allowDuplicateKeys); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java index a20686a65c14a..d46ed71f68186 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/KeyValueProcessor.java @@ -13,6 +13,8 @@ import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.TemplateScript; import java.util.Collections; import java.util.List; @@ -32,17 +34,17 @@ public final class KeyValueProcessor extends AbstractProcessor { private static final Pattern STRIP_BRACKETS = Pattern.compile("(^[\\(\\[<\"'])|([\\]\\)>\"']$)"); - private final String field; + private final TemplateScript.Factory field; private final String fieldSplit; private final String valueSplit; private final Set includeKeys; private final Set excludeKeys; - private final String targetField; + private final TemplateScript.Factory targetField; private final boolean ignoreMissing; private final 
Consumer execution; - KeyValueProcessor(String tag, String description, String field, String fieldSplit, String valueSplit, Set includeKeys, - Set excludeKeys, String targetField, boolean ignoreMissing, + KeyValueProcessor(String tag, String description, TemplateScript.Factory field, String fieldSplit, String valueSplit, + Set includeKeys, Set excludeKeys, TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, boolean stripBrackets, String prefix) { super(tag, description); this.field = field; @@ -58,9 +60,9 @@ public final class KeyValueProcessor extends AbstractProcessor { ); } - private static Consumer buildExecution(String fieldSplit, String valueSplit, String field, + private static Consumer buildExecution(String fieldSplit, String valueSplit, TemplateScript.Factory field, Set includeKeys, Set excludeKeys, - String targetField, boolean ignoreMissing, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, boolean stripBrackets, String prefix) { final Predicate keyFilter; @@ -77,19 +79,7 @@ private static Consumer buildExecution(String fieldSplit, String keyFilter = key -> includeKeys.contains(key) && excludeKeys.contains(key) == false; } } - final String fieldPathPrefix; - String keyPrefix = prefix == null ? "" : prefix; - if (targetField == null) { - fieldPathPrefix = keyPrefix; - } else { - fieldPathPrefix = targetField + "." + keyPrefix; - } - final Function keyPrefixer; - if (fieldPathPrefix.isEmpty()) { - keyPrefixer = val -> val; - } else { - keyPrefixer = val -> fieldPathPrefix + val; - } + final Function fieldSplitter = buildSplitter(fieldSplit, true); Function valueSplitter = buildSplitter(valueSplit, false); final Function keyTrimmer = buildTrimmer(trimKey); @@ -101,17 +91,43 @@ private static Consumer buildExecution(String fieldSplit, String } final Function valueTrimmer = buildTrimmer(trimValue); return document -> { - String value = document.getFieldValue(field, String.class, ignoreMissing); + String target = ""; + if (targetField != null) { + target = document.renderTemplate(targetField); + } + + final String fieldPathPrefix; + String keyPrefix = prefix == null ? "" : prefix; + if (target.isEmpty()) { + fieldPathPrefix = keyPrefix; + } else { + fieldPathPrefix = target + "." 
+ keyPrefix; + } + final Function keyPrefixer; + if (fieldPathPrefix.isEmpty()) { + keyPrefixer = val -> val; + } else { + keyPrefixer = val -> fieldPathPrefix + val; + } + String path = document.renderTemplate(field); + if (path.isEmpty() || document.hasField(path, true) == false) { + if (ignoreMissing) { + return; + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } + } + String value = document.getFieldValue(path, String.class, ignoreMissing); if (value == null) { if (ignoreMissing) { return; } - throw new IllegalArgumentException("field [" + field + "] is null, cannot extract key-value pairs."); + throw new IllegalArgumentException("field [" + path + "] is null, cannot extract key-value pairs."); } for (String part : fieldSplitter.apply(value)) { String[] kv = valueSplitter.apply(part); if (kv.length != 2) { - throw new IllegalArgumentException("field [" + field + "] does not contain value_split [" + valueSplit + "]"); + throw new IllegalArgumentException("field [" + path + "] does not contain value_split [" + valueSplit + "]"); } String key = keyTrimmer.apply(kv[0]); if (keyFilter.test(key)) { @@ -140,7 +156,7 @@ private static Function buildSplitter(String split, boolean fi } } - String getField() { + TemplateScript.Factory getField() { return field; } @@ -160,7 +176,7 @@ Set getExcludeKeys() { return excludeKeys; } - String getTargetField() { + TemplateScript.Factory getTargetField() { return targetField; } @@ -188,11 +204,25 @@ public String getType() { } public static class Factory implements Processor.Factory { + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public KeyValueProcessor create(Map registry, String processorTag, String description, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + TemplateScript.Factory fieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, + "field", field, scriptService); String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = null; + if (targetField != null) { + targetFieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, + "target_field", targetField, scriptService); + } + String fieldSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field_split"); String valueSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "value_split"); String trimKey = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "trim_key"); @@ -212,8 +242,8 @@ public KeyValueProcessor create(Map registry, String } boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); return new KeyValueProcessor( - processorTag, description, field, fieldSplit, valueSplit, includeKeys, excludeKeys, targetField, ignoreMissing, - trimKey, trimValue, stripBrackets, prefix + processorTag, description, fieldTemplate, fieldSplit, valueSplit, includeKeys, excludeKeys, targetFieldTemplate, + ignoreMissing, trimKey, trimValue, stripBrackets, prefix ); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java index c1d56b6c333df..80f2aafd310f4 100644 --- 
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java @@ -59,7 +59,7 @@ public static String uppercase(String value) { * @return structured JSON object */ public static Object json(Object fieldValue) { - return JsonProcessor.apply(fieldValue); + return JsonProcessor.apply(fieldValue, false); } /** @@ -72,7 +72,7 @@ public static Object json(Object fieldValue) { * contains the JSON string */ public static void json(Map map, String field) { - JsonProcessor.apply(map, field); + JsonProcessor.apply(map, field, false, JsonProcessor.ConflictStrategy.REPLACE); } /** diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java index 3527f0113b1a8..76eec9778fa79 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RenameProcessor.java @@ -51,7 +51,7 @@ boolean isIgnoreMissing() { @Override public IngestDocument execute(IngestDocument document) { String path = document.renderTemplate(field); - if (document.hasField(path, true) == false) { + if (path.isEmpty() || document.hasField(path, true) == false) { if (ignoreMissing) { return document; } else { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java index f351165503152..060aad95971c8 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -20,6 +21,7 @@ import java.util.Map; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.containsString; public class AppendProcessorFactoryTests extends ESTestCase { @@ -92,4 +94,26 @@ public void testInvalidMustacheTemplate() throws Exception { assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script")); assertThat(exception.getMetadata("es.processor_tag").get(0), equalTo(processorTag)); } + + public void testMediaType() throws Exception { + // valid media type + String expectedMediaType = randomFrom(ConfigurationUtils.VALID_MEDIA_TYPES); + Map config = new HashMap<>(); + config.put("field", "field1"); + config.put("value", "value1"); + config.put("media_type", expectedMediaType); + String processorTag = randomAlphaOfLength(10); + AppendProcessor appendProcessor = factory.create(null, processorTag, null, config); + assertThat(appendProcessor.getTag(), equalTo(processorTag)); + + // invalid media type + expectedMediaType = randomValueOtherThanMany(m -> Arrays.asList(ConfigurationUtils.VALID_MEDIA_TYPES).contains(m), + () -> randomAlphaOfLengthBetween(5, 9)); + final Map config2 = new HashMap<>(); + config2.put("field", "field1"); + config2.put("value", "value1"); + config2.put("media_type", expectedMediaType); + ElasticsearchException e = 
expectThrows(ElasticsearchException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(e.getMessage(), containsString("property does not contain a supported media type [" + expectedMediaType + "]")); + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java index eebfd110fffc5..a153f736a3662 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorFactoryTests.java @@ -66,7 +66,7 @@ public void testCreate_invalidFields() throws Exception { Map config = new HashMap<>(); config.put("field", field); Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", null, config)); - assertThat(e.getMessage(), equalTo("[field] field does not contain a dot")); + assertThat(e.getMessage(), equalTo("[field] field does not contain a dot and is not a wildcard")); } fields = new String[] {".a", "a.", "."}; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java index 391389b1c9021..d3349302b1a22 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java @@ -166,4 +166,54 @@ public void testEscapeFields_doNothingIfFieldNotInSourceDoc() throws Exception { assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1")); } + public void testOverride() throws Exception { + Map source = new HashMap<>(); + Map inner = new HashMap<>(); + inner.put("bar", "baz1"); + inner.put("qux", "quux"); + source.put("foo", inner); + source.put("foo.bar", "baz2"); + IngestDocument document = new IngestDocument(source, Map.of()); + DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, null, "foo.bar", true); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(2)); + assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz2")); + assertThat(document.getFieldValue("foo.qux", String.class), equalTo("quux")); + } + + public void testWildcard() throws Exception { + Map source = new HashMap<>(); + source.put("foo.bar", "baz"); + source.put("qux.quux", "corge"); + IngestDocument document = new IngestDocument(source, Map.of()); + DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, null, "*"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz")); + assertThat(document.getFieldValue("qux", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("qux.quux", String.class), equalTo("corge")); + + source = new HashMap<>(); + Map inner = new HashMap<>(); + inner.put("bar.baz", "qux"); + source.put("foo", inner); + document = new IngestDocument(source, Map.of()); + processor = new DotExpanderProcessor("_tag", null, "foo", "*"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", 
Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("qux")); + + source = new HashMap<>(); + inner = new HashMap<>(); + inner.put("bar.baz", "qux"); + source.put("foo", inner); + document = new IngestDocument(source, Map.of()); + processor = new DotExpanderProcessor("_tag", null, null, "*"); + processor.execute(document); + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + IngestDocument finalDocument = document; + expectThrows(IllegalArgumentException.class, () -> finalDocument.getFieldValue("foo.bar", Map.class)); + } + } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java index eb3755ae676e8..e48158c76ed02 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java @@ -77,4 +77,42 @@ public void testCreateWithBothTargetFieldAndAddToRoot() throws Exception { () -> FACTORY.create(null, randomAlphaOfLength(10), null, config)); assertThat(exception.getMessage(), equalTo("[target_field] Cannot set a target field while also setting `add_to_root` to true")); } + + public void testReplaceMergeStrategy() throws Exception { + JsonProcessor jsonProcessor = getJsonProcessorWithMergeStrategy(null, true); + assertThat(jsonProcessor.getAddToRootConflictStrategy(), equalTo(JsonProcessor.ConflictStrategy.REPLACE)); + + jsonProcessor = getJsonProcessorWithMergeStrategy("replace", true); + assertThat(jsonProcessor.getAddToRootConflictStrategy(), equalTo(JsonProcessor.ConflictStrategy.REPLACE)); + } + + public void testRecursiveMergeStrategy() throws Exception { + JsonProcessor jsonProcessor = getJsonProcessorWithMergeStrategy("merge", true); + assertThat(jsonProcessor.getAddToRootConflictStrategy(), equalTo(JsonProcessor.ConflictStrategy.MERGE)); + } + + public void testMergeStrategyWithoutAddToRoot() throws Exception { + ElasticsearchException exception = expectThrows(ElasticsearchParseException.class, + () -> getJsonProcessorWithMergeStrategy("replace", false)); + assertThat(exception.getMessage(), + equalTo("[add_to_root_conflict_strategy] Cannot set `add_to_root_conflict_strategy` if `add_to_root` is false")); + } + + public void testUnknownMergeStrategy() throws Exception { + ElasticsearchException exception = expectThrows(ElasticsearchParseException.class, + () -> getJsonProcessorWithMergeStrategy("foo", true)); + assertThat(exception.getMessage(), + equalTo("[add_to_root_conflict_strategy] conflict strategy [foo] not supported, cannot convert field.")); + } + + private JsonProcessor getJsonProcessorWithMergeStrategy(String mergeStrategy, boolean addToRoot) throws Exception { + String randomField = randomAlphaOfLength(10); + Map config = new HashMap<>(); + config.put("field", randomField); + config.put("add_to_root", addToRoot); + if (mergeStrategy != null) { + config.put("add_to_root_conflict_strategy", mergeStrategy); + } + return FACTORY.create(null, randomAlphaOfLength(10), null, config); + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java index a6e8e85679341..c1ed2c49c36a4 100644 --- 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.ingest.common.JsonProcessor.ConflictStrategy.MERGE; +import static org.elasticsearch.ingest.common.JsonProcessor.ConflictStrategy.REPLACE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -32,7 +34,7 @@ public void testExecute() throws Exception { String processorTag = randomAlphaOfLength(3); String randomField = randomAlphaOfLength(3); String randomTargetField = randomAlphaOfLength(2); - JsonProcessor jsonProcessor = new JsonProcessor(processorTag, null, randomField, randomTargetField, false); + JsonProcessor jsonProcessor = new JsonProcessor(processorTag, null, randomField, randomTargetField, false, REPLACE, false); Map document = new HashMap<>(); Map randomJsonMap = RandomDocumentPicks.randomSource(random()); @@ -47,7 +49,7 @@ public void testExecute() throws Exception { } public void testInvalidValue() { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); document.put("field", "blah blah"); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); @@ -58,7 +60,7 @@ public void testInvalidValue() { } public void testByteArray() { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); document.put("field", new byte[] { 0, 1 }); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); @@ -73,7 +75,7 @@ public void testByteArray() { } public void testNull() throws Exception { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); document.put("field", null); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); @@ -82,7 +84,7 @@ public void testNull() throws Exception { } public void testBoolean() throws Exception { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); boolean value = true; document.put("field", value); @@ -92,7 +94,7 @@ public void testBoolean() throws Exception { } public void testInteger() throws Exception { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); int value = 3; document.put("field", value); @@ -102,7 +104,7 @@ public void testInteger() throws Exception { } public void testDouble() throws Exception { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, 
false); Map document = new HashMap<>(); double value = 3.0; document.put("field", value); @@ -112,7 +114,7 @@ public void testDouble() throws Exception { } public void testString() throws Exception { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); String value = "hello world"; document.put("field", "\"" + value + "\""); @@ -122,7 +124,7 @@ public void testString() throws Exception { } public void testArray() throws Exception { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); List value = Arrays.asList(true, true, false); document.put("field", value.toString()); @@ -132,7 +134,7 @@ public void testArray() throws Exception { } public void testFieldMissing() { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); @@ -143,7 +145,7 @@ public void testFieldMissing() { public void testAddToRoot() throws Exception { String processorTag = randomAlphaOfLength(3); String randomTargetField = randomAlphaOfLength(2); - JsonProcessor jsonProcessor = new JsonProcessor(processorTag, null, "a", randomTargetField, true); + JsonProcessor jsonProcessor = new JsonProcessor(processorTag, null, "a", randomTargetField, true, REPLACE, false); Map document = new HashMap<>(); String json = "{\"a\": 1, \"b\": 2}"; @@ -159,8 +161,68 @@ public void testAddToRoot() throws Exception { assertEquals("see", sourceAndMetadata.get("c")); } + public void testDuplicateKeys() throws Exception { + String processorTag = randomAlphaOfLength(3); + JsonProcessor lenientJsonProcessor = new JsonProcessor(processorTag, null, "a", null, true, REPLACE, true); + + Map document = new HashMap<>(); + String json = "{\"a\": 1, \"a\": 2}"; + document.put("a", json); + document.put("c", "see"); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + lenientJsonProcessor.execute(ingestDocument); + + Map sourceAndMetadata = ingestDocument.getSourceAndMetadata(); + assertEquals(2, sourceAndMetadata.get("a")); + assertEquals("see", sourceAndMetadata.get("c")); + + JsonProcessor strictJsonProcessor = new JsonProcessor(processorTag, null, "a", null, true, REPLACE, false); + Exception exception = expectThrows(IllegalArgumentException.class, () -> + strictJsonProcessor.execute(RandomDocumentPicks.randomIngestDocument(random(), document))); + assertThat(exception.getMessage(), containsString("Duplicate field 'a'")); + } + + public void testAddToRootRecursiveMerge() throws Exception { + String processorTag = randomAlphaOfLength(3); + JsonProcessor jsonProcessor = new JsonProcessor(processorTag, null, "json", null, true, MERGE, false); + + Map document = new HashMap<>(); + String json = "{\"foo\": {\"bar\": \"baz\"}}"; + document.put("json", json); + Map inner = new HashMap<>(); + inner.put("bar", "override_me"); + inner.put("qux", "quux"); + document.put("foo", inner); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), 
document); + jsonProcessor.execute(ingestDocument); + + assertEquals("baz", ingestDocument.getFieldValue("foo.bar", String.class)); + assertEquals("quux", ingestDocument.getFieldValue("foo.qux", String.class)); + } + + public void testAddToRootNonRecursiveMerge() throws Exception { + String processorTag = randomAlphaOfLength(3); + JsonProcessor jsonProcessor = new JsonProcessor(processorTag, null, "json", null, true, REPLACE, false); + + Map document = new HashMap<>(); + String json = "{\"foo\": {\"bar\": \"baz\"}}"; + document.put("json", json); + Map inner = new HashMap<>(); + inner.put("bar", "override_me"); + inner.put("qux", "quux"); + document.put("foo", inner); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + jsonProcessor.execute(ingestDocument); + + assertEquals("baz", ingestDocument.getFieldValue("foo.bar", String.class)); + assertFalse(ingestDocument.hasField("foo.qux")); + } + public void testAddBoolToRoot() { - JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", true); + JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", true, REPLACE, false); Map document = new HashMap<>(); document.put("field", true); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java index ef8650ab0fd3d..b14d13d2d6e5d 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java @@ -11,7 +11,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -24,8 +26,14 @@ public class KeyValueProcessorFactoryTests extends ESTestCase { + private KeyValueProcessor.Factory factory; + + @Before + public void init() { + factory = new KeyValueProcessor.Factory(TestTemplateService.instance()); + } + public void testCreateWithDefaults() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -33,7 +41,7 @@ public void testCreateWithDefaults() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), is(nullValue())); @@ -42,7 +50,6 @@ public void testCreateWithDefaults() throws Exception { } public void testCreateWithAllFieldsSet() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -54,17 +61,16 @@ public void 
testCreateWithAllFieldsSet() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), equalTo(Sets.newHashSet("a", "b"))); assertThat(processor.getExcludeKeys(), equalTo(Collections.emptySet())); - assertThat(processor.getTargetField(), equalTo("target")); + assertThat(processor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); assertTrue(processor.isIgnoreMissing()); } public void testCreateWithMissingField() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); String processorTag = randomAlphaOfLength(10); ElasticsearchException exception = expectThrows(ElasticsearchParseException.class, @@ -73,7 +79,6 @@ public void testCreateWithMissingField() { } public void testCreateWithMissingFieldSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAlphaOfLength(10); @@ -83,7 +88,6 @@ public void testCreateWithMissingFieldSplit() { } public void testCreateWithMissingValueSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java index 0769b8492ded4..ce1ab8d8811e8 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.RandomDocumentPicks; +import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -27,7 +28,7 @@ public class KeyValueProcessorTests extends ESTestCase { - private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(); + private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(TestTemplateService.instance()); public void test() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); @@ -92,7 +93,7 @@ public void testMissingField() throws Exception { Processor processor = createKvProcessor("unknown", "&", "=", null, null, "target", false); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); - assertThat(exception.getMessage(), equalTo("field [unknown] not present as part of path [unknown]")); + assertThat(exception.getMessage(), equalTo("field [unknown] doesn't exist")); } public void testNullValueWithIgnoreMissing() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index cc1308dbfd452..160ba4871ff46 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -99,6 +99,11 @@ public void testRenameNonExistingFieldWithIgnoreMissing() throws Exception { RandomDocumentPicks.randomFieldName(random()), true); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); + + Processor processor1 = createRenameProcessor("", + RandomDocumentPicks.randomFieldName(random()), true); + processor1.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); } public void testRenameNewFieldAlreadyExists() throws Exception { diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/130_escape_dot.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/130_escape_dot.yml index 5fb416228f2c1..f60a6946c2928 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/130_escape_dot.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/130_escape_dot.yml @@ -4,6 +4,10 @@ teardown: ingest.delete_pipeline: id: "1" ignore: 404 + - do: + ingest.delete_pipeline: + id: "2" + ignore: 404 --- "Test escape_dot processor": @@ -36,3 +40,40 @@ teardown: index: test id: 1 - match: { _source.foo.bar: "baz" } +--- +"Test escape_dot processor with override and wildcard": + - do: + ingest.put_pipeline: + id: "2" + body: > + { + "processors": [ + { + "dot_expander" : { + "field" : "*", + "override": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 2 + pipeline: "2" + body: { + foo.bar: "baz", + foo: { + bar: "override_me", + qux: "quux" + } + } + + - do: + get: + index: test + id: 2 + - match: { _source.foo.bar: "baz" } + - match: { _source.foo.qux: "quux" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml index 95de56b2be3d4..746858a673531 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml @@ -4,6 +4,14 @@ teardown: ingest.delete_pipeline: id: "1" ignore: 404 + - do: + ingest.delete_pipeline: + id: "2" + ignore: 404 + - do: + ingest.delete_pipeline: + id: "3" + ignore: 404 --- "Test JSON Processor": @@ -71,3 +79,74 @@ teardown: - match: { _source.foo_number: 3 } - is_true: _source.foo_boolean - is_false: _source.foo_null + +--- +"Test JSON Processor duplicate keys": + - do: + ingest.put_pipeline: + id: "2" + body: { + "processors": [ + { + "json" : { + "field" : "json", + "add_to_root": true, + "allow_duplicate_keys": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 2 + pipeline: "2" + body: { + json: "{\"dupe\": 1, \"dupe\": 2}", + } + + - do: + get: + index: test + id: 2 + - match: { _source.dupe: 2 } + +--- +"Test JSON Processor recursive merge strategy": + - do: + ingest.put_pipeline: + id: "3" + body: { + "processors": [ + { + "json" : { + "field" : "json", + "add_to_root": true, + "add_to_root_conflict_strategy": "merge" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: 
test + id: 3 + pipeline: "3" + body: { + json: "{\"foo\": {\"bar\": \"baz\"} }", + foo: { + bar: "override_me", + qux: "quux" + } + } + + - do: + get: + index: test + id: 3 + - match: { _source.foo.bar: "baz" } + - match: { _source.foo.qux: "quux" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml index 836243652b2e0..486739e49283c 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml @@ -39,3 +39,58 @@ teardown: id: 1 - match: { _source.goodbye: "everybody" } - match: { _source.hello: "world" } + +--- +"Test KV Processor with template snippets": + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{origin}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + }, + { + "kv" : { + "field" : "{{origin}}", + "field_split": " ", + "value_split": "=" + } + }, + { + "kv" : { + "field" : "{{origin1}}", + "field_split": " ", + "value_split": "=", + "ignore_missing": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + origin: "field1", + field1: "goodbye=everybody hello=world", + target: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source.bar.goodbye: "everybody" } + - match: { _source.bar.hello: "world" } + - match: { _source.goodbye: "everybody" } + - match: { _source.hello: "world" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename.yml new file mode 100644 index 0000000000000..a6c663a260aae --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename.yml @@ -0,0 +1,40 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test Rename Processor with template snippets and ignore_missing": + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "rename" : { + "field" : "{{foo}}", + "target_field": "bar", + "ignore_missing": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + message: "test" + } + + - do: + get: + index: test + id: 1 + - match: { _source.message: "test" } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index a5a303da1d3d4..cf59994ab8d2d 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -55,7 +55,6 @@ tasks.named("internalClusterTest").configure { if (useFixture) { nonInputProperties.systemProperty "geoip_endpoint", "${-> fixtureAddress()}" } - systemProperty "ingest.geoip.downloader.enabled.default", "true" } tasks.register("copyDefaultGeoIp2DatabaseFiles", Copy) { @@ -102,3 +101,9 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { tasks.named("forbiddenPatterns").configure { exclude '**/*.mmdb' } + +tasks.named("dependencyLicenses").configure { + mapping from: /geoip.*/, to: 'maxmind-geolite2-eula' + mapping from: /maxmind-db.*/, to: 'maxmind-db-reader' + ignoreFile 'elastic-geoip-database-service-agreement-LICENSE.txt' +} diff --git a/modules/ingest-geoip/licenses/elastic-geoip-database-service-agreement-LICENSE.txt 
b/modules/ingest-geoip/licenses/elastic-geoip-database-service-agreement-LICENSE.txt new file mode 100644 index 0000000000000..fb79d71792b51 --- /dev/null +++ b/modules/ingest-geoip/licenses/elastic-geoip-database-service-agreement-LICENSE.txt @@ -0,0 +1,2 @@ +By using the GeoIP Database Service, you agree to the Elastic GeoIP Database Service Agreement, +available at www.elastic.co/elastic-geoip-database-service-terms. diff --git a/modules/ingest-geoip/licenses/geoip2-NOTICE.txt b/modules/ingest-geoip/licenses/geoip2-NOTICE.txt deleted file mode 100644 index 448b71d47d382..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -This software is Copyright (c) 2013 by MaxMind, Inc. - -This is free software, licensed under the Apache License, Version 2.0. \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-NOTICE.txt b/modules/ingest-geoip/licenses/maxmind-db-NOTICE.txt deleted file mode 100644 index 1ebe2b0826dd3..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -This software is Copyright (c) 2014 by MaxMind, Inc. - -This is free software, licensed under the Apache License, Version 2.0. diff --git a/modules/ingest-geoip/licenses/maxmind-db-LICENSE.txt b/modules/ingest-geoip/licenses/maxmind-db-reader-LICENSE.txt similarity index 100% rename from modules/ingest-geoip/licenses/maxmind-db-LICENSE.txt rename to modules/ingest-geoip/licenses/maxmind-db-reader-LICENSE.txt diff --git a/modules/ingest-geoip/licenses/maxmind-db-reader-NOTICE.txt b/modules/ingest-geoip/licenses/maxmind-db-reader-NOTICE.txt new file mode 100644 index 0000000000000..1ab8558bb1e57 --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-db-reader-NOTICE.txt @@ -0,0 +1 @@ +This software is Copyright (c) 2014-2020 by MaxMind, Inc. This is free software, licensed under the Apache License, Version 2.0. diff --git a/modules/ingest-geoip/licenses/maxmind-geolite2-eula-LICENSE.txt b/modules/ingest-geoip/licenses/maxmind-geolite2-eula-LICENSE.txt new file mode 100644 index 0000000000000..f3729ff25810e --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-geolite2-eula-LICENSE.txt @@ -0,0 +1,2 @@ +The Elastic GeoIP Database Service uses the GeoLite2 Data created and licensed by MaxMind, +which is governed by MaxMind’s GeoLite2 End User License Agreement, available at https://www.maxmind.com/en/geolite2/eula. diff --git a/modules/ingest-geoip/licenses/maxmind-geolite2-eula-NOTICE.txt b/modules/ingest-geoip/licenses/maxmind-geolite2-eula-NOTICE.txt new file mode 100644 index 0000000000000..dfc2a51ae6eb9 --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-geolite2-eula-NOTICE.txt @@ -0,0 +1 @@ +This product includes the GeoLite2 Data created by MaxMind, available at https://www.maxmind.com. diff --git a/modules/ingest-geoip/licenses/maxmind-geolite2-legacy/maxmind-geolite2-legacy-LICENSE.txt b/modules/ingest-geoip/licenses/maxmind-geolite2-legacy/maxmind-geolite2-legacy-LICENSE.txt new file mode 100644 index 0000000000000..cc3e2459dc5c4 --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-geolite2-legacy/maxmind-geolite2-legacy-LICENSE.txt @@ -0,0 +1,427 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. 
Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. 
Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. 
If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. 
Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/modules/ingest-geoip/licenses/maxmind-geolite2-legacy/maxmind-geolite2-legacy-NOTICE.txt b/modules/ingest-geoip/licenses/maxmind-geolite2-legacy/maxmind-geolite2-legacy-NOTICE.txt new file mode 100644 index 0000000000000..98c75c9fe4310 --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-geolite2-legacy/maxmind-geolite2-legacy-NOTICE.txt @@ -0,0 +1,2 @@ +If you do not use the Elastic GeoIP Database Service, this product uses the legacy GeoLite2 Data created by MaxMind +and licensed under the Creative Commons Attribution-ShareAlike 4.0 International License. 
diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 456e69a90e340..1a6ed3bfc8e58 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -55,11 +55,13 @@ import java.util.zip.GZIPInputStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -83,17 +85,37 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } @After - public void cleanUp() { + public void cleanUp() throws Exception { ClusterUpdateSettingsResponse settingsResponse = client().admin().cluster() .prepareUpdateSettings() .setPersistentSettings(Settings.builder() - .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), (String) null) - .put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), (String) null) - .put("ingest.geoip.database_validity", (String) null)) + .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) + .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + .putNull("ingest.geoip.database_validity")) .get(); assertTrue(settingsResponse.isAcknowledged()); + + assertBusy(() -> { + PersistentTasksCustomMetadata.PersistentTask<?> task = getTask(); + if (task != null) { + GeoIpTaskState state = (GeoIpTaskState) task.getState(); + assertThat(state.getDatabases(), anEmptyMap()); + } + }); + assertBusy(() -> { + List<Path> geoIpTmpDirs = getGeoIpTmpDirs(); + for (Path geoIpTmpDir : geoIpTmpDirs) { + try (Stream<Path> files = Files.list(geoIpTmpDir)) { + Set<String> names = files.map(f -> f.getFileName().toString()).collect(Collectors.toSet()); + assertThat(names, not(hasItem("GeoLite2-ASN.mmdb"))); + assertThat(names, not(hasItem("GeoLite2-City.mmdb"))); + assertThat(names, not(hasItem("GeoLite2-Country.mmdb"))); + } + } + }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/75221") public void testInvalidTimestamp() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); ClusterUpdateSettingsResponse settingsResponse = @@ -147,6 +169,21 @@ public void testInvalidTimestamp() throws Exception { assertFalse(result.getIngestDocument().hasField("ip-asn")); assertFalse(result.getIngestDocument().hasField("ip-country")); }); + settingsResponse = + client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder() + .putNull("ingest.geoip.database_validity")) + .get(); + assertTrue(settingsResponse.isAcknowledged()); + assertBusy(() -> { + for (Path geoIpTmpDir : geoIpTmpDirs) { + try (Stream<Path> files = Files.list(geoIpTmpDir)) { + Set<String> names = files.map(f -> f.getFileName().toString()).collect(Collectors.toSet()); + assertThat(names, hasItems("GeoLite2-ASN.mmdb","GeoLite2-City.mmdb","GeoLite2-Country.mmdb")); + } + } + }); } public void testUpdatedTimestamp() throws Exception { diff --git
a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index c6a1c3a87e606..02e04bccb3e65 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -37,7 +37,8 @@ */ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor implements ClusterStateListener { - static final boolean ENABLED_DEFAULT = "true".equals(System.getProperty("ingest.geoip.downloader.enabled.default")); + private static final boolean ENABLED_DEFAULT = + "false".equals(System.getProperty("ingest.geoip.downloader.enabled.default", "true")) == false; public static final Setting ENABLED_SETTING = Setting.boolSetting("ingest.geoip.downloader.enabled", ENABLED_DEFAULT, Setting.Property.Dynamic, Setting.Property.NodeScope); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 61b4ad367c146..5ca1efe361b9f 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -17,11 +17,13 @@ import com.maxmind.geoip2.record.Country; import com.maxmind.geoip2.record.Location; import com.maxmind.geoip2.record.Subdivision; + import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.ingest.AbstractProcessor; @@ -65,10 +67,11 @@ public final class GeoIpProcessor extends AbstractProcessor { /** * Construct a geo-IP processor. 
+ * * @param tag the processor tag * @param description the processor description * @param field the source field to geo-IP map - * @param supplier a supplier of a geo-IP database reader; ideally this is lazily-loaded once on first use + * @param supplier a supplier of a geo-IP database reader; ideally this is lazily-loaded once on first use * @param isValid * @param targetField the target field * @param properties the properties; ideally this is lazily-loaded once on first use @@ -104,7 +107,7 @@ public IngestDocument execute(IngestDocument ingestDocument) throws IOException Object ip = ingestDocument.getFieldValue(field, Object.class, ignoreMissing); if (isValid.get() == false) { - ingestDocument.appendFieldValue("tags","_geoip_expired_database", false); + ingestDocument.appendFieldValue("tags", "_geoip_expired_database", false); return ingestDocument; } else if (ip == null && ignoreMissing) { return ingestDocument; @@ -367,9 +370,9 @@ public Factory(DatabaseRegistry databaseRegistry, ClusterService clusterService) @Override public GeoIpProcessor create( - final Map<String, Processor.Factory> registry, - final String processorTag, - final String description, final Map<String, Object> config) throws IOException { + final Map<String, Processor.Factory> registry, + final String processorTag, + final String description, final Map<String, Object> config) throws IOException { String ipField = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "geoip"); String databaseFile = readStringProperty(TYPE, processorTag, config, "database_file", "GeoLite2-City.mmdb"); @@ -436,7 +439,19 @@ public GeoIpProcessor create( return true; } GeoIpTaskState state = (GeoIpTaskState) task.getState(); - return state.getDatabases().get(databaseFile).isValid(currentState.metadata().settings()); + GeoIpTaskState.Metadata metadata = state.getDatabases().get(databaseFile); + // we never remove metadata from cluster state, if metadata is null we deal with built-in database, which is always valid + if (metadata == null) { + return true; + } + + boolean valid = metadata.isValid(currentState.metadata().settings()); + if (valid && metadata.isCloseToExpiration()) { + HeaderWarning.addWarning("database [{}] was not updated for over 25 days, geoip processor will stop working if there " + + "is no update for 30 days", databaseFile); + } + + return valid; }; return new GeoIpProcessor(processorTag, description, ipField, supplier, isValid, targetField, properties, ignoreMissing, firstOnly); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index c9062a3443e87..b1f40c7843492 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -69,7 +69,7 @@ public static GeoIpTaskState fromXContent(XContentParser parser) throws IOExcept in -> { long lastUpdate = in.readLong(); return new Metadata(lastUpdate, in.readVInt(), in.readVInt(), in.readString(), - in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readLong() : lastUpdate); + in.getVersion().onOrAfter(Version.V_7_14_0) ?
in.readLong() : lastUpdate); })); } @@ -135,7 +135,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeVInt(v.firstChunk); o.writeVInt(v.lastChunk); o.writeString(v.md5); - if (o.getVersion().onOrAfter(Version.V_8_0_0)) { + if (o.getVersion().onOrAfter(Version.V_7_14_0)) { o.writeLong(v.lastCheck); } }); @@ -189,6 +189,10 @@ public long getLastUpdate() { return lastUpdate; } + public boolean isCloseToExpiration(){ + return Instant.ofEpochMilli(lastCheck).isBefore(Instant.now().minus(25, ChronoUnit.DAYS)); + } + public boolean isValid(Settings settings) { TimeValue valid = settings.getAsTime("ingest.geoip.database_validity", TimeValue.timeValueDays(30)); return Instant.ofEpochMilli(lastCheck).isAfter(Instant.now().minus(valid.getMillis(), ChronoUnit.MILLIS)); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index f5698d7ab01f5..f26c7820c8c61 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -64,6 +64,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; @@ -113,10 +114,6 @@ public Collection createComponents(Client client, throw new UncheckedIOException(e); } - if (GeoIpDownloaderTaskExecutor.ENABLED_DEFAULT == false) { - return List.of(databaseRegistry.get()); - } - geoIpDownloaderTaskExecutor = new GeoIpDownloaderTaskExecutor(client, new HttpClient(), clusterService, threadPool); return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor); } @@ -130,9 +127,6 @@ public void close() throws IOException { public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, Client client, SettingsModule settingsModule, IndexNameExpressionResolver expressionResolver) { - if (GeoIpDownloaderTaskExecutor.ENABLED_DEFAULT == false) { - return Collections.emptyList(); - } return List.of(geoIpDownloaderTaskExecutor); } @@ -165,9 +159,6 @@ public List getNamedWriteables() { @Override public Collection getSystemIndexDescriptors(Settings settings) { - if (GeoIpDownloaderTaskExecutor.ENABLED_DEFAULT == false) { - return Collections.emptyList(); - } SystemIndexDescriptor geoipDatabasesIndex = SystemIndexDescriptor.builder() .setIndexPattern(DATABASES_INDEX) .setDescription("GeoIP databases") @@ -177,9 +168,10 @@ public Collection getSystemIndexDescriptors(Settings sett .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .build()) - .setOrigin("geoip") + .setOrigin(INGEST_ORIGIN) .setVersionMetaKey("version") .setPrimaryIndex(DATABASES_INDEX) + .setNetNew() .build(); return Collections.singleton(geoipDatabasesIndex); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java index d368127b30cd9..f87c276741fe6 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java +++ 
b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStats.java @@ -59,7 +59,7 @@ public GeoIpDownloaderStats(StreamInput in) throws IOException { totalDownloadTime = in.readVLong(); databasesCount = in.readVInt(); skippedDownloads = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getVersion().onOrAfter(Version.V_7_14_0)) { expiredDatabases = in.readVInt(); } else { expiredDatabases = 0; @@ -149,7 +149,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalDownloadTime); out.writeVInt(databasesCount); out.writeVInt(skippedDownloads); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_14_0)) { out.writeVInt(expiredDatabases); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsTransportAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsTransportAction.java index e9ec8290f182e..738970729694a 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsTransportAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsTransportAction.java @@ -33,20 +33,17 @@ public class GeoIpDownloaderStatsTransportAction extends TransportNodesAction config = new HashMap<>(); + config.put("field", "_field"); + String processorTag = randomAlphaOfLength(10); + + GeoIpProcessor processor = factory.create(null, processorTag, null, config); + + processor.execute(RandomDocumentPicks.randomIngestDocument(random(), Map.of("_field", "89.160.20.128"))); + } + public void testFallbackUsingDefaultDatabasesWhileIngesting() throws Exception { copyDatabaseFile(geoipTmpDir, "GeoLite2-City-Test.mmdb"); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseRegistry, clusterService); diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index 9082fed86be63..1518b1d64a08c 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -23,10 +23,6 @@ testClusters.all { extraConfigFile 'ingest-user-agent/test-regexes.yml', file('src/test/test-regexes.yml') } -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'ingest-useragent/30_custom_regex/Test user agent processor with custom regex file', - 'ingest-useragent/20_useragent_processor/Test user agent processor with defaults', - 'ingest-useragent/20_useragent_processor/Test user agent processor with parameters' - ].join(',') -} +tasks.named("transformV7RestTests").configure({ task -> + task.addAllowedWarningRegex("setting \\[ecs\\] is deprecated as ECS format is the default and only option") +}) diff --git a/modules/lang-expression/licenses/lucene-expressions-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 1ec86f8c52a15..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f8dd84578e847ce9a982d22dcb783c818ae761a \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.9.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..ae69d7ff51221 --- /dev/null +++ 
b/modules/lang-expression/licenses/lucene-expressions-8.9.0.jar.sha1 @@ -0,0 +1 @@ +c52e0f197d8f37fec8e679660a4814b8928e4db2 \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionNumberSortScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionNumberSortScript.java index afa9c42a0d231..504a706ceaf28 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionNumberSortScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionNumberSortScript.java @@ -15,7 +15,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; +import org.elasticsearch.script.DocReader; import org.elasticsearch.script.GeneralScriptException; +import org.elasticsearch.script.LeafReaderContextSupplier; import org.elasticsearch.script.NumberSortScript; /** @@ -37,10 +39,18 @@ class ExpressionNumberSortScript implements NumberSortScript.LeafFactory { } @Override - public NumberSortScript newInstance(final LeafReaderContext leaf) throws IOException { + public NumberSortScript newInstance(final DocReader reader) throws IOException { + // Use DocReader to get the leaf context while transitioning to DocReader for Painless. DocReader for expressions should follow. + if (reader instanceof LeafReaderContextSupplier == false) { + throw new IllegalStateException( + "Expected LeafReaderContextSupplier when creating expression NumberSortScript instead of [" + reader + "]" + ); + } + final LeafReaderContext ctx = ((LeafReaderContextSupplier) reader).getLeafReaderContext(); + return new NumberSortScript() { // Fake the scorer until setScorer is called. - DoubleValues values = source.getValues(leaf, new DoubleValues() { + DoubleValues values = source.getValues(ctx, new DoubleValues() { @Override public double doubleValue() { return 0.0D; diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java index 39cb80e1941da..fd89ccc6405cb 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java @@ -14,7 +14,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValuesSource; +import org.elasticsearch.script.DocReader; import org.elasticsearch.script.GeneralScriptException; +import org.elasticsearch.script.LeafReaderContextSupplier; import org.elasticsearch.script.ScoreScript; import java.io.IOException; @@ -41,7 +43,15 @@ public boolean needs_score() { } @Override - public ScoreScript newInstance(final LeafReaderContext leaf) throws IOException { + public ScoreScript newInstance(final DocReader reader) throws IOException { + // Use DocReader to get the leaf context while transitioning to DocReader for Painless. DocReader for expressions should follow. 
+ if (reader instanceof LeafReaderContextSupplier == false) { + throw new IllegalStateException( + "Expected LeafReaderContextSupplier when creating expression ExpressionScoreScript instead of [" + reader + "]" + ); + } + final LeafReaderContext leaf = ((LeafReaderContextSupplier) reader).getLeafReaderContext(); + return new ScoreScript(null, null, null) { // Fake the scorer until setScorer is called. DoubleValues values = source.getValues(leaf, new DoubleValues() { diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index fa636f697aff0..b708685487f9a 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -331,9 +331,9 @@ private static FieldScript.LeafFactory newFieldScript(Expression expr, SearchLoo */ private static FilterScript.LeafFactory newFilterScript(Expression expr, SearchLookup lookup, @Nullable Map vars) { ScoreScript.LeafFactory searchLeafFactory = newScoreScript(expr, lookup, vars); - return ctx -> { - ScoreScript script = searchLeafFactory.newInstance(ctx); - return new FilterScript(vars, lookup, ctx) { + return docReader -> { + ScoreScript script = searchLeafFactory.newInstance(docReader); + return new FilterScript(vars, lookup, docReader) { @Override public boolean execute() { return script.execute(null) != 0.0; diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java index 527b380879fe5..f151cd6ef7420 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.NumberSortScript; import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.lookup.SearchLookup; @@ -73,7 +74,7 @@ public void testLinkError() { } public void testFieldAccess() throws IOException { - NumberSortScript script = compile("doc['field'].value").newInstance(null); + NumberSortScript script = compile("doc['field'].value").newInstance(mock(DocValuesDocReader.class)); script.setDocument(1); double result = script.execute(); diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 319061d6c5a3b..a4e328ac46080 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -26,9 +26,6 @@ restResources { } } -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'lang_mustache/60_typed_keys/Multisearch template with typed_keys parameter', - 'lang_mustache/60_typed_keys/Search template with typed_keys parameter' - ].join(',') -} +tasks.named("transformV7RestTests").configure({ task -> + task.addAllowedWarningRegex("\\[types removal\\].*") +}) diff --git a/modules/lang-painless/build.gradle 
b/modules/lang-painless/build.gradle index 4e0d47dad14e3..240e509373e3a 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -23,6 +23,15 @@ testClusters.all { systemProperty 'es.transport.cname_in_publish_address', 'true' } +configurations { + spi + compileOnlyApi.extendsFrom(spi) + if (isEclipse) { + // Eclipse buildship doesn't know about compileOnlyApi + api.extendsFrom(spi) + } +} + dependencies { api 'org.antlr:antlr4-runtime:4.5.3' api 'org.ow2.asm:asm-util:7.2' @@ -30,7 +39,7 @@ dependencies { api 'org.ow2.asm:asm-commons:7.2' api 'org.ow2.asm:asm-analysis:7.2' api 'org.ow2.asm:asm:7.2' - api project('spi') + spi project('spi') } tasks.named("dependencyLicenses").configure { @@ -51,18 +60,6 @@ tasks.named("test").configure { tasks.named("yamlRestCompatTest").configure { systemProperty 'tests.rest.blacklist', [ - 'painless/70_execute_painless_scripts/Execute with date field context (single-value)', - 'painless/70_execute_painless_scripts/Execute with date field context (multi-value)', - 'painless/70_execute_painless_scripts/Execute with double field context (single-value)', - 'painless/70_execute_painless_scripts/Execute with double field context (multi-value)', - 'painless/70_execute_painless_scripts/Execute with geo point field context (single-value)', - 'painless/70_execute_painless_scripts/Execute with geo point field context (multi-value)', - 'painless/70_execute_painless_scripts/Execute with ip field context (single-value)', - 'painless/70_execute_painless_scripts/Execute with ip field context (multi-value)', - 'painless/70_execute_painless_scripts/Execute with long field context (single-value)', - 'painless/70_execute_painless_scripts/Execute with long field context (multi-value)', - 'painless/70_execute_painless_scripts/Execute with keyword field context (single-value)', - 'painless/70_execute_painless_scripts/Execute with keyword field context (multi-value)', ].join(',') } @@ -70,7 +67,7 @@ tasks.named("yamlRestCompatTest").configure { * Painless plugin */ tasks.register("apiJavadoc", Javadoc) { source = sourceSets.main.allJava - classpath = sourceSets.main.runtimeClasspath + classpath = sourceSets.main.compileClasspath + sourceSets.main.output include '**/org/elasticsearch/painless/api/' destinationDir = new File(docsDir, 'apiJavadoc') } @@ -83,6 +80,16 @@ tasks.register("apiJavadocJar", Jar) { tasks.named("assemble").configure { dependsOn "apiJavadocJar" } +tasks.named("check").configure { + dependsOn "apiJavadocJar" +} + +tasks.named("bundlePlugin").configure { + it.into("spi") { + from(configurations.spi) + } +} + /********************************************** * Context API Generation * **********************************************/ @@ -118,7 +125,7 @@ tasks.register("generateContextDoc", DefaultTestClustersTask) { useCluster testClusters.generateContextCluster doFirst { project.javaexec { - main = 'org.elasticsearch.painless.ContextDocGenerator' + mainClass = 'org.elasticsearch.painless.ContextDocGenerator' classpath = sourceSets.doc.runtimeClasspath systemProperty "cluster.uri", "${-> testClusters.generateContextCluster.singleNode().getAllHttpSocketURI().get(0)}" }.assertNormalExitValue() @@ -138,11 +145,11 @@ tasks.register("generateContextApiSpec", DefaultTestClustersTask) { useCluster testClusters.generateContextApiSpecCluster doFirst { project.javaexec { - main = 'org.elasticsearch.painless.ContextApiSpecGenerator' + mainClass = 'org.elasticsearch.painless.ContextApiSpecGenerator' classpath = sourceSets.doc.runtimeClasspath 
systemProperty "cluster.uri", "${-> testClusters.generateContextApiSpecCluster.singleNode().getAllHttpSocketURI().get(0)}" - systemProperty "jdksrc", System.getProperty("jdksrc") - systemProperty "packageSources", System.getProperty("packageSources") + systemProperty "jdksrc", providers.systemProperty("jdksrc").forUseAtConfigurationTime().getOrNull() + systemProperty "packageSources", providers.systemProperty("packageSources").forUseAtConfigurationTime().getOrNull() }.assertNormalExitValue() } } @@ -173,7 +180,7 @@ tasks.register("cleanGenerated", Delete) { tasks.register("regenLexer", JavaExec) { dependsOn "cleanGenerated" - main = 'org.antlr.v4.Tool' + mainClass = 'org.antlr.v4.Tool' classpath = configurations.regenerate systemProperty 'file.encoding', 'UTF-8' systemProperty 'user.language', 'en' @@ -187,7 +194,7 @@ tasks.register("regenLexer", JavaExec) { tasks.register("regenParser", JavaExec) { dependsOn "regenLexer" - main = 'org.antlr.v4.Tool' + mainClass = 'org.antlr.v4.Tool' classpath = configurations.regenerate systemProperty 'file.encoding', 'UTF-8' systemProperty 'user.language', 'en' @@ -263,7 +270,7 @@ tasks.register("cleanSuggestGenerated", Delete) { tasks.register("regenSuggestLexer", JavaExec) { dependsOn "cleanSuggestGenerated" - main = 'org.antlr.v4.Tool' + mainClass = 'org.antlr.v4.Tool' classpath = configurations.regenerate systemProperty 'file.encoding', 'UTF-8' systemProperty 'user.language', 'en' diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index b4f8c542325eb..84700c62ef7c0 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -13,8 +13,6 @@ group = 'org.elasticsearch.plugin' archivesBaseName = 'elasticsearch-scripting-painless-spi' dependencies { - api project(":server") + compileOnly project(":server") + testImplementation project(":test:framework") } - -// no tests...yet? -tasks.named("test").configure { enabled = false } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnnotationTestObject.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/AnnotationTestObject.java similarity index 98% rename from modules/lang-painless/src/test/java/org/elasticsearch/painless/AnnotationTestObject.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/AnnotationTestObject.java index 4ee91eaa4a311..5d2247ddd4c1d 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnnotationTestObject.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/AnnotationTestObject.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index ff8bc001d67df..8752bd41aefb2 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -8,8 +8,6 @@ package org.elasticsearch.painless.spi; -import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; - import java.util.Collections; import java.util.List; import java.util.Objects; @@ -26,27 +24,6 @@ */ public final class Whitelist { - private static final String[] BASE_WHITELIST_FILES = new String[] { - "org.elasticsearch.txt", - "org.elasticsearch.net.txt", - "java.lang.txt", - "java.math.txt", - "java.text.txt", - "java.time.txt", - "java.time.chrono.txt", - "java.time.format.txt", - "java.time.temporal.txt", - "java.time.zone.txt", - "java.util.txt", - "java.util.function.txt", - "java.util.regex.txt", - "java.util.stream.txt" - }; - - public static final List BASE_WHITELISTS = - Collections.singletonList(WhitelistLoader.loadFromResourceFiles( - Whitelist.class, WhitelistAnnotationParser.BASE_ANNOTATION_PARSERS, BASE_WHITELIST_FILES)); - /** The {@link ClassLoader} used to look up the whitelisted Java classes, constructors, methods, and fields. */ public final ClassLoader classLoader; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java similarity index 96% rename from modules/lang-painless/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java rename to modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java index 2d4111cd1121e..3c4c30dc851e7 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java +++ b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.spi.AnnotationTestObject; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistLoader; @@ -15,11 +16,12 @@ import org.elasticsearch.painless.spi.annotation.DeprecatedAnnotation; import org.elasticsearch.painless.spi.annotation.NoImportAnnotation; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; +import org.elasticsearch.test.ESTestCase; import java.util.HashMap; import java.util.Map; -public class WhitelistLoaderTests extends ScriptTestCase { +public class WhitelistLoaderTests extends ESTestCase { public void testUnknownAnnotations() { Map parsers = new HashMap<>(WhitelistAnnotationParser.BASE_ANNOTATION_PARSERS); diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation b/modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation similarity index 80% rename from modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation rename to 
modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation index 04e22b6dded96..703de27546354 100644 --- a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation +++ b/modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation @@ -1,7 +1,7 @@ # whitelist for annotation tests -class org.elasticsearch.painless.AnnotationTestObject @no_import { +class org.elasticsearch.painless.spi.AnnotationTestObject @no_import { void deprecatedMethod() @deprecated[message="use another method"] void annotatedTestMethod() @test_annotation[one="one",two="two",three="three"] void annotatedMultipleMethod() @test_annotation[one="one",two="two",three="three"] @deprecated[message="test"] -} \ No newline at end of file +} diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown b/modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown similarity index 74% rename from modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown rename to modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown index c67f535705b7a..0cdbad36a3043 100644 --- a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown +++ b/modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown @@ -1,6 +1,6 @@ # whitelist for annotation tests with unknown annotation -class org.elasticsearch.painless.AnnotationTestObject @no_import { +class org.elasticsearch.painless.spi.AnnotationTestObject @no_import { void unknownAnnotationMethod() @unknownAnnotation void unknownAnnotationMethod() @unknownAnootationWithMessage[message="use another method"] } diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown_with_options b/modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown_with_options similarity index 69% rename from modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown_with_options rename to modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown_with_options index 071b7f57fad2d..d9d5d54b24c40 100644 --- a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown_with_options +++ b/modules/lang-painless/spi/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.annotation.unknown_with_options @@ -1,5 +1,5 @@ # whitelist for annotation tests with unknown annotation containing options -class org.elasticsearch.painless.AnnotationTestObject @no_import { +class org.elasticsearch.painless.spi.AnnotationTestObject @no_import { void unknownAnnotationMethod() @unknownAnootationWithMessage[arg="arg value"] } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index 058ee02df99be..6bcba534ad542 100644 --- 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -17,7 +17,6 @@ import org.elasticsearch.painless.phase.DefaultIRTreeToASMBytesPhase; import org.elasticsearch.painless.phase.DefaultStaticConstantExtractionPhase; import org.elasticsearch.painless.phase.DefaultStringConcatenationOptimizationPhase; -import org.elasticsearch.painless.phase.DocFieldsPhase; import org.elasticsearch.painless.phase.IRTreeVisitor; import org.elasticsearch.painless.phase.PainlessSemanticAnalysisPhase; import org.elasticsearch.painless.phase.PainlessSemanticHeaderPhase; @@ -214,8 +213,6 @@ ScriptScope compile(Loader loader, String name, String source, CompilerSettings ScriptScope scriptScope = new ScriptScope(painlessLookup, settings, scriptClassInfo, scriptName, source, root.getIdentifier() + 1); new PainlessSemanticHeaderPhase().visitClass(root, scriptScope); new PainlessSemanticAnalysisPhase().visitClass(root, scriptScope); - // TODO: Make this phase optional #60156 - new DocFieldsPhase().visitClass(root, scriptScope); new PainlessUserTreeToIRTreePhase().visitClass(root, scriptScope); ClassNode classNode = (ClassNode)scriptScope.getDecoration(root, IRNodeDecoration.class).getIRNode(); new DefaultStringConcatenationOptimizationPhase().visitClass(classNode, null); @@ -251,7 +248,6 @@ byte[] compile(String name, String source, CompilerSettings settings, Printer de ScriptScope scriptScope = new ScriptScope(painlessLookup, settings, scriptClassInfo, scriptName, source, root.getIdentifier() + 1); new PainlessSemanticHeaderPhase().visitClass(root, scriptScope); new PainlessSemanticAnalysisPhase().visitClass(root, scriptScope); - new DocFieldsPhase().visitClass(root, scriptScope); new PainlessUserTreeToIRTreePhase().visitClass(root, scriptScope); ClassNode classNode = (ClassNode)scriptScope.getDecoration(root, IRNodeDecoration.class).getIRNode(); new DefaultStringConcatenationOptimizationPhase().visitClass(classNode, null); @@ -281,7 +277,6 @@ byte[] compile(String name, String source, CompilerSettings settings, Printer de semanticPhaseVisitor.visitClass(root, scriptScope); } - new DocFieldsPhase().visitClass(root, scriptScope); new PainlessUserTreeToIRTreePhase().visitClass(root, scriptScope); if (irPhaseVisitor != null) { irPhaseVisitor.visitClass(root, scriptScope); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index ebbd67bf7cd15..9f22dda4e1f4d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -24,6 +24,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -219,9 +220,12 @@ static MethodHandle lookupMethod(PainlessLookup painlessLookup, FunctionTable fu int upTo = 1; for (int i = 1; i < numArguments; i++) { if (lambdaArgs.get(i - 1)) { - String signature = (String) args[upTo++]; - int numCaptures = Integer.parseInt(signature.substring(signature.indexOf(',')+1)); - arity -= numCaptures; + Def.Encoding signature = new Def.Encoding((String) args[upTo++]); + arity -= signature.numCaptures; + // arity in painlessLookup does not include 'this' reference + if (signature.needsInstance) { + arity--; + } } } @@ -247,16 +251,10 @@ 
static MethodHandle lookupMethod(PainlessLookup painlessLookup, FunctionTable fu for (int i = 1; i < numArguments; i++) { // its a functional reference, replace the argument with an impl if (lambdaArgs.get(i - 1)) { - // decode signature of form 'type.call,2' - String signature = (String) args[upTo++]; - int separator = signature.lastIndexOf('.'); - int separator2 = signature.indexOf(','); - String type = signature.substring(1, separator); - String call = signature.substring(separator+1, separator2); - int numCaptures = Integer.parseInt(signature.substring(separator2+1)); + Def.Encoding defEncoding = new Encoding((String) args[upTo++]); MethodHandle filter; - Class interfaceType = method.typeParameters.get(i - 1 - replaced); - if (signature.charAt(0) == 'S') { + Class interfaceType = method.typeParameters.get(i - 1 - replaced - (defEncoding.needsInstance ? 1 : 0)); + if (defEncoding.isStatic) { // the implementation is strongly typed, now that we know the interface type, // we have everything. filter = lookupReferenceInternal(painlessLookup, @@ -264,15 +262,16 @@ static MethodHandle lookupMethod(PainlessLookup painlessLookup, FunctionTable fu constants, methodHandlesLookup, interfaceType, - type, - call, - numCaptures + defEncoding.symbol, + defEncoding.methodName, + defEncoding.numCaptures, + defEncoding.needsInstance ); - } else if (signature.charAt(0) == 'D') { + } else { // the interface type is now known, but we need to get the implementation. // this is dynamically based on the receiver type (and cached separately, underneath // this cache). It won't blow up since we never nest here (just references) - Class[] captures = new Class[numCaptures]; + Class[] captures = new Class[defEncoding.numCaptures]; for (int capture = 0; capture < captures.length; capture++) { captures[capture] = callSiteType.parameterType(i + 1 + capture); } @@ -281,20 +280,18 @@ static MethodHandle lookupMethod(PainlessLookup painlessLookup, FunctionTable fu functions, constants, methodHandlesLookup, - call, + defEncoding.methodName, nestedType, 0, DefBootstrap.REFERENCE, PainlessLookupUtility.typeToCanonicalTypeName(interfaceType)); filter = nested.dynamicInvoker(); - } else { - throw new AssertionError(); - } + } // the filter now ignores the signature (placeholder) on the stack filter = MethodHandles.dropArguments(filter, 0, String.class); - handle = MethodHandles.collectArguments(handle, i, filter); - i += numCaptures; - replaced += numCaptures; + handle = MethodHandles.collectArguments(handle, i - (defEncoding.needsInstance ? 1 : 0), filter); + i += defEncoding.numCaptures; + replaced += defEncoding.numCaptures; } } @@ -328,20 +325,23 @@ static MethodHandle lookupReference(PainlessLookup painlessLookup, FunctionTable return lookupReferenceInternal(painlessLookup, functions, constants, methodHandlesLookup, interfaceType, PainlessLookupUtility.typeToCanonicalTypeName(implMethod.targetClass), - implMethod.javaMethod.getName(), 1); + implMethod.javaMethod.getName(), 1, false); } /** Returns a method handle to an implementation of clazz, given method reference signature. 
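Rather than slicing the old "type.call,2" string by hand, lookupMethod above now parses the argument through Def.Encoding (added further down in this file). The following standalone sketch mirrors the "[SD][tf]symbol.methodName,numCaptures" format it documents; the class name and sample encodings are illustrative, not the real Painless class.

```java
// Standalone sketch of the Def.Encoding string format; minimal parsing only,
// no validation, to show how the pieces break down.
public final class EncodingSketch {
    final boolean isStatic;      // 'S' = implementation resolved statically, 'D' = from the runtime receiver
    final boolean needsInstance; // 't' = the script instance ('this') must be captured
    final String symbol;         // owner: "this", a captured variable, or a type name
    final String methodName;
    final int numCaptures;

    EncodingSketch(String encoding) {
        this.isStatic = encoding.charAt(0) == 'S';
        this.needsInstance = encoding.charAt(1) == 't';
        int dot = encoding.lastIndexOf('.');
        int comma = encoding.indexOf(',');
        this.symbol = encoding.substring(2, dot);
        this.methodName = encoding.substring(dot + 1, comma);
        this.numCaptures = Integer.parseUnsignedInt(encoding.substring(comma + 1));
    }

    @Override
    public String toString() {
        return (isStatic ? "static " : "dynamic ") + symbol + "::" + methodName
            + " (captures=" + numCaptures + ", needsInstance=" + needsInstance + ")";
    }

    public static void main(String[] args) {
        // Static reference to a known type, no captures, no script instance needed.
        System.out.println(new EncodingSketch("Sfjava.lang.String.valueOf,0"));
        // Synthetic lambda method on the script class itself: two captured variables
        // plus the script instance (the 't' flag).
        System.out.println(new EncodingSketch("Stthis.lambda$0,2"));
    }
}
```

The 't' flag is what drives the new needsInstance handling in lookupMethod: the arity is reduced by one extra slot and the capture filter is shifted so the receiver can be bound to the script instance.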
*/ private static MethodHandle lookupReferenceInternal( PainlessLookup painlessLookup, FunctionTable functions, Map constants, - MethodHandles.Lookup methodHandlesLookup, Class clazz, String type, String call, int captures - ) throws Throwable { + MethodHandles.Lookup methodHandlesLookup, Class clazz, String type, String call, int captures, + boolean needsScriptInstance) throws Throwable { - final FunctionRef ref = FunctionRef.create(painlessLookup, functions, null, clazz, type, call, captures, constants); + final FunctionRef ref = + FunctionRef.create(painlessLookup, functions, null, clazz, type, call, captures, constants, needsScriptInstance); + Class[] parameters = ref.factoryMethodParameters(needsScriptInstance ? methodHandlesLookup.lookupClass() : null); + MethodType factoryMethodType = MethodType.methodType(clazz, parameters); final CallSite callSite = LambdaBootstrap.lambdaBootstrap( methodHandlesLookup, ref.interfaceMethodName, - ref.factoryMethodType, + factoryMethodType, ref.interfaceMethodType, ref.delegateClassName, ref.delegateInvokeType, @@ -351,7 +351,7 @@ private static MethodHandle lookupReferenceInternal( ref.isDelegateAugmented ? 1 : 0, ref.delegateInjections ); - return callSite.dynamicInvoker().asType(MethodType.methodType(clazz, ref.factoryMethodType.parameterArray())); + return callSite.dynamicInvoker().asType(MethodType.methodType(clazz, parameters)); } /** @@ -1268,4 +1268,113 @@ static MethodHandle arrayIndexNormalizer(Class arrayType) { private ArrayIndexNormalizeHelper() {} } + + + public static class Encoding { + public final boolean isStatic; + public final boolean needsInstance; + public final String symbol; + public final String methodName; + public final int numCaptures; + + /** + * Encoding is passed to invokedynamic to help DefBootstrap find the method. invokedynamic can only take + * "Class, java.lang.invoke.MethodHandle, java.lang.invoke.MethodType, String, int, long, float, or double" types to + * help find the callsite, which is why this object is encoded as a String for indy. + * See: https://docs.oracle.com/javase/specs/jvms/se7/html/jvms-6.html#jvms-6.5.invokedynamic + * */ + public final String encoding; + + private static final String FORMAT = "[SD][tf]symbol.methodName,numCaptures"; + + public Encoding(boolean isStatic, boolean needsInstance, String symbol, String methodName, int numCaptures) { + this.isStatic = isStatic; + this.needsInstance = needsInstance; + this.symbol = Objects.requireNonNull(symbol); + this.methodName = Objects.requireNonNull(methodName); + this.numCaptures = numCaptures; + this.encoding = (isStatic ? "S" : "D") + (needsInstance ? "t" : "f") + + symbol + "." 
+ + methodName + "," + + numCaptures; + + + if ("this".equals(symbol)) { + if (isStatic == false) { + throw new IllegalArgumentException("Def.Encoding must be static if symbol is 'this', encoding [" + encoding + "]"); + } + } else { + if (needsInstance) { + throw new IllegalArgumentException("Def.Encoding symbol must be 'this', not [" + symbol + "] if needsInstance," + + " encoding [" + encoding + "]"); + } + } + + if (methodName.isEmpty()) { + throw new IllegalArgumentException("methodName must be non-empty, encoding [" + encoding + "]"); + } + if (numCaptures < 0) { + throw new IllegalArgumentException("numCaptures must be non-negative, not [" + numCaptures + "]," + + " encoding: [" + encoding + "]"); + } + } + + // Parsing constructor, does minimal validation to avoid extra work during runtime + public Encoding(String encoding) { + this.encoding = Objects.requireNonNull(encoding); + if (encoding.length() < 6) { + throw new IllegalArgumentException("Encoding too short. Minimum 6, given [" + encoding.length() + "]," + + " encoding: [" + encoding + "], format: " + FORMAT + ""); + } + + // 'S' or 'D' + this.isStatic = encoding.charAt(0) == 'S'; + + // 't' or 'f' + this.needsInstance = encoding.charAt(1) == 't'; + + int dotIndex = encoding.lastIndexOf('.'); + if (dotIndex < 2) { + throw new IllegalArgumentException("Invalid symbol, could not find '.' at expected position after index 1, instead found" + + " index [" + dotIndex + "], encoding: [" + encoding + "], format: " + FORMAT); + } + + this.symbol = encoding.substring(2, dotIndex); + + int commaIndex = encoding.indexOf(','); + if (commaIndex <= dotIndex) { + throw new IllegalArgumentException("Invalid symbol, could not find ',' at expected position after '.' at" + + " [" + dotIndex + "], instead found index [" + commaIndex + "], encoding: [" + encoding + "], format: " + FORMAT); + } + + this.methodName = encoding.substring(dotIndex + 1, commaIndex); + + if (commaIndex == encoding.length() - 1) { + throw new IllegalArgumentException("Invalid symbol, could not find ',' at expected position, instead found" + + " index [" + commaIndex + "], encoding: [" + encoding + "], format: " + FORMAT); + } + + this.numCaptures = Integer.parseUnsignedInt(encoding.substring(commaIndex + 1)); + } + + @Override + public String toString() { + return encoding; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if ((o instanceof Encoding) == false) return false; + Encoding encoding1 = (Encoding) o; + return isStatic == encoding1.isStatic && needsInstance == encoding1.needsInstance && numCaptures == encoding1.numCaptures + && Objects.equals(symbol, encoding1.symbol) && Objects.equals(methodName, encoding1.methodName) + && Objects.equals(encoding, encoding1.encoding); + } + + @Override + public int hashCode() { + return Objects.hash(isStatic, needsInstance, symbol, methodName, numCaptures, encoding); + } + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java index 6d6d6651c053a..ff99e0d28da90 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java @@ -14,6 +14,7 @@ import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.symbol.FunctionTable; import org.elasticsearch.painless.symbol.FunctionTable.LocalFunction; +import org.objectweb.asm.Type; import 
java.lang.invoke.MethodType; import java.lang.reflect.Modifier; @@ -21,6 +22,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.painless.WriterConstants.CLASS_NAME; import static org.objectweb.asm.Opcodes.H_INVOKEINTERFACE; @@ -44,9 +46,11 @@ public class FunctionRef { * @param methodName the right hand side of a method reference expression * @param numberOfCaptures number of captured arguments * @param constants constants used for injection when necessary + * @param needsScriptInstance uses an instance method and so receiver must be captured. */ public static FunctionRef create(PainlessLookup painlessLookup, FunctionTable functionTable, Location location, - Class targetClass, String typeName, String methodName, int numberOfCaptures, Map constants) { + Class targetClass, String typeName, String methodName, int numberOfCaptures, Map constants, + boolean needsScriptInstance) { Objects.requireNonNull(painlessLookup); Objects.requireNonNull(targetClass); @@ -98,7 +102,7 @@ public static FunctionRef create(PainlessLookup painlessLookup, FunctionTable fu delegateClassName = CLASS_NAME; isDelegateInterface = false; isDelegateAugmented = false; - delegateInvokeType = H_INVOKESTATIC; + delegateInvokeType = needsScriptInstance ? H_INVOKEVIRTUAL : H_INVOKESTATIC; delegateMethodName = localFunction.getMangledName(); delegateMethodType = localFunction.getMethodType(); delegateInjections = new Object[0]; @@ -213,7 +217,7 @@ public static FunctionRef create(PainlessLookup painlessLookup, FunctionTable fu return new FunctionRef(interfaceMethodName, interfaceMethodType, delegateClassName, isDelegateInterface, isDelegateAugmented, delegateInvokeType, delegateMethodName, delegateMethodType, delegateInjections, - factoryMethodType + factoryMethodType, needsScriptInstance ? 
WriterConstants.CLASS_TYPE : null ); } catch (IllegalArgumentException iae) { if (location != null) { @@ -243,13 +247,15 @@ public static FunctionRef create(PainlessLookup painlessLookup, FunctionTable fu /** injected constants */ public final Object[] delegateInjections; /** factory (CallSite) method signature */ - public final MethodType factoryMethodType; + private final MethodType factoryMethodType; + /** factory (CallSite) method receiver, this modifies the method descriptor for the factory method */ + public final Type factoryMethodReceiver; private FunctionRef( String interfaceMethodName, MethodType interfaceMethodType, String delegateClassName, boolean isDelegateInterface, boolean isDelegateAugmented, int delegateInvokeType, String delegateMethodName, MethodType delegateMethodType, Object[] delegateInjections, - MethodType factoryMethodType) { + MethodType factoryMethodType, Type factoryMethodReceiver) { this.interfaceMethodName = interfaceMethodName; this.interfaceMethodType = interfaceMethodType; @@ -261,5 +267,27 @@ private FunctionRef( this.delegateMethodType = delegateMethodType; this.delegateInjections = delegateInjections; this.factoryMethodType = factoryMethodType; + this.factoryMethodReceiver = factoryMethodReceiver; + } + + /** Get the factory method type, with updated receiver if {@code factoryMethodReceiver} is set */ + public String getFactoryMethodDescriptor() { + if (factoryMethodReceiver == null) { + return factoryMethodType.toMethodDescriptorString(); + } + List arguments = factoryMethodType.parameterList().stream().map(Type::getType).collect(Collectors.toList()); + arguments.add(0, factoryMethodReceiver); + Type[] argArray = new Type[arguments.size()]; + arguments.toArray(argArray); + return Type.getMethodDescriptor(Type.getType(factoryMethodType.returnType()), argArray); + } + + /** Get the factory method type, updating the receiver if {@code factoryMethodReceiverClass} is non-null */ + public Class[] factoryMethodParameters(Class factoryMethodReceiverClass) { + List> parameters = new ArrayList<>(factoryMethodType.parameterList()); + if (factoryMethodReceiverClass != null) { + parameters.add(0, factoryMethodReceiverClass); + } + return parameters.toArray(new Class[0]); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java index 6492ae361b1f9..6d8d58e43ede3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java @@ -23,6 +23,8 @@ import java.lang.invoke.MethodType; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.List; +import java.util.stream.Collectors; import static java.lang.invoke.MethodHandles.Lookup; import static org.elasticsearch.painless.WriterConstants.CLASS_VERSION; @@ -392,6 +394,8 @@ private static void generateInterfaceMethod( // Loads any passed in arguments onto the stack. iface.loadArgs(); + String functionalInterfaceWithCaptures; + // Handles the case for a lambda function or a static reference method. // interfaceMethodType and delegateMethodType both have the captured types // inserted into their type signatures. 
This later allows the delegate @@ -402,6 +406,7 @@ private static void generateInterfaceMethod( if (delegateInvokeType == H_INVOKESTATIC) { interfaceMethodType = interfaceMethodType.insertParameterTypes(0, factoryMethodType.parameterArray()); + functionalInterfaceWithCaptures = interfaceMethodType.toMethodDescriptorString(); delegateMethodType = delegateMethodType.insertParameterTypes(0, factoryMethodType.parameterArray()); } else if (delegateInvokeType == H_INVOKEVIRTUAL || @@ -414,19 +419,32 @@ private static void generateInterfaceMethod( Class clazz = delegateMethodType.parameterType(0); delegateClassType = Type.getType(clazz); delegateMethodType = delegateMethodType.dropParameterTypes(0, 1); + functionalInterfaceWithCaptures = interfaceMethodType.toMethodDescriptorString(); // Handles the case for a virtual or interface reference method with 'this' // captured. interfaceMethodType inserts the 'this' type into its // method signature. This later allows the delegate // method to be invoked dynamically and have the interface method types // appropriately converted to the delegate method types. // Example: something::toString - } else if (captures.length == 1) { + } else { Class clazz = factoryMethodType.parameterType(0); delegateClassType = Type.getType(clazz); - interfaceMethodType = interfaceMethodType.insertParameterTypes(0, clazz); - } else { - throw new LambdaConversionException( - "unexpected number of captures [ " + captures.length + "]"); + + // functionalInterfaceWithCaptures needs to add the receiver and other captures + List parameters = interfaceMethodType.parameterList().stream().map(Type::getType).collect(Collectors.toList()); + parameters.add(0, delegateClassType); + for (int i = 1; i < captures.length; i++) { + parameters.add(i, captures[i].type); + } + Type[] parametersArray = parameters.toArray(new Type[0]); + functionalInterfaceWithCaptures = Type.getMethodDescriptor(Type.getType(interfaceMethodType.returnType()), parametersArray); + + // delegateMethod does not need the receiver + List> factoryParameters = factoryMethodType.parameterList(); + if (factoryParameters.size() > 1) { + List> factoryParametersWithReceiver = factoryParameters.subList(1, factoryParameters.size()); + delegateMethodType = delegateMethodType.insertParameterTypes(0, factoryParametersWithReceiver); + } } } else { throw new IllegalStateException( @@ -445,7 +463,7 @@ private static void generateInterfaceMethod( System.arraycopy(injections, 0, args, 2, injections.length); iface.invokeDynamic( delegateMethodName, - Type.getMethodType(interfaceMethodType.toMethodDescriptorString()).getDescriptor(), + Type.getMethodType(functionalInterfaceWithCaptures).getDescriptor(), DELEGATE_BOOTSTRAP_HANDLE, args); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index 07f0b122ff1a5..237a006293a8d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -514,11 +514,6 @@ public void invokeLambdaCall(FunctionRef functionRef) { args[6] = functionRef.isDelegateAugmented ? 
1 : 0; System.arraycopy(functionRef.delegateInjections, 0, args, 7, functionRef.delegateInjections.length); - invokeDynamic( - functionRef.interfaceMethodName, - functionRef.factoryMethodType.toMethodDescriptorString(), - LAMBDA_BOOTSTRAP_HANDLE, - args - ); + invokeDynamic(functionRef.interfaceMethodName, functionRef.getFactoryMethodDescriptor(), LAMBDA_BOOTSTRAP_HANDLE, args); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 5a539b03383e3..b331007dfa16e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -30,6 +30,7 @@ import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; @@ -37,13 +38,10 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; -import org.elasticsearch.script.IngestScript; -import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -62,6 +60,26 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { private static final Map, List> whitelists; + private static final String[] BASE_WHITELIST_FILES = new String[] { + "org.elasticsearch.txt", + "org.elasticsearch.net.txt", + "org.elasticsearch.script.fields.txt", + "java.lang.txt", + "java.math.txt", + "java.text.txt", + "java.time.txt", + "java.time.chrono.txt", + "java.time.format.txt", + "java.time.temporal.txt", + "java.time.zone.txt", + "java.util.txt", + "java.util.function.txt", + "java.util.regex.txt", + "java.util.stream.txt" + }; + public static final List BASE_WHITELISTS = + Collections.singletonList(WhitelistLoader.loadFromResourceFiles( + PainlessPlugin.class, WhitelistAnnotationParser.BASE_ANNOTATION_PARSERS, BASE_WHITELIST_FILES)); /* * Contexts from Core that need custom whitelists can add them to the map below. 
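The rewritten static initializer just below stops hard-coding per-context whitelists and instead probes for an optional resource per core script context, named after the context. A small sketch of that file-naming convention (the context names shown are illustrative):

```java
// Sketch of the per-context whitelist resource naming used by the new
// PainlessPlugin static initializer: '-' in context names maps to '_'.
public final class ContextWhitelistNames {

    static String whitelistResourceFor(String contextName) {
        return "org.elasticsearch.script." + contextName.replace('-', '_') + ".txt";
    }

    public static void main(String[] args) {
        System.out.println(whitelistResourceFor("score"));           // org.elasticsearch.script.score.txt
        System.out.println(whitelistResourceFor("moving-function"));  // org.elasticsearch.script.moving_function.txt
    }
}
```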
@@ -69,49 +87,28 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin, Extens * under Painless' resources */ static { - Map, List> map = new HashMap<>(); - - // Moving Function Pipeline Agg - List movFn = new ArrayList<>(Whitelist.BASE_WHITELISTS); - Whitelist movFnWhitelist = WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.aggs.movfn.txt"); - movFn.add(movFnWhitelist); - map.put(MovingFunctionScript.CONTEXT, movFn); - - // Functions used for scoring docs - List scoreFn = new ArrayList<>(Whitelist.BASE_WHITELISTS); - Whitelist scoreFnWhitelist = WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.score.txt"); - scoreFn.add(scoreFnWhitelist); - map.put(ScoreScript.CONTEXT, scoreFn); - - // Functions available to ingest pipelines - List ingest = new ArrayList<>(Whitelist.BASE_WHITELISTS); - Whitelist ingestWhitelist = WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.ingest.txt"); - ingest.add(ingestWhitelist); - map.put(IngestScript.CONTEXT, ingest); - - // Functions available to runtime fields - - for (ScriptContext scriptContext : ScriptModule.RUNTIME_FIELDS_CONTEXTS) { - map.put(scriptContext, getRuntimeFieldWhitelist(scriptContext.name)); - } - - // Execute context gets everything - List test = new ArrayList<>(Whitelist.BASE_WHITELISTS); - test.add(movFnWhitelist); - test.add(scoreFnWhitelist); - test.add(ingestWhitelist); - test.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.json.txt")); - map.put(PainlessExecuteAction.PainlessTestScript.CONTEXT, test); + whitelists = new HashMap<>(); + + for (ScriptContext context : ScriptModule.CORE_CONTEXTS.values()) { + List contextWhitelists = new ArrayList<>(); + if (PainlessPlugin.class.getResourceAsStream("org.elasticsearch.script." + context.name.replace('-', '_') + ".txt") != null) { + contextWhitelists.add( + WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, + "org.elasticsearch.script." + context.name.replace('-', '_') + ".txt") + ); + } - whitelists = map; - } + whitelists.put(context, contextWhitelists); + } - private static List getRuntimeFieldWhitelist(String contextName) { - List scriptField = new ArrayList<>(Whitelist.BASE_WHITELISTS); - Whitelist whitelist = WhitelistLoader.loadFromResourceFiles(Whitelist.class, - "org.elasticsearch.script." 
+ contextName + ".txt"); - scriptField.add(whitelist); - return scriptField; + List testWhitelists = new ArrayList<>(); + for (ScriptContext context : ScriptModule.CORE_CONTEXTS.values()) { + if (ScriptModule.RUNTIME_FIELDS_CONTEXTS.contains(context) == false) { + testWhitelists.addAll(whitelists.get(context)); + } + } + testWhitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.json.txt")); + whitelists.put(PainlessExecuteAction.PainlessTestScript.CONTEXT, testWhitelists); } private final SetOnce painlessScriptEngine = new SetOnce<>(); @@ -123,7 +120,9 @@ public ScriptEngine getScriptEngine(Settings settings, Collection contextWhitelists = whitelists.get(context); if (contextWhitelists == null) { - contextWhitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + contextWhitelists = new ArrayList<>(BASE_WHITELISTS); + } else { + contextWhitelists.addAll(BASE_WHITELISTS); } contextsWithWhitelists.put(context, contextWhitelists); } @@ -153,8 +152,7 @@ public void loadExtensions(ExtensionLoader loader) { loader.loadExtensions(PainlessExtension.class).stream() .flatMap(extension -> extension.getContextWhitelists().entrySet().stream()) .forEach(entry -> { - List existing = whitelists.computeIfAbsent(entry.getKey(), - c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); + List existing = whitelists.computeIfAbsent(entry.getKey(), c -> new ArrayList<>()); existing.addAll(entry.getValue()); }); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 922455e64f471..aa567750c6815 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -296,8 +296,6 @@ private T generateFactory( reflect = method; } else if ("newFactory".equals(method.getName())) { reflect = method; - } else if ("docFields".equals(method.getName())) { - docFieldsReflect = method; } } @@ -330,32 +328,6 @@ private T generateFactory( deterAdapter.returnValue(); deterAdapter.endMethod(); - if (docFieldsReflect != null) { - if (false == docFieldsReflect.getReturnType().equals(List.class)) { - throw new IllegalArgumentException("doc_fields must return a List"); - } - if (docFieldsReflect.getParameterCount() != 0) { - throw new IllegalArgumentException("doc_fields may not take parameters"); - } - org.objectweb.asm.commons.Method docFields = new org.objectweb.asm.commons.Method(docFieldsReflect.getName(), - MethodType.methodType(List.class).toMethodDescriptorString()); - GeneratorAdapter docAdapter = new GeneratorAdapter(Opcodes.ASM5, docFields, - writer.visitMethod(Opcodes.ACC_PUBLIC, docFieldsReflect.getName(), docFields.getDescriptor(), null, null)); - docAdapter.visitCode(); - docAdapter.newInstance(WriterConstants.ARRAY_LIST_TYPE); - docAdapter.dup(); - docAdapter.push(scriptScope.docFields().size()); - docAdapter.invokeConstructor(WriterConstants.ARRAY_LIST_TYPE, WriterConstants.ARRAY_LIST_CTOR_WITH_SIZE); - for (int i = 0; i < scriptScope.docFields().size(); i++) { - docAdapter.dup(); - docAdapter.push(scriptScope.docFields().get(i)); - docAdapter.invokeInterface(WriterConstants.LIST_TYPE, WriterConstants.LIST_ADD); - docAdapter.pop(); // Don't want the result of calling add - } - docAdapter.returnValue(); - docAdapter.endMethod(); - } - writer.visitEnd(); Class factory = loader.defineFactory(className.replace('/', 
'.'), writer.toByteArray()); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 491974ccc873f..6899d58b17cfb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -34,9 +34,9 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.geo.GeometryFormatterFactory; import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.geo.GeoFormatterFactory; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; @@ -51,7 +51,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -69,7 +68,9 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.script.BooleanFieldScript; +import org.elasticsearch.script.CompositeFieldScript; import org.elasticsearch.script.DateFieldScript; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.DoubleFieldScript; import org.elasticsearch.script.FilterScript; import org.elasticsearch.script.GeoPointFieldScript; @@ -82,6 +83,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.StringFieldScript; +import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -519,9 +521,9 @@ static Response innerShardOperation(Request request, ScriptService scriptService } else if (scriptContext == FilterScript.CONTEXT) { return prepareRamIndex(request, (context, leafReaderContext) -> { FilterScript.Factory factory = scriptService.compile(request.script, FilterScript.CONTEXT); - FilterScript.LeafFactory leafFactory = - factory.newFactory(request.getScript().getParams(), context.lookup()); - FilterScript filterScript = leafFactory.newInstance(leafReaderContext); + SearchLookup lookup = context.lookup(); + FilterScript.LeafFactory leafFactory = factory.newFactory(request.getScript().getParams(), lookup); + FilterScript filterScript = leafFactory.newInstance(new DocValuesDocReader(lookup, leafReaderContext)); filterScript.setDocument(0); boolean result = filterScript.execute(); return new Response(result); @@ -529,9 +531,10 @@ static Response innerShardOperation(Request request, ScriptService scriptService } else if (scriptContext == ScoreScript.CONTEXT) { return prepareRamIndex(request, (context, leafReaderContext) -> { ScoreScript.Factory factory = scriptService.compile(request.script, ScoreScript.CONTEXT); + SearchLookup lookup = context.lookup(); ScoreScript.LeafFactory leafFactory = - factory.newFactory(request.getScript().getParams(), context.lookup()); - ScoreScript scoreScript = 
leafFactory.newInstance(leafReaderContext); + factory.newFactory(request.getScript().getParams(), lookup); + ScoreScript scoreScript = leafFactory.newInstance(new DocValuesDocReader(lookup, leafReaderContext)); scoreScript.setDocument(0); if (request.contextSetup.query != null) { @@ -588,12 +591,9 @@ static Response innerShardOperation(Request request, ScriptService scriptService List points = new ArrayList<>(); geoPointFieldScript.runGeoPointForDoc(0, gp -> points.add(new GeoPoint(gp))); // convert geo points to the standard format of the fields api - Function format = GeoFormatterFactory.getFormatter(GeoFormatterFactory.GEOJSON); - List objects = new ArrayList<>(); - for (GeoPoint gp : points) { - objects.add(format.apply(new Point(gp.getLon(), gp.getLat()))); - } - return new Response(objects); + Function, List> format = + GeometryFormatterFactory.getFormatter(GeometryFormatterFactory.GEOJSON, p -> new Point(p.lon(), p.lat())); + return new Response(format.apply(points)); }, indexService); } else if (scriptContext == IpFieldScript.CONTEXT) { return prepareRamIndex(request, (context, leafReaderContext) -> { @@ -625,12 +625,20 @@ static Response innerShardOperation(Request request, ScriptService scriptService return prepareRamIndex(request, (context, leafReaderContext) -> { StringFieldScript.Factory factory = scriptService.compile(request.script, StringFieldScript.CONTEXT); StringFieldScript.LeafFactory leafFactory = - factory.newFactory(StringFieldScript.CONTEXT.name, request.getScript().getParams(), context.lookup()); + factory.newFactory(StringFieldScript.CONTEXT.name, request.getScript().getParams(), context.lookup()); StringFieldScript stringFieldScript = leafFactory.newInstance(leafReaderContext); List keywords = new ArrayList<>(); stringFieldScript.runForDoc(0, keywords::add); return new Response(keywords); }, indexService); + } else if (scriptContext == CompositeFieldScript.CONTEXT) { + return prepareRamIndex(request, (context, leafReaderContext) -> { + CompositeFieldScript.Factory factory = scriptService.compile(request.script, CompositeFieldScript.CONTEXT); + CompositeFieldScript.LeafFactory leafFactory = + factory.newFactory(CompositeFieldScript.CONTEXT.name, request.getScript().getParams(), context.lookup()); + CompositeFieldScript compositeFieldScript = leafFactory.newInstance(leafReaderContext); + return new Response(compositeFieldScript.runForDoc(0)); + }, indexService); } else { throw new UnsupportedOperationException("unsupported context [" + scriptContext.name + "]"); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 91c1ba2c7f7a6..608ad449be3bb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -273,7 +273,7 @@ public ANode visitFunction(FunctionContext ctx) { } return new SFunction(nextIdentifier(), location(ctx), - rtnType, name, paramTypes, paramNames, new SBlock(nextIdentifier(), location(ctx), statements), false, true, false, false); + rtnType, name, paramTypes, paramNames, new SBlock(nextIdentifier(), location(ctx), statements), false, false, false, false); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index 
cd5214366553f..98e7255a6ab72 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -46,6 +46,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import java.util.regex.Pattern; import static org.elasticsearch.painless.WriterConstants.DEF_TO_B_BYTE_IMPLICIT; @@ -213,6 +214,21 @@ private boolean isValidType(Class type) { return type == def.class || classesToPainlessClassBuilders.containsKey(type); } + private Class loadClass(ClassLoader classLoader, String javaClassName, Supplier errorMessage) { + try { + return Class.forName(javaClassName, true, classLoader); + } catch (ClassNotFoundException cnfe) { + try { + // Painless provides some api classes that are available only through the painless implementation. + return Class.forName(javaClassName); + } catch (ClassNotFoundException cnfe2) { + IllegalArgumentException iae = new IllegalArgumentException(errorMessage.get(), cnfe2); + cnfe2.addSuppressed(cnfe); + throw iae; + } + } + } + public void addPainlessClass(ClassLoader classLoader, String javaClassName, boolean importClassName) { Objects.requireNonNull(classLoader); Objects.requireNonNull(javaClassName); @@ -229,11 +245,7 @@ public void addPainlessClass(ClassLoader classLoader, String javaClassName, bool else if ("float".equals(javaClassName)) clazz = float.class; else if ("double".equals(javaClassName)) clazz = double.class; else { - try { - clazz = Class.forName(javaClassName, true, classLoader); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("class [" + javaClassName + "] not found", cnfe); - } + clazz = loadClass(classLoader, javaClassName, () -> "class [" + javaClassName + "] not found"); } addPainlessClass(clazz, importClassName); @@ -425,12 +437,9 @@ public void addPainlessMethod(ClassLoader classLoader, String targetCanonicalCla Class augmentedClass = null; if (augmentedCanonicalClassName != null) { - try { - augmentedClass = Class.forName(augmentedCanonicalClassName, true, classLoader); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("augmented class [" + augmentedCanonicalClassName + "] not found for method " + - "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]", cnfe); - } + augmentedClass = loadClass(classLoader, augmentedCanonicalClassName, + () -> "augmented class [" + augmentedCanonicalClassName + "] not found for method " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); @@ -735,14 +744,7 @@ public void addImportedPainlessMethod(ClassLoader classLoader, String targetJava Objects.requireNonNull(returnCanonicalTypeName); Objects.requireNonNull(canonicalTypeNameParameters); - Class targetClass; - - try { - targetClass = Class.forName(targetJavaClassName, true, classLoader); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("class [" + targetJavaClassName + "] not found", cnfe); - } - + Class targetClass = loadClass(classLoader, targetJavaClassName, () -> "class [" + targetJavaClassName + "] not found"); String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); if (targetClass == null) { @@ -888,14 +890,7 @@ public void addPainlessClassBinding(ClassLoader classLoader, String 
targetJavaCl Objects.requireNonNull(returnCanonicalTypeName); Objects.requireNonNull(canonicalTypeNameParameters); - Class targetClass; - - try { - targetClass = Class.forName(targetJavaClassName, true, classLoader); - } catch (ClassNotFoundException cnfe) { - throw new IllegalArgumentException("class [" + targetJavaClassName + "] not found", cnfe); - } - + Class targetClass = loadClass(classLoader, targetJavaClassName, () -> "class [" + targetJavaClassName + "] not found"); String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultIRTreeToASMBytesPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultIRTreeToASMBytesPhase.java index 5abc28a2a1e80..d265a514a453c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultIRTreeToASMBytesPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultIRTreeToASMBytesPhase.java @@ -99,6 +99,7 @@ import org.elasticsearch.painless.symbol.IRDecorations.IRCCaptureBox; import org.elasticsearch.painless.symbol.IRDecorations.IRCContinuous; import org.elasticsearch.painless.symbol.IRDecorations.IRCInitialize; +import org.elasticsearch.painless.symbol.IRDecorations.IRCInstanceCapture; import org.elasticsearch.painless.symbol.IRDecorations.IRCStatic; import org.elasticsearch.painless.symbol.IRDecorations.IRCSynthetic; import org.elasticsearch.painless.symbol.IRDecorations.IRCVarArgs; @@ -141,6 +142,7 @@ import org.elasticsearch.painless.symbol.IRDecorations.IRDSize; import org.elasticsearch.painless.symbol.IRDecorations.IRDStoreType; import org.elasticsearch.painless.symbol.IRDecorations.IRDSymbol; +import org.elasticsearch.painless.symbol.IRDecorations.IRDThisMethod; import org.elasticsearch.painless.symbol.IRDecorations.IRDTypeParameters; import org.elasticsearch.painless.symbol.IRDecorations.IRDUnaryType; import org.elasticsearch.painless.symbol.IRDecorations.IRDValue; @@ -1226,6 +1228,11 @@ public void visitDefInterfaceReference(DefInterfaceReferenceNode irDefInterfaceR // which is resolved and replace at runtime methodWriter.push((String)null); + if (irDefInterfaceReferenceNode.hasCondition(IRCInstanceCapture.class)) { + Variable capturedThis = writeScope.getInternalVariable("this"); + methodWriter.visitVarInsn(CLASS_TYPE.getOpcode(Opcodes.ILOAD), capturedThis.getSlot()); + } + List captureNames = irDefInterfaceReferenceNode.getDecorationValue(IRDCaptureNames.class); boolean captureBox = irDefInterfaceReferenceNode.hasCondition(IRCCaptureBox.class); @@ -1247,6 +1254,11 @@ public void visitTypedInterfaceReference(TypedInterfaceReferenceNode irTypedInte MethodWriter methodWriter = writeScope.getMethodWriter(); methodWriter.writeDebugInfo(irTypedInterfaceReferenceNode.getLocation()); + if (irTypedInterfaceReferenceNode.hasCondition(IRCInstanceCapture.class)) { + Variable capturedThis = writeScope.getInternalVariable("this"); + methodWriter.visitVarInsn(CLASS_TYPE.getOpcode(Opcodes.ILOAD), capturedThis.getSlot()); + } + List captureNames = irTypedInterfaceReferenceNode.getDecorationValue(IRDCaptureNames.class); boolean captureBox = irTypedInterfaceReferenceNode.hasCondition(IRCCaptureBox.class); @@ -1576,7 +1588,12 @@ public void visitInvokeCallDef(InvokeCallDefNode irInvokeCallDefNode, WriteScope DefInterfaceReferenceNode defInterfaceReferenceNode = 
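The loadClass helper added to PainlessLookupBuilder above first consults the supplied (plugin) classloader and only then falls back to the Painless implementation's own classloader, so whitelists can reference API classes that only Painless itself can see. A self-contained sketch of the same fallback pattern, with a hypothetical caller:

```java
// Two-step class lookup mirroring PainlessLookupBuilder#loadClass: plugin classloader
// first, then this class's own classloader; both failures are reported together.
import java.util.function.Supplier;

public final class FallbackClassLoading {

    static Class<?> loadClass(ClassLoader pluginLoader, String className, Supplier<String> errorMessage) {
        try {
            return Class.forName(className, true, pluginLoader);
        } catch (ClassNotFoundException fromPlugin) {
            try {
                // Fallback: resolve against the caller's own classloader.
                return Class.forName(className);
            } catch (ClassNotFoundException fromSelf) {
                IllegalArgumentException failure = new IllegalArgumentException(errorMessage.get(), fromSelf);
                fromSelf.addSuppressed(fromPlugin);
                throw failure;
            }
        }
    }

    public static void main(String[] args) {
        Class<?> clazz = loadClass(FallbackClassLoading.class.getClassLoader(), "java.util.ArrayList",
            () -> "class [java.util.ArrayList] not found");
        System.out.println(clazz);
    }
}
```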
(DefInterfaceReferenceNode)irArgumentNode; List captureNames = defInterfaceReferenceNode.getDecorationValueOrDefault(IRDCaptureNames.class, Collections.emptyList()); - boostrapArguments.add(defInterfaceReferenceNode.getDecorationValue(IRDDefReferenceEncoding.class)); + boostrapArguments.add(defInterfaceReferenceNode.getDecorationValue(IRDDefReferenceEncoding.class).toString()); + + if (defInterfaceReferenceNode.hasCondition(IRCInstanceCapture.class)) { + capturedCount++; + typeParameters.add(ScriptThis.class); + } // the encoding uses a char to indicate the number of captures // where the value is the number of current arguments plus the @@ -1596,7 +1613,12 @@ public void visitInvokeCallDef(InvokeCallDefNode irInvokeCallDefNode, WriteScope Type[] asmParameterTypes = new Type[typeParameters.size()]; for (int index = 0; index < asmParameterTypes.length; ++index) { - asmParameterTypes[index] = MethodWriter.getType(typeParameters.get(index)); + Class typeParameter = typeParameters.get(index); + if (typeParameter.equals(ScriptThis.class)) { + asmParameterTypes[index] = CLASS_TYPE; + } else { + asmParameterTypes[index] = MethodWriter.getType(typeParameters.get(index)); + } } String methodName = irInvokeCallDefNode.getDecorationValue(IRDName.class); @@ -1629,6 +1651,7 @@ public void visitInvokeCallMember(InvokeCallMemberNode irInvokeCallMemberNode, W methodWriter.writeDebugInfo(irInvokeCallMemberNode.getLocation()); LocalFunction localFunction = irInvokeCallMemberNode.getDecorationValue(IRDFunction.class); + PainlessMethod thisMethod = irInvokeCallMemberNode.getDecorationValue(IRDThisMethod.class); PainlessMethod importedMethod = irInvokeCallMemberNode.getDecorationValue(IRDMethod.class); PainlessClassBinding classBinding = irInvokeCallMemberNode.getDecorationValue(IRDClassBinding.class); PainlessInstanceBinding instanceBinding = irInvokeCallMemberNode.getDecorationValue(IRDInstanceBinding.class); @@ -1648,6 +1671,16 @@ public void visitInvokeCallMember(InvokeCallMemberNode irInvokeCallMemberNode, W } else { methodWriter.invokeVirtual(CLASS_TYPE, localFunction.getAsmMethod()); } + } else if (thisMethod != null) { + methodWriter.loadThis(); + + for (ExpressionNode irArgumentNode : irArgumentNodes) { + visit(irArgumentNode, writeScope); + } + + Method asmMethod = new Method(thisMethod.javaMethod.getName(), + thisMethod.methodType.dropParameterTypes(0, 1).toMethodDescriptorString()); + methodWriter.invokeVirtual(CLASS_TYPE, asmMethod); } else if (importedMethod != null) { for (ExpressionNode irArgumentNode : irArgumentNodes) { visit(irArgumentNode, writeScope); @@ -1763,4 +1796,7 @@ public void visitDup(DupNode irDupNode, WriteScope writeScope) { methodWriter.writeDup(size, depth); } + + // placeholder class referring to the script instance + private static final class ScriptThis {} } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java index fc744b51325f8..61934ee741f35 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java @@ -88,6 +88,8 @@ import org.elasticsearch.painless.symbol.Decorations.ExpressionPainlessCast; import org.elasticsearch.painless.symbol.Decorations.GetterPainlessMethod; import org.elasticsearch.painless.symbol.Decorations.InLoop; +import 
org.elasticsearch.painless.symbol.Decorations.InstanceCapturingFunctionRef; +import org.elasticsearch.painless.symbol.Decorations.InstanceCapturingLambda; import org.elasticsearch.painless.symbol.Decorations.InstanceType; import org.elasticsearch.painless.symbol.Decorations.Internal; import org.elasticsearch.painless.symbol.Decorations.IterablePainlessMethod; @@ -117,6 +119,7 @@ import org.elasticsearch.painless.symbol.Decorations.StandardPainlessMethod; import org.elasticsearch.painless.symbol.Decorations.StaticType; import org.elasticsearch.painless.symbol.Decorations.TargetType; +import org.elasticsearch.painless.symbol.Decorations.ThisPainlessMethod; import org.elasticsearch.painless.symbol.Decorations.TypeParameters; import org.elasticsearch.painless.symbol.Decorations.UnaryType; import org.elasticsearch.painless.symbol.Decorations.UpcastPainlessCast; @@ -1712,6 +1715,7 @@ public void visitCallLocal(ECallLocal userCallLocalNode, SemanticScope semanticS ScriptScope scriptScope = semanticScope.getScriptScope(); FunctionTable.LocalFunction localFunction = null; + PainlessMethod thisMethod = null; PainlessMethod importedMethod = null; PainlessClassBinding classBinding = null; int classBindingOffset = 0; @@ -1727,41 +1731,46 @@ public void visitCallLocal(ECallLocal userCallLocalNode, SemanticScope semanticS } if (localFunction == null) { - importedMethod = scriptScope.getPainlessLookup().lookupImportedPainlessMethod(methodName, userArgumentsSize); + thisMethod = scriptScope.getPainlessLookup().lookupPainlessMethod( + scriptScope.getScriptClassInfo().getBaseClass(), false, methodName, userArgumentsSize); - if (importedMethod == null) { - classBinding = scriptScope.getPainlessLookup().lookupPainlessClassBinding(methodName, userArgumentsSize); + if (thisMethod == null) { + importedMethod = scriptScope.getPainlessLookup().lookupImportedPainlessMethod(methodName, userArgumentsSize); - // check to see if this class binding requires an implicit this reference - if (classBinding != null && classBinding.typeParameters.isEmpty() == false && - classBinding.typeParameters.get(0) == scriptScope.getScriptClassInfo().getBaseClass()) { - classBinding = null; - } + if (importedMethod == null) { + classBinding = scriptScope.getPainlessLookup().lookupPainlessClassBinding(methodName, userArgumentsSize); - if (classBinding == null) { - // This extra check looks for a possible match where the class binding requires an implicit this - // reference. This is a temporary solution to allow the class binding access to data from the - // base script class without need for a user to add additional arguments. A long term solution - // will likely involve adding a class instance binding where any instance can have a class binding - // as part of its API. However, the situation at run-time is difficult and will modifications that - // are a substantial change if even possible to do. 
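The visitCallLocal change in this hunk widens the lookup so a bare call can resolve to a whitelisted instance method on the script's base class before imported methods and bindings are considered. A rough, self-contained sketch of that precedence, using hypothetical stand-in lookups rather than the real PainlessLookup queries:

```java
// Illustrative sketch of the call resolution order after this patch; each lookup is a
// stand-in that returns a target name or null.
import java.util.Map;
import java.util.function.BiFunction;

public final class CallResolutionSketch {

    static String resolve(Map<String, BiFunction<String, Integer, String>> lookups,
                          String methodName, int argumentCount) {
        // Local functions win, then instance methods on the script base class (new here),
        // then imported methods, class bindings, and finally instance bindings.
        String[] order = {"localFunction", "thisMethod", "importedMethod", "classBinding", "instanceBinding"};
        for (String kind : order) {
            String target = lookups.getOrDefault(kind, (n, a) -> null).apply(methodName, argumentCount);
            if (target != null) {
                return kind + " -> " + target;
            }
        }
        throw new IllegalArgumentException(
            "Unknown call [" + methodName + "] with [" + argumentCount + "] arguments.");
    }

    public static void main(String[] args) {
        // Only a "this method" matches, e.g. an emit(...) method on a field script base class.
        System.out.println(resolve(Map.of("thisMethod", (n, a) -> "base-class method " + n), "emit", 1));
    }
}
```

When the local-function or this-method path is taken, the scope is marked as using an instance method, which later keeps the synthetic lambda method non-static so the script instance can be captured.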
- classBinding = scriptScope.getPainlessLookup().lookupPainlessClassBinding(methodName, userArgumentsSize + 1); - - if (classBinding != null) { - if (classBinding.typeParameters.isEmpty() == false && - classBinding.typeParameters.get(0) == scriptScope.getScriptClassInfo().getBaseClass()) { - classBindingOffset = 1; - } else { - classBinding = null; - } + // check to see if this class binding requires an implicit this reference + if (classBinding != null && classBinding.typeParameters.isEmpty() == false && + classBinding.typeParameters.get(0) == scriptScope.getScriptClassInfo().getBaseClass()) { + classBinding = null; } if (classBinding == null) { - instanceBinding = scriptScope.getPainlessLookup().lookupPainlessInstanceBinding(methodName, userArgumentsSize); + // This extra check looks for a possible match where the class binding requires an implicit this + // reference. This is a temporary solution to allow the class binding access to data from the + // base script class without need for a user to add additional arguments. A long term solution + // will likely involve adding a class instance binding where any instance can have a class binding + // as part of its API. However, the situation at run-time is difficult and will modifications that + // are a substantial change if even possible to do. + classBinding = scriptScope.getPainlessLookup().lookupPainlessClassBinding(methodName, userArgumentsSize + 1); + + if (classBinding != null) { + if (classBinding.typeParameters.isEmpty() == false && + classBinding.typeParameters.get(0) == scriptScope.getScriptClassInfo().getBaseClass()) { + classBindingOffset = 1; + } else { + classBinding = null; + } + } + + if (classBinding == null) { + instanceBinding = scriptScope.getPainlessLookup().lookupPainlessInstanceBinding(methodName, userArgumentsSize); - if (instanceBinding == null) { - throw userCallLocalNode.createError(new IllegalArgumentException( - "Unknown call [" + methodName + "] with [" + userArgumentNodes + "] arguments.")); + if (instanceBinding == null) { + throw userCallLocalNode.createError(new IllegalArgumentException( + "Unknown call [" + methodName + "] with [" + userArgumentsSize + "] arguments.")); + } } } } @@ -1771,10 +1780,18 @@ public void visitCallLocal(ECallLocal userCallLocalNode, SemanticScope semanticS List> typeParameters; if (localFunction != null) { + semanticScope.setUsesInstanceMethod(); semanticScope.putDecoration(userCallLocalNode, new StandardLocalFunction(localFunction)); typeParameters = new ArrayList<>(localFunction.getTypeParameters()); valueType = localFunction.getReturnType(); + } else if (thisMethod != null) { + semanticScope.setUsesInstanceMethod(); + semanticScope.putDecoration(userCallLocalNode, new ThisPainlessMethod(thisMethod)); + + scriptScope.markNonDeterministic(thisMethod.annotations.containsKey(NonDeterministicAnnotation.class)); + typeParameters = new ArrayList<>(thisMethod.typeParameters); + valueType = thisMethod.returnType; } else if (importedMethod != null) { semanticScope.putDecoration(userCallLocalNode, new StandardPainlessMethod(importedMethod)); @@ -2195,6 +2212,10 @@ public void visitLambda(ELambda userLambdaNode, SemanticScope semanticScope) { semanticScope.setCondition(userBlockNode, LastSource.class); visit(userBlockNode, lambdaScope); + if (lambdaScope.usesInstanceMethod()) { + semanticScope.setCondition(userLambdaNode, InstanceCapturingLambda.class); + } + if (semanticScope.getCondition(userBlockNode, MethodEscape.class) == false) { throw userLambdaNode.createError(new 
IllegalArgumentException("not all paths return a value for lambda")); } @@ -2214,18 +2235,19 @@ public void visitLambda(ELambda userLambdaNode, SemanticScope semanticScope) { // desugar lambda body into a synthetic method String name = scriptScope.getNextSyntheticName("lambda"); - scriptScope.getFunctionTable().addFunction(name, returnType, typeParametersWithCaptures, true, true); + boolean isStatic = lambdaScope.usesInstanceMethod() == false; + scriptScope.getFunctionTable().addFunction(name, returnType, typeParametersWithCaptures, true, isStatic); Class valueType; // setup method reference to synthetic method if (targetType == null) { - String defReferenceEncoding = "Sthis." + name + "," + capturedVariables.size(); valueType = String.class; - semanticScope.putDecoration(userLambdaNode, new EncodingDecoration(defReferenceEncoding)); + semanticScope.putDecoration(userLambdaNode, + new EncodingDecoration(true, lambdaScope.usesInstanceMethod(), "this", name, capturedVariables.size())); } else { FunctionRef ref = FunctionRef.create(scriptScope.getPainlessLookup(), scriptScope.getFunctionTable(), location, targetType.getTargetType(), "this", name, capturedVariables.size(), - scriptScope.getCompilerSettings().asMap()); + scriptScope.getCompilerSettings().asMap(), lambdaScope.usesInstanceMethod()); valueType = targetType.getTargetType(); semanticScope.putDecoration(userLambdaNode, new ReferenceDecoration(ref)); } @@ -2256,7 +2278,8 @@ public void visitFunctionRef(EFunctionRef userFunctionRefNode, SemanticScope sem TargetType targetType = semanticScope.getDecoration(userFunctionRefNode, TargetType.class); Class valueType; - if (symbol.equals("this") || type != null) { + boolean isInstanceReference = "this".equals(symbol); + if (isInstanceReference || type != null) { if (semanticScope.getCondition(userFunctionRefNode, Write.class)) { throw userFunctionRefNode.createError(new IllegalArgumentException( "invalid assignment: cannot assign a value to function reference [" + symbol + ":" + methodName + "]")); @@ -2267,14 +2290,16 @@ public void visitFunctionRef(EFunctionRef userFunctionRefNode, SemanticScope sem "not a statement: function reference [" + symbol + ":" + methodName + "] not used")); } + if (isInstanceReference) { + semanticScope.setCondition(userFunctionRefNode, InstanceCapturingFunctionRef.class); + } if (targetType == null) { valueType = String.class; - String defReferenceEncoding = "S" + symbol + "." + methodName + ",0"; - semanticScope.putDecoration(userFunctionRefNode, new EncodingDecoration(defReferenceEncoding)); + semanticScope.putDecoration(userFunctionRefNode, new EncodingDecoration(true, isInstanceReference, symbol, methodName, 0)); } else { FunctionRef ref = FunctionRef.create(scriptScope.getPainlessLookup(), scriptScope.getFunctionTable(), location, targetType.getTargetType(), symbol, methodName, 0, - scriptScope.getCompilerSettings().asMap()); + scriptScope.getCompilerSettings().asMap(), isInstanceReference); valueType = targetType.getTargetType(); semanticScope.putDecoration(userFunctionRefNode, new ReferenceDecoration(ref)); } @@ -2297,23 +2322,23 @@ public void visitFunctionRef(EFunctionRef userFunctionRefNode, SemanticScope sem } if (targetType == null) { - String defReferenceEncoding; + EncodingDecoration encodingDecoration; if (captured.getType() == def.class) { // dynamic implementation - defReferenceEncoding = "D" + symbol + "." 
+ methodName + ",1"; + encodingDecoration = new EncodingDecoration(false, false, symbol, methodName, 1); } else { // typed implementation - defReferenceEncoding = "S" + captured.getCanonicalTypeName() + "." + methodName + ",1"; + encodingDecoration = new EncodingDecoration(true, false, captured.getCanonicalTypeName(), methodName, 1); } valueType = String.class; - semanticScope.putDecoration(userFunctionRefNode, new EncodingDecoration(defReferenceEncoding)); + semanticScope.putDecoration(userFunctionRefNode, encodingDecoration); } else { valueType = targetType.getTargetType(); // static case if (captured.getType() != def.class) { FunctionRef ref = FunctionRef.create(scriptScope.getPainlessLookup(), scriptScope.getFunctionTable(), location, targetType.getTargetType(), captured.getCanonicalTypeName(), methodName, 1, - scriptScope.getCompilerSettings().asMap()); + scriptScope.getCompilerSettings().asMap(), false); semanticScope.putDecoration(userFunctionRefNode, new ReferenceDecoration(ref)); } } @@ -2357,13 +2382,12 @@ public void visitNewArrayFunctionRef(ENewArrayFunctionRef userNewArrayFunctionRe semanticScope.putDecoration(userNewArrayFunctionRefNode, new MethodNameDecoration(name)); if (targetType == null) { - String defReferenceEncoding = "Sthis." + name + ",0"; valueType = String.class; - scriptScope.putDecoration(userNewArrayFunctionRefNode, new EncodingDecoration(defReferenceEncoding)); + scriptScope.putDecoration(userNewArrayFunctionRefNode, new EncodingDecoration(true, false, "this", name, 0)); } else { FunctionRef ref = FunctionRef.create(scriptScope.getPainlessLookup(), scriptScope.getFunctionTable(), userNewArrayFunctionRefNode.getLocation(), targetType.getTargetType(), "this", name, 0, - scriptScope.getCompilerSettings().asMap()); + scriptScope.getCompilerSettings().asMap(), false); valueType = targetType.getTargetType(); semanticScope.putDecoration(userNewArrayFunctionRefNode, new ReferenceDecoration(ref)); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java index 0bf3826f19a45..d67cc59b95045 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless.phase; +import org.elasticsearch.painless.Def; import org.elasticsearch.painless.DefBootstrap; import org.elasticsearch.painless.FunctionRef; import org.elasticsearch.painless.Location; @@ -158,6 +159,8 @@ import org.elasticsearch.painless.symbol.Decorations.ExpressionPainlessCast; import org.elasticsearch.painless.symbol.Decorations.GetterPainlessMethod; import org.elasticsearch.painless.symbol.Decorations.IRNodeDecoration; +import org.elasticsearch.painless.symbol.Decorations.InstanceCapturingLambda; +import org.elasticsearch.painless.symbol.Decorations.InstanceCapturingFunctionRef; import org.elasticsearch.painless.symbol.Decorations.InstanceType; import org.elasticsearch.painless.symbol.Decorations.IterablePainlessMethod; import org.elasticsearch.painless.symbol.Decorations.ListShortcut; @@ -182,6 +185,7 @@ import org.elasticsearch.painless.symbol.Decorations.StandardPainlessMethod; import org.elasticsearch.painless.symbol.Decorations.StaticType; import org.elasticsearch.painless.symbol.Decorations.TargetType; +import 
org.elasticsearch.painless.symbol.Decorations.ThisPainlessMethod; import org.elasticsearch.painless.symbol.Decorations.TypeParameters; import org.elasticsearch.painless.symbol.Decorations.UnaryType; import org.elasticsearch.painless.symbol.Decorations.UpcastPainlessCast; @@ -193,6 +197,7 @@ import org.elasticsearch.painless.symbol.IRDecorations.IRCCaptureBox; import org.elasticsearch.painless.symbol.IRDecorations.IRCContinuous; import org.elasticsearch.painless.symbol.IRDecorations.IRCInitialize; +import org.elasticsearch.painless.symbol.IRDecorations.IRCInstanceCapture; import org.elasticsearch.painless.symbol.IRDecorations.IRCRead; import org.elasticsearch.painless.symbol.IRDecorations.IRCStatic; import org.elasticsearch.painless.symbol.IRDecorations.IRCSynthetic; @@ -235,6 +240,7 @@ import org.elasticsearch.painless.symbol.IRDecorations.IRDSize; import org.elasticsearch.painless.symbol.IRDecorations.IRDStoreType; import org.elasticsearch.painless.symbol.IRDecorations.IRDSymbol; +import org.elasticsearch.painless.symbol.IRDecorations.IRDThisMethod; import org.elasticsearch.painless.symbol.IRDecorations.IRDTypeParameters; import org.elasticsearch.painless.symbol.IRDecorations.IRDUnaryType; import org.elasticsearch.painless.symbol.IRDecorations.IRDValue; @@ -1217,6 +1223,10 @@ public void visitCallLocal(ECallLocal callLocalNode, ScriptScope scriptScope) { if (scriptScope.hasDecoration(callLocalNode, StandardLocalFunction.class)) { LocalFunction localFunction = scriptScope.getDecoration(callLocalNode, StandardLocalFunction.class).getLocalFunction(); irInvokeCallMemberNode.attachDecoration(new IRDFunction(localFunction)); + } else if (scriptScope.hasDecoration(callLocalNode, ThisPainlessMethod.class)) { + PainlessMethod thisMethod = + scriptScope.getDecoration(callLocalNode, ThisPainlessMethod.class).getThisPainlessMethod(); + irInvokeCallMemberNode.attachDecoration(new IRDThisMethod(thisMethod)); } else if (scriptScope.hasDecoration(callLocalNode, StandardPainlessMethod.class)) { PainlessMethod importedMethod = scriptScope.getDecoration(callLocalNode, StandardPainlessMethod.class).getStandardPainlessMethod(); @@ -1356,7 +1366,12 @@ public void visitLambda(ELambda userLambdaNode, ScriptScope scriptScope) { new ArrayList<>(scriptScope.getDecoration(userLambdaNode, TypeParameters.class).getTypeParameters()))); irFunctionNode.attachDecoration(new IRDParameterNames( new ArrayList<>(scriptScope.getDecoration(userLambdaNode, ParameterNames.class).getParameterNames()))); - irFunctionNode.attachCondition(IRCStatic.class); + if (scriptScope.getCondition(userLambdaNode, InstanceCapturingLambda.class)) { + irFunctionNode.attachCondition(IRCInstanceCapture.class); + irExpressionNode.attachCondition(IRCInstanceCapture.class); + } else { + irFunctionNode.attachCondition(IRCStatic.class); + } irFunctionNode.attachCondition(IRCSynthetic.class); irFunctionNode.attachDecoration(new IRDMaxLoopCounter(scriptScope.getCompilerSettings().getMaxLoopCounter())); irClassNode.addFunctionNode(irFunctionNode); @@ -1386,9 +1401,12 @@ public void visitFunctionRef(EFunctionRef userFunctionRefNode, ScriptScope scrip CapturesDecoration capturesDecoration = scriptScope.getDecoration(userFunctionRefNode, CapturesDecoration.class); if (targetType == null) { - String encoding = scriptScope.getDecoration(userFunctionRefNode, EncodingDecoration.class).getEncoding(); + Def.Encoding encoding = scriptScope.getDecoration(userFunctionRefNode, EncodingDecoration.class).getEncoding(); DefInterfaceReferenceNode defInterfaceReferenceNode = 
new DefInterfaceReferenceNode(userFunctionRefNode.getLocation()); defInterfaceReferenceNode.attachDecoration(new IRDDefReferenceEncoding(encoding)); + if (scriptScope.getCondition(userFunctionRefNode, InstanceCapturingFunctionRef.class)) { + defInterfaceReferenceNode.attachCondition(IRCInstanceCapture.class); + } irReferenceNode = defInterfaceReferenceNode; } else if (capturesDecoration != null && capturesDecoration.getCaptures().get(0).getType() == def.class) { TypedCaptureReferenceNode typedCaptureReferenceNode = new TypedCaptureReferenceNode(userFunctionRefNode.getLocation()); @@ -1398,6 +1416,9 @@ public void visitFunctionRef(EFunctionRef userFunctionRefNode, ScriptScope scrip FunctionRef reference = scriptScope.getDecoration(userFunctionRefNode, ReferenceDecoration.class).getReference(); TypedInterfaceReferenceNode typedInterfaceReferenceNode = new TypedInterfaceReferenceNode(userFunctionRefNode.getLocation()); typedInterfaceReferenceNode.attachDecoration(new IRDReference(reference)); + if (scriptScope.getCondition(userFunctionRefNode, InstanceCapturingFunctionRef.class)) { + typedInterfaceReferenceNode.attachCondition(IRCInstanceCapture.class); + } irReferenceNode = typedInterfaceReferenceNode; } @@ -1427,7 +1448,7 @@ public void visitNewArrayFunctionRef(ENewArrayFunctionRef userNewArrayFunctionRe typedInterfaceReferenceNode.attachDecoration(new IRDReference(reference)); irReferenceNode = typedInterfaceReferenceNode; } else { - String encoding = scriptScope.getDecoration(userNewArrayFunctionRefNode, EncodingDecoration.class).getEncoding(); + Def.Encoding encoding = scriptScope.getDecoration(userNewArrayFunctionRefNode, EncodingDecoration.class).getEncoding(); DefInterfaceReferenceNode defInterfaceReferenceNode = new DefInterfaceReferenceNode(userNewArrayFunctionRefNode.getLocation()); defInterfaceReferenceNode.attachDecoration(new IRDDefReferenceEncoding(encoding)); irReferenceNode = defInterfaceReferenceNode; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DocFieldsPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DocFieldsPhase.java deleted file mode 100644 index 0843326979abd..0000000000000 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DocFieldsPhase.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.painless.phase; - -import org.elasticsearch.painless.node.AExpression; -import org.elasticsearch.painless.node.EBrace; -import org.elasticsearch.painless.node.ECall; -import org.elasticsearch.painless.node.EDot; -import org.elasticsearch.painless.node.EString; -import org.elasticsearch.painless.node.ESymbol; -import org.elasticsearch.painless.symbol.Decorations; -import org.elasticsearch.painless.symbol.ScriptScope; - -import java.util.List; - -/** - * Find all document field accesses. 
- */ -public class DocFieldsPhase extends UserTreeBaseVisitor { - @Override - public void visitSymbol(ESymbol userSymbolNode, ScriptScope scriptScope) { - // variables are a leaf node - if (userSymbolNode.getSymbol().equals("doc")) { - scriptScope.setCondition(userSymbolNode, Decorations.IsDocument.class); - } - } - - @Override - public void visitBrace(EBrace userBraceNode, ScriptScope scriptScope) { - userBraceNode.getPrefixNode().visit(this, scriptScope); - scriptScope.replicateCondition(userBraceNode.getPrefixNode(), userBraceNode.getIndexNode(), Decorations.IsDocument.class); - userBraceNode.getIndexNode().visit(this, scriptScope); - } - - @Override - public void visitDot(EDot userDotNode, ScriptScope scriptScope) { - AExpression prefixNode = userDotNode.getPrefixNode(); - prefixNode.visit(this, scriptScope); - if (scriptScope.getCondition(prefixNode, Decorations.IsDocument.class)) { - scriptScope.addDocField(userDotNode.getIndex()); - } - } - - @Override - public void visitCall(ECall userCallNode, ScriptScope scriptScope) { - // looking for doc.get - AExpression prefixNode = userCallNode.getPrefixNode(); - prefixNode.visit(this, scriptScope); - - List argumentNodes = userCallNode.getArgumentNodes(); - if (argumentNodes.size() != 1 || userCallNode.getMethodName().equals("get") == false) { - for (AExpression argumentNode : argumentNodes) { - argumentNode.visit(this, scriptScope); - } - } else { - AExpression argument = argumentNodes.get(0); - scriptScope.replicateCondition(prefixNode, argument, Decorations.IsDocument.class); - argument.visit(this, scriptScope); - } - } - - @Override - public void visitString(EString userStringNode, ScriptScope scriptScope) { - if (scriptScope.getCondition(userStringNode, Decorations.IsDocument.class)) { - scriptScope.addDocField(userStringNode.getString()); - } - } -} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/Decorations.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/Decorations.java index 5c5f503433fd3..de6f748928870 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/Decorations.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/Decorations.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless.symbol; +import org.elasticsearch.painless.Def; import org.elasticsearch.painless.FunctionRef; import org.elasticsearch.painless.ir.IRNode; import org.elasticsearch.painless.lookup.PainlessCast; @@ -416,6 +417,19 @@ public LocalFunction getLocalFunction() { } } + public static class ThisPainlessMethod implements Decoration { + + private final PainlessMethod thisPainlessMethod; + + public ThisPainlessMethod(PainlessMethod thisPainlessMethod) { + this.thisPainlessMethod = Objects.requireNonNull(thisPainlessMethod); + } + + public PainlessMethod getThisPainlessMethod() { + return thisPainlessMethod; + } + } + public static class StandardPainlessClassBinding implements Decoration { private final PainlessClassBinding painlessClassBinding; @@ -513,13 +527,13 @@ public FunctionRef getReference() { public static class EncodingDecoration implements Decoration { - private final String encoding; + private final Def.Encoding encoding; - public EncodingDecoration(String encoding) { - this.encoding = Objects.requireNonNull(encoding); + public EncodingDecoration(boolean isStatic, boolean needsInstance, String symbol, String methodName, int captures) { + this.encoding = new Def.Encoding(isStatic, needsInstance, symbol, methodName, captures); } - 
public String getEncoding() { + public Def.Encoding getEncoding() { return encoding; } } @@ -610,4 +624,14 @@ public LocalFunction getConverter() { public interface IsDocument extends Condition { } + + // Does the lambda need to capture the enclosing instance? + public interface InstanceCapturingLambda extends Condition { + + } + + // Does the function reference need to capture the enclosing instance? + public interface InstanceCapturingFunctionRef extends Condition { + + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/IRDecorations.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/IRDecorations.java index 7e0086b932272..f9e76e5317b17 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/IRDecorations.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/IRDecorations.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless.symbol; +import org.elasticsearch.painless.Def; import org.elasticsearch.painless.FunctionRef; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ir.IRNode.IRCondition; @@ -165,9 +166,9 @@ public IRDName(String value) { } /** describes an encoding used to resolve references and lambdas at runtime */ - public static class IRDDefReferenceEncoding extends IRDecoration { + public static class IRDDefReferenceEncoding extends IRDecoration { - public IRDDefReferenceEncoding(String value) { + public IRDDefReferenceEncoding(Def.Encoding value) { super(value); } } @@ -337,6 +338,14 @@ private IRCSynthetic() { } } + /** describes if a method needs to capture the script "this" */ + public static class IRCInstanceCapture implements IRCondition { + + private IRCInstanceCapture() { + + } + } + /** describes the maximum number of loop iterations possible in a method */ public static class IRDMaxLoopCounter extends IRDecoration { @@ -361,6 +370,19 @@ public IRDFunction(LocalFunction value) { } } + /** describes a method for a node on the script class; which method depends on node type */ + public static class IRDThisMethod extends IRDecoration { + + public IRDThisMethod(PainlessMethod value) { + super(value); + } + + @Override + public String toString() { + return PainlessLookupUtility.buildPainlessMethodKey(getValue().javaMethod.getName(), getValue().typeParameters.size()); + } + } + /** describes the call to a class binding */ public static class IRDClassBinding extends IRDecoration { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/ScriptScope.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/ScriptScope.java index ab65bfb2121e2..67eca4b0756e6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/ScriptScope.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/ScriptScope.java @@ -13,10 +13,8 @@ import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.node.ANode; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -36,7 +34,6 @@ public class ScriptScope extends Decorator { protected int syntheticCounter = 0; protected boolean deterministic = true; - protected List docFields = new ArrayList<>(); protected Set usedVariables = Collections.emptySet(); protected Map staticConstants = new HashMap<>(); @@ -97,17 +94,6 @@ public boolean isDeterministic() { return 
deterministic; } - /** - * Document fields read or written using constant strings - */ - public List docFields() { - return Collections.unmodifiableList(docFields); - } - - public void addDocField(String field) { - docFields.add(field); - } - public void setUsedVariables(Set usedVariables) { this.usedVariables = usedVariables; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/SemanticScope.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/SemanticScope.java index 15d4e87de5aa6..ff29353ef761a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/SemanticScope.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/symbol/SemanticScope.java @@ -137,6 +137,7 @@ public static class LambdaScope extends SemanticScope { protected final SemanticScope parent; protected final Class returnType; protected final Set captures = new HashSet<>(); + protected boolean usesInstanceMethod = false; protected LambdaScope(SemanticScope parent, Class returnType) { super(parent.scriptScope, parent.usedVariables); @@ -190,6 +191,19 @@ public String getReturnCanonicalTypeName() { public Set getCaptures() { return Collections.unmodifiableSet(captures); } + + @Override + public void setUsesInstanceMethod() { + if (usesInstanceMethod) { + return; + } + usesInstanceMethod = true; + } + + @Override + public boolean usesInstanceMethod() { + return usesInstanceMethod; + } } /** @@ -340,6 +354,13 @@ public Variable defineVariable(Location location, Class type, String name, bo public abstract boolean isVariableDefined(String name); public abstract Variable getVariable(Location location, String name); + // We only want to track instance method use inside of lambdas for "this" injection. It's a noop for other scopes. 
+ public void setUsesInstanceMethod() {} + + public boolean usesInstanceMethod() { + return false; + } + public Variable defineInternalVariable(Location location, Class type, String name, boolean isReadOnly) { return defineVariable(location, type, "#" + name, isReadOnly); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/toxcontent/DecorationToXContent.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/toxcontent/DecorationToXContent.java index 8f47164a2b2ff..069113f88b522 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/toxcontent/DecorationToXContent.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/toxcontent/DecorationToXContent.java @@ -291,8 +291,8 @@ public static void ToXContent(ReferenceDecoration referenceDecoration, XContentB builder.endArray(); } - builder.field("factoryMethodType"); - ToXContent(ref.factoryMethodType, builder); + builder.field("factoryMethodDescriptor", ref.getFactoryMethodDescriptor()); + builder.endObject(); } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt diff --git 
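Aside, not part of the patch: the SemanticScope and LambdaScope changes above let a lambda record that its body calls an instance method of the script class. In that case the lambda is desugared into a non-static synthetic method, and the IR nodes are marked with IRCInstanceCapture instead of IRCStatic so the enclosing instance is captured. A plain-Java analogy of the two desugaring shapes, with illustrative names only (the compiler actually generates lambda$synthetic$N methods):

import java.util.function.IntUnaryOperator;

class LambdaDesugarAnalogy {

    private final int base = 10;

    // Analogous to a method declared on the script's base class (a "this" method).
    int addBase(int x) {
        return x + base;
    }

    // A lambda body that touches no instance state still desugars to a static
    // synthetic method, exactly as before this change.
    static int syntheticLambda0(int x) {
        return x * 2;
    }

    // A lambda body that calls an instance method now desugars to a non-static
    // synthetic method, so the functional value must capture the enclosing instance.
    int syntheticLambda1(int x) {
        return addBase(x);
    }

    IntUnaryOperator doubled() {
        return LambdaDesugarAnalogy::syntheticLambda0; // no instance capture needed
    }

    IntUnaryOperator shifted() {
        return this::syntheticLambda1;                 // captures 'this'
    }
}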
a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.json.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.json.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.json.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.json.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.net.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.net.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.net.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.net.txt diff --git a/build-tools-internal/src/main/resources/buildSrc.marker b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.painless.api.txt similarity index 100% 
rename from build-tools-internal/src/main/resources/buildSrc.marker rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.painless.api.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.aggs.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.aggs.txt new file mode 100644 index 0000000000000..a09bce101a155 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.aggs.txt @@ -0,0 +1,15 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api + +# The scripts must be whitelisted for painless to find the classes for the field API +class org.elasticsearch.script.AggregationScript @no_import { +} +class org.elasticsearch.script.AggregationScript$Factory @no_import { +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.aggs_map.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.aggs_map.txt new file mode 100644 index 0000000000000..84df365c4fe2c --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.aggs_map.txt @@ -0,0 +1,15 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api + +# The scripts must be whitelisted for painless to find the classes for the field API +class org.elasticsearch.script.ScriptedMetricAggContexts$MapScript @no_import { +} +class org.elasticsearch.script.ScriptedMetricAggContexts$MapScript$Factory @no_import { +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.boolean_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.boolean_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.boolean_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.boolean_field.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.composite_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.composite_field.txt new file mode 100644 index 0000000000000..b5c499abb8777 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.composite_field.txt @@ -0,0 +1,21 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. 
Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for composite runtime fields + +# These two whitelists are required for painless to find the classes +class org.elasticsearch.script.CompositeFieldScript @no_import { +} +class org.elasticsearch.script.CompositeFieldScript$Factory @no_import { +} + +static_import { + # The `emit` callback to collect values for the fields + void emit(org.elasticsearch.script.CompositeFieldScript, String, Object) bound_to org.elasticsearch.script.CompositeFieldScript$EmitField + void emit(org.elasticsearch.script.CompositeFieldScript, Map) bound_to org.elasticsearch.script.CompositeFieldScript$EmitMap +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.date_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.date_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.date_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.date_field.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.double_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.double_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.double_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.double_field.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.field.txt new file mode 100644 index 0000000000000..9f1c4b2368bc8 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.field.txt @@ -0,0 +1,15 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api + +# The scripts must be whitelisted for painless to find the classes for the field API +class org.elasticsearch.script.FieldScript @no_import { +} +class org.elasticsearch.script.FieldScript$Factory @no_import { +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt new file mode 100644 index 0000000000000..b21437b14a884 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt @@ -0,0 +1,21 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements.
Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api + +# API +class org.elasticsearch.script.Field { + String getName() + boolean isEmpty() + List getValues() + def getValue(def) +} + +class org.elasticsearch.script.DocBasedScript { + org.elasticsearch.script.Field field(String) +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.filter.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.filter.txt new file mode 100644 index 0000000000000..15ffc4e68f2ef --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.filter.txt @@ -0,0 +1,15 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api + +# The scripts must be whitelisted for painless to find the classes for the field API +class org.elasticsearch.script.FilterScript @no_import { +} +class org.elasticsearch.script.FilterScript$Factory @no_import { +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.geo_point_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.geo_point_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.geo_point_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.geo_point_field.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.ingest.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.ingest.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.ip_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ip_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.ip_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ip_field.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.keyword_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.keyword_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.keyword_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.keyword_field.txt diff --git 
a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.long_field.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.long_field.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.script.long_field.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.long_field.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.moving_function.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.moving_function.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.number_sort.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.number_sort.txt new file mode 100644 index 0000000000000..c25c7729f0cfc --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.number_sort.txt @@ -0,0 +1,15 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api + +# The scripts must be whitelisted for painless to find the classes for the field API +class org.elasticsearch.script.NumberSortScript @no_import { +} +class org.elasticsearch.script.NumberSortScript$Factory @no_import { +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt similarity index 97% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt index 15006e9a8f3f4..b77943e0691e5 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt @@ -10,6 +10,8 @@ class org.elasticsearch.script.ScoreScript @no_import { } +class org.elasticsearch.script.ScoreScript$Factory @no_import { +} static_import { double saturation(double, double) from_class org.elasticsearch.script.ScoreScriptUtils diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.string_sort.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.string_sort.txt new file mode 100644 index 0000000000000..1e653e56bbc8a --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.string_sort.txt @@ -0,0 +1,14 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. +# + +# The whitelist for the fields api +# The scripts must be whitelisted for painless to find the classes for the field API +class org.elasticsearch.script.StringSortScript @no_import { +} +class org.elasticsearch.script.StringSortScript$Factory @no_import { +} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt similarity index 99% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt index 1adda3bcef102..5ff8535c967a1 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt @@ -158,6 +158,8 @@ class org.elasticsearch.index.fielddata.ScriptDocValues$Geometry { int getDimensionalType() org.elasticsearch.common.geo.GeoPoint getCentroid() org.elasticsearch.common.geo.GeoBoundingBox getBoundingBox() + double getMercatorWidth() + double getMercatorHeight() } class org.elasticsearch.index.fielddata.ScriptDocValues$GeoPoints { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java index 3131730c5325c..37c21100603a0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java @@ -25,8 +25,8 @@ public class AugmentationTests extends ScriptTestCase { @Override protected Map, List> scriptContexts() { Map, List> contexts = super.scriptContexts(); - List digestWhitelist = new ArrayList<>(Whitelist.BASE_WHITELISTS); - digestWhitelist.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.ingest.txt")); + List digestWhitelist = new ArrayList<>(PainlessPlugin.BASE_WHITELISTS); + digestWhitelist.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.script.ingest.txt")); contexts.put(DigestTestScript.CONTEXT, digestWhitelist); return contexts; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index 7a8551fcd8b9f..6813b40d61007 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -28,27 +28,27 @@ public class BaseClassTests extends ScriptTestCase { protected Map, List> scriptContexts() { Map, List> contexts = new HashMap<>(); - contexts.put(Gets.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(NoArgs.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(OneArg.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ArrayArg.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(PrimitiveArrayArg.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(DefArrayArg.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ManyArgs.CONTEXT, 
Whitelist.BASE_WHITELISTS); - contexts.put(VarArgs.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(DefaultMethods.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ReturnsVoid.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ReturnsPrimitiveBoolean.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ReturnsPrimitiveInt.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ReturnsPrimitiveFloat.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ReturnsPrimitiveDouble.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(NoArgsConstant.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(WrongArgsConstant.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(WrongLengthOfArgConstant.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(UnknownArgType.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(UnknownReturnType.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(UnknownArgTypeInArray.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(TwoExecuteMethods.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(Gets.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(NoArgs.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(OneArg.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ArrayArg.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(PrimitiveArrayArg.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(DefArrayArg.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ManyArgs.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(VarArgs.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(DefaultMethods.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ReturnsVoid.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveBoolean.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveInt.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveFloat.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveDouble.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(NoArgsConstant.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(WrongArgsConstant.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(WrongLengthOfArgConstant.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(UnknownArgType.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(UnknownReturnType.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(UnknownArgTypeInArray.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(TwoExecuteMethods.CONTEXT, PainlessPlugin.BASE_WHITELISTS); return contexts; } @@ -120,7 +120,7 @@ public void testNoArgs() throws Exception { scriptEngine.compile("testNoArgs3", "_score", NoArgs.CONTEXT, emptyMap())); assertEquals("cannot resolve symbol [_score]", e.getMessage()); - String debug = Debugger.toString(NoArgs.class, "int i = 0", new CompilerSettings(), Whitelist.BASE_WHITELISTS); + String debug = Debugger.toString(NoArgs.class, "int i = 0", new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); assertThat(debug, containsString("ACONST_NULL")); assertThat(debug, containsString("ARETURN")); } @@ -306,7 +306,7 @@ public void testReturnsVoid() throws Exception { scriptEngine.compile("testReturnsVoid1", "map.remove('a')", ReturnsVoid.CONTEXT, emptyMap()).newInstance().execute(map); assertEquals(emptyMap(), map); - String debug = Debugger.toString(ReturnsVoid.class, "int i = 0", new CompilerSettings(), Whitelist.BASE_WHITELISTS); + String debug = Debugger.toString(ReturnsVoid.class, "int i = 0", new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); // The 
important thing is that this contains the opcode for returning void assertThat(debug, containsString(" RETURN")); // We shouldn't contain any weird "default to null" logic @@ -347,7 +347,7 @@ public void testReturnsPrimitiveBoolean() throws Exception { scriptEngine.compile("testReturnsPrimitiveBoolean6", "true || false", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) .newInstance().execute()); - String debug = Debugger.toString(ReturnsPrimitiveBoolean.class, "false", new CompilerSettings(), Whitelist.BASE_WHITELISTS); + String debug = Debugger.toString(ReturnsPrimitiveBoolean.class, "false", new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); assertThat(debug, containsString("ICONST_0")); // The important thing here is that we have the bytecode for returning an integer instead of an object. booleans are integers. assertThat(debug, containsString("IRETURN")); @@ -415,7 +415,7 @@ public void testReturnsPrimitiveInt() throws Exception { assertEquals(2, scriptEngine.compile("testReturnsPrimitiveInt7", "1 + 1", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()); - String debug = Debugger.toString(ReturnsPrimitiveInt.class, "1", new CompilerSettings(), Whitelist.BASE_WHITELISTS); + String debug = Debugger.toString(ReturnsPrimitiveInt.class, "1", new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); assertThat(debug, containsString("ICONST_1")); // The important thing here is that we have the bytecode for returning an integer instead of an object assertThat(debug, containsString("IRETURN")); @@ -482,7 +482,7 @@ public void testReturnsPrimitiveFloat() throws Exception { "testReturnsPrimitiveFloat7", "def d = Double.valueOf(1.1); d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) .newInstance().execute()); - String debug = Debugger.toString(ReturnsPrimitiveFloat.class, "1f", new CompilerSettings(), Whitelist.BASE_WHITELISTS); + String debug = Debugger.toString(ReturnsPrimitiveFloat.class, "1f", new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); assertThat(debug, containsString("FCONST_1")); // The important thing here is that we have the bytecode for returning a float instead of an object assertThat(debug, containsString("FRETURN")); @@ -545,7 +545,7 @@ public void testReturnsPrimitiveDouble() throws Exception { scriptEngine.compile("testReturnsPrimitiveDouble12", "1.1 + 6.7", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) .newInstance().execute(), 0); - String debug = Debugger.toString(ReturnsPrimitiveDouble.class, "1", new CompilerSettings(), Whitelist.BASE_WHITELISTS); + String debug = Debugger.toString(ReturnsPrimitiveDouble.class, "1", new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); // The important thing here is that we have the bytecode for returning a double instead of an object assertThat(debug, containsString("DRETURN")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index da9c674e9bfe6..c971163034523 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -24,7 +24,7 @@ public class BasicStatementTests extends ScriptTestCase { protected Map, List> scriptContexts() { Map, List> contexts = super.scriptContexts(); - contexts.put(OneArg.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(OneArg.CONTEXT, PainlessPlugin.BASE_WHITELISTS); return contexts; } diff --git 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java index b525035886332..005e084cd8c9f 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java @@ -108,8 +108,8 @@ public interface Factory { @Override protected Map, List> scriptContexts() { Map, List> contexts = super.scriptContexts(); - List whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); - whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.painless.test")); + List whitelists = new ArrayList<>(PainlessPlugin.BASE_WHITELISTS); + whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.test")); InstanceBindingTestClass instanceBindingTestClass = new InstanceBindingTestClass(1); WhitelistInstanceBinding getter = new WhitelistInstanceBinding("test", instanceBindingTestClass, diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index 7b699d61ea7b3..509b1a11dc359 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupBuilder; -import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptException; import java.io.IOException; @@ -26,7 +25,7 @@ import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(PainlessPlugin.BASE_WHITELISTS); public void testExplain() { // Debug.explain can explain an object diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 63c174ba1303a..16775df059cc9 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -26,7 +26,7 @@ final class Debugger { /** compiles source to bytecode, and returns debugging output */ static String toString(final String source) { - return toString(PainlessTestScript.class, source, new CompilerSettings(), Whitelist.BASE_WHITELISTS); + return toString(PainlessTestScript.class, source, new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS); } /** compiles to bytecode, and returns debugging output */ @@ -69,7 +69,7 @@ private static String tree(Class iface, String source, CompilerSettings setti static void phases(final String source, UserTreeVisitor semanticPhaseVisitor, UserTreeVisitor irPhaseVisitor, IRTreeVisitor asmPhaseVisitor) { - tree(PainlessTestScript.class, source, new CompilerSettings(), Whitelist.BASE_WHITELISTS, semanticPhaseVisitor, irPhaseVisitor, + tree(PainlessTestScript.class, source, new CompilerSettings(), PainlessPlugin.BASE_WHITELISTS, semanticPhaseVisitor, irPhaseVisitor, asmPhaseVisitor); } } diff --git 
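Aside, not part of the patch: the test updates above replace Whitelist.BASE_WHITELISTS with PainlessPlugin.BASE_WHITELISTS and load whitelist resource files relative to PainlessPlugin, matching the move of the whitelist files out of the spi package. A small sketch of the resulting pattern; the class names and the resource file name are taken from the diff (the AugmentationTests change), so treat this as a condensed usage example rather than new API.

import org.elasticsearch.painless.PainlessPlugin;
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;

import java.util.ArrayList;
import java.util.List;

final class ContextWhitelistSketch {

    // Base whitelists now live on PainlessPlugin; per-context additions are loaded
    // from resources that resolve relative to the PainlessPlugin class, e.g.
    // org/elasticsearch/painless/org.elasticsearch.script.ingest.txt.
    static List<Whitelist> ingestWhitelists() {
        List<Whitelist> whitelists = new ArrayList<>(PainlessPlugin.BASE_WHITELISTS);
        whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.script.ingest.txt"));
        return whitelists;
    }
}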
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index 6cc70e0743065..bbb9f380d1b0c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupBuilder; -import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.symbol.FunctionTable; import org.elasticsearch.test.ESTestCase; @@ -23,7 +22,7 @@ import java.util.HashMap; public class DefBootstrapTests extends ESTestCase { - private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); + private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(PainlessPlugin.BASE_WHITELISTS); /** calls toString() on integers, twice */ public void testOneType() throws Throwable { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefEncodingTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefEncodingTests.java new file mode 100644 index 0000000000000..e7d9fca729d59 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefEncodingTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.startsWith; + +public class DefEncodingTests extends ESTestCase { + + public void testParse() { + assertEquals(new Def.Encoding(true, false, "java.util.Comparator", "thenComparing", 1), + new Def.Encoding("Sfjava.util.Comparator.thenComparing,1")); + + assertEquals(new Def.Encoding(false, false, "ft0", "augmentInjectMultiTimesX", 1), + new Def.Encoding("Dfft0.augmentInjectMultiTimesX,1")); + + assertEquals(new Def.Encoding(false, false, "x", "concat", 1), + new Def.Encoding("Dfx.concat,1")); + + assertEquals(new Def.Encoding(true, false, "java.lang.StringBuilder", "setLength", 1), + new Def.Encoding("Sfjava.lang.StringBuilder.setLength,1")); + + assertEquals(new Def.Encoding(true, false, "org.elasticsearch.painless.FeatureTestObject", "overloadedStatic", 0), + new Def.Encoding("Sforg.elasticsearch.painless.FeatureTestObject.overloadedStatic,0")); + + assertEquals(new Def.Encoding(true, false, "this", "lambda$synthetic$0", 1), + new Def.Encoding("Sfthis.lambda$synthetic$0,1")); + + assertEquals(new Def.Encoding(true, true, "this", "lambda$synthetic$0", 2), + new Def.Encoding("Stthis.lambda$synthetic$0,2")); + + assertEquals(new Def.Encoding(true, true, "this", "mycompare", 0), + new Def.Encoding("Stthis.mycompare,0")); + } + + public void testValidate() { + IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, + () -> new Def.Encoding(false, false, "this", "myMethod", 0)); + + assertThat(expected.getMessage(), + startsWith("Def.Encoding must be static if symbol is 'this', encoding [Dfthis.myMethod,0]")); + + expected = expectThrows(IllegalArgumentException.class, + () -> new Def.Encoding(true, true, "org.elasticsearch.painless.FeatureTestObject", "overloadedStatic", 0)); + + assertThat(expected.getMessage(), + startsWith("Def.Encoding symbol must be 'this', not [org.elasticsearch.painless.FeatureTestObject] if needsInstance")); + + expected = expectThrows(IllegalArgumentException.class, + () -> new Def.Encoding(false, false, "x", "", 1)); + + assertThat(expected.getMessage(), + startsWith("methodName must be non-empty, encoding [Dfx.,1]")); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DocFieldsPhaseTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DocFieldsPhaseTests.java deleted file mode 100644 index 816330f69e754..0000000000000 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DocFieldsPhaseTests.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.painless; - -import org.elasticsearch.painless.lookup.PainlessLookup; -import org.elasticsearch.painless.lookup.PainlessLookupBuilder; -import org.elasticsearch.painless.spi.Whitelist; -import org.elasticsearch.painless.symbol.ScriptScope; -import org.elasticsearch.script.ScriptContext; - -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -public class DocFieldsPhaseTests extends ScriptTestCase { - PainlessLookup lookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); - - ScriptScope compile(String script) { - Compiler compiler = new Compiler( - MockDocTestScript.CONTEXT.instanceClazz, - MockDocTestScript.CONTEXT.factoryClazz, - MockDocTestScript.CONTEXT.statefulFactoryClazz, lookup - ); - - // Create our loader (which loads compiled code with no permissions). - final Compiler.Loader loader = AccessController.doPrivileged(new PrivilegedAction<>() { - @Override - public Compiler.Loader run() { - return compiler.createLoader(getClass().getClassLoader()); - } - }); - - return compiler.compile(loader,"test", script, new CompilerSettings()); - } - - public abstract static class MockDocTestScript { - public static final String[] PARAMETERS = {"doc", "other"}; - public abstract void execute(Map doc, Map other); - - public interface Factory { - MockDocTestScript newInstance(); - } - - public static final ScriptContext CONTEXT = - new ScriptContext<>("test", MockDocTestScript.Factory.class); - } - - public void testArray() { - List expected = List.of("my_field"); - // Order shouldn't matter - assertEquals(expected, compile("def a = doc['my_field']; def b = other['foo']").docFields()); - assertEquals(expected, compile("def b = other['foo']; def a = doc['my_field']").docFields()); - - // Only collect array on doc - assertEquals(Collections.emptyList(), compile("def a = other['bar']").docFields()); - - // Only handle str const - assertEquals(Collections.emptyList(), compile("String f = 'bar'; def a = other[f]").docFields()); - } - - public void testDot() { - List expected = List.of("my_field"); - // Order shouldn't matter - assertEquals(expected, compile("def a = doc.my_field; def b = other.foo").docFields()); - assertEquals(expected, compile("def b = other.foo; def a = doc.my_field").docFields()); - - // Only collect doc dots - assertEquals(Collections.emptyList(), compile("def a = other.bar").docFields()); - } - - public void testGet() { - // Order shouldn't matter - List expected = List.of("my_field"); - assertEquals(expected, compile("def a = doc.get('my_field'); def b = other.get('foo')").docFields()); - assertEquals(expected, compile("def b = other.get('foo'); def a = doc.get('my_field')").docFields()); - - // Should work in Lambda - assertEquals(expected, compile("[].sort((a, b) -> doc.get('my_field')); [].sort((a, b) -> doc.equals('bar') ? 
1:2)").docFields()); - - // Only collect get on doc - assertEquals(Collections.emptyList(), compile("def a = other.get('bar')").docFields()); - assertEquals(Collections.emptyList(), compile("def a = doc.equals('bar')").docFields()); - - // Only handle str const - assertEquals(Collections.emptyList(), compile("String f = 'bar'; def b = doc.get(f)").docFields()); - } -} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EmitTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EmitTests.java new file mode 100644 index 0000000000000..06f75d17ddca9 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EmitTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScriptContext; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class EmitTests extends ScriptTestCase { + @Override + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + List whitelists = new ArrayList<>(PainlessPlugin.BASE_WHITELISTS); + whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.test")); + contexts.put(TestFieldScript.CONTEXT, whitelists); + return contexts; + } + + @Override + public TestFieldScript exec(String script) { + TestFieldScript.Factory factory = scriptEngine.compile(null, script, TestFieldScript.CONTEXT, new HashMap<>()); + TestFieldScript testScript = factory.newInstance(); + testScript.execute(); + return testScript; + } + + public void testEmit() { + TestFieldScript script = exec("emit(1L)"); + assertNotNull(script); + assertArrayEquals(new long[]{1L}, script.fetchValues()); + } + + public void testEmitFromUserFunction() { + TestFieldScript script = exec("void doEmit(long l) { emit(l) } doEmit(1L); doEmit(100L)"); + assertNotNull(script); + assertArrayEquals(new long[]{1L, 100L}, script.fetchValues()); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java index 03f37c7df1e0f..40ab60ffd1045 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java @@ -18,22 +18,19 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.equalTo; - public class FactoryTests extends ScriptTestCase { @Override protected Map, List> scriptContexts() { Map, List> contexts = super.scriptContexts(); - contexts.put(StatefulFactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(FactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(DeterministicFactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(EmptyTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(TemplateScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(VoidReturnTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); - 
contexts.put(FactoryTestConverterScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(FactoryTestConverterScriptBadDef.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(DocFieldsTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(StatefulFactoryTestScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(FactoryTestScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(DeterministicFactoryTestScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(EmptyTestScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(TemplateScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(VoidReturnTestScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(FactoryTestConverterScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(FactoryTestConverterScriptBadDef.CONTEXT, PainlessPlugin.BASE_WHITELISTS); return contexts; } @@ -177,8 +174,6 @@ public void testFactory() { FactoryTestScript script = factory.newInstance(Collections.singletonMap("test", 2)); assertEquals(4, script.execute(2)); assertEquals(5, script.execute(3)); - // The factory interface doesn't define `docFields` so we don't generate it. - expectThrows(NoSuchMethodException.class, () -> factory.getClass().getMethod("docFields")); script = factory.newInstance(Collections.singletonMap("test", 3)); assertEquals(5, script.execute(2)); assertEquals(2, script.execute(-1)); @@ -485,31 +480,4 @@ public void testConverterFactoryBadDef() { assertNotNull(ise); assertEquals("convertFromDef must take a single Object as an argument, not [int]", ise.getMessage()); } - - public abstract static class DocFieldsTestScript { - public static final ScriptContext CONTEXT = new ScriptContext<>( - "test", - DocFieldsTestScript.Factory.class - ); - - public interface Factory { - DocFieldsTestScript newInstance(); - - List docFields(); - } - - public static final String[] PARAMETERS = new String[] {}; - - public abstract String execute(); - - public final Map getDoc() { - return Map.of("cat", "meow", "dog", "woof"); - } - } - - public void testDocFields() { - DocFieldsTestScript.Factory f = scriptEngine.compile("test", "doc['cat'] + doc['dog']", DocFieldsTestScript.CONTEXT, Map.of()); - assertThat(f.docFields(), equalTo(List.of("cat", "dog"))); - assertThat(f.newInstance().execute(), equalTo("meowwoof")); - } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java index b95a1ea35558b..d7bf274370a69 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java @@ -150,12 +150,12 @@ public void testCapturingMethodReferenceMultipleLambdasDefEverywhere() { "return test.twoFunctionsOfX(x::concat, y::substring);")); } - public void testOwnStaticMethodReference() { + public void testOwnMethodReference() { assertEquals(2, exec("int mycompare(int i, int j) { j - i } " + "List l = new ArrayList(); l.add(2); l.add(1); l.sort(this::mycompare); return l.get(0);")); } - public void testOwnStaticMethodReferenceDef() { + public void testOwnMethodReferenceDef() { assertEquals(2, exec("int mycompare(int i, int j) { j - i } " + "def l = new ArrayList(); l.add(2); l.add(1); l.sort(this::mycompare); return l.get(0);")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java index 697e46be6c97d..33a66c7564df4 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java @@ -33,7 +33,7 @@ public void testNeedsScores() { IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); Map, List> contexts = new HashMap<>(); - contexts.put(NumberSortScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(NumberSortScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts); SearchExecutionContext searchExecutionContext = index.newSearchExecutionContext(0, 0, null, () -> 0, null, emptyMap()); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index f31a2ec7df839..4b36706388751 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -52,8 +52,8 @@ protected Settings scriptEngineSettings() { */ protected Map, List> scriptContexts() { Map, List> contexts = new HashMap<>(); - List whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); - whitelists.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.painless.test")); + List whitelists = new ArrayList<>(PainlessPlugin.BASE_WHITELISTS); + whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.test")); contexts.put(PainlessTestScript.CONTEXT, whitelists); return contexts; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index ce200fae716cd..27f7884123147 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -32,10 +32,10 @@ public class ScriptedMetricAggContextsTests extends ScriptTestCase { @Override protected Map, List> scriptContexts() { Map, List> contexts = new HashMap<>(); - contexts.put(ScriptedMetricAggContexts.InitScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ScriptedMetricAggContexts.MapScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ScriptedMetricAggContexts.CombineScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(ScriptedMetricAggContexts.ReduceScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ScriptedMetricAggContexts.InitScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ScriptedMetricAggContexts.MapScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ScriptedMetricAggContexts.CombineScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(ScriptedMetricAggContexts.ReduceScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); return contexts; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index 896d76628f6a6..c8b6fc9232d99 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -42,8 +42,8 @@ public class SimilarityScriptTests extends ScriptTestCase { @Override protected Map, List> scriptContexts() { Map, List> contexts = new HashMap<>(); - contexts.put(SimilarityScript.CONTEXT, Whitelist.BASE_WHITELISTS); - contexts.put(SimilarityWeightScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(SimilarityScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); + contexts.put(SimilarityWeightScript.CONTEXT, PainlessPlugin.BASE_WHITELISTS); return contexts; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/TestFieldScript.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/TestFieldScript.java new file mode 100644 index 0000000000000..9982bddf46e1d --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/TestFieldScript.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.script.ScriptContext; + +import java.util.ArrayList; +import java.util.List; + +public abstract class TestFieldScript { + private final List values = new ArrayList<>(); + + @SuppressWarnings("unused") + public static final String[] PARAMETERS = {}; + public interface Factory { + TestFieldScript newInstance(); + } + + public static final ScriptContext CONTEXT = + new ScriptContext<>("painless_test_fieldscript", TestFieldScript.Factory.class); + + public static class Emit { + private final TestFieldScript script; + + public Emit(TestFieldScript script) { + this.script = script; + } + + public void emit(long v) { + script.emit(v); + } + } + + public abstract void execute(); + + public final void emit(long v) { + values.add(v); + } + + public long[] fetchValues() { + return values.stream().mapToLong(i->i).toArray(); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ThisTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ThisTests.java new file mode 100644 index 0000000000000..a92cb308e6af6 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ThisTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScriptContext; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ThisTests extends ScriptTestCase { + + public abstract static class ThisBaseScript { + + protected String baseString; + + public ThisBaseScript(String baseString) { + this.baseString = baseString; + } + + public String getBaseString() { + return baseString; + } + + public void setBaseString(String testString) { + this.baseString = testString; + } + + public int getBaseLength() { + return baseString.length(); + } + } + + public abstract static class ThisScript extends ThisBaseScript { + + protected String thisString; + + public ThisScript(String baseString, String thisString) { + super(baseString); + + this.thisString = thisString; + } + + public String thisString() { + return thisString; + } + + public void thisString(String testString) { + this.thisString = testString; + } + + public int thisLength() { + return thisString.length(); + } + + public abstract Object execute(); + + public interface Factory { + + ThisScript newInstance(String baseString, String testString); + } + + public static final String[] PARAMETERS = {}; + public static final ScriptContext CONTEXT = + new ScriptContext<>("this_test", ThisScript.Factory.class); + } + + @Override + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + List whitelists = new ArrayList<>(PainlessPlugin.BASE_WHITELISTS); + whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.this")); + contexts.put(ThisScript.CONTEXT, whitelists); + return contexts; + } + + public Object exec(String script, String baseString, String testString) { + ThisScript.Factory factory = scriptEngine.compile(null, script, ThisScript.CONTEXT, new HashMap<>()); + ThisScript testThisScript = factory.newInstance(baseString, testString); + return testThisScript.execute(); + } + + public void testThisMethods() { + assertEquals("basethis", exec("getBaseString() + thisString()", "base", "this")); + assertEquals(8, exec("getBaseLength() + thisLength()", "yyy", "xxxxx")); + + List result = new ArrayList<>(); + result.add("this"); + result.add("base"); + assertEquals(result, exec("List result = []; " + + "thisString('this');" + + "setBaseString('base');" + + "result.add(thisString()); " + + "result.add(getBaseString());" + + "result;", "", "")); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ToXContentTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ToXContentTests.java index e6a74a7cff64e..4ef3953ae8065 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ToXContentTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ToXContentTests.java @@ -25,7 +25,7 @@ public class ToXContentTests extends ScriptTestCase { public void testUserFunction() { Map func = getFunction("def twofive(int i) { return 25 + i; } int j = 23; twofive(j)", "twofive"); assertFalse((Boolean)func.get("isInternal")); - assertTrue((Boolean)func.get("isStatic")); + assertFalse((Boolean)func.get("isStatic")); assertEquals("SFunction", func.get("node")); assertEquals("def", func.get("returns")); assertEquals(List.of("int"), func.get("parameterTypes")); diff --git 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/UserFunctionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/UserFunctionTests.java index 6ac4ac1483c07..175fa03614d98 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/UserFunctionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/UserFunctionTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.painless; import java.util.List; +import java.util.Map; public class UserFunctionTests extends ScriptTestCase { public void testZeroArgumentUserFunction() { @@ -28,10 +29,126 @@ public void testUserFunctionDefCallRef() { "if (getSource().startsWith('sour')) { l.add(255); }\n" + "return l;"; assertEquals(List.of(1, 49, 100, 255), exec(source)); - assertBytecodeExists(source, "public static &getSource()Ljava/lang/String"); - assertBytecodeExists(source, "public static &getMulti()I"); - assertBytecodeExists(source, "INVOKESTATIC org/elasticsearch/painless/PainlessScript$Script.&getMulti ()I"); - assertBytecodeExists(source, "public static &myCompare(II)I"); - assertBytecodeExists(source, "INVOKESTATIC org/elasticsearch/painless/PainlessScript$Script.&myCompare (II)I"); + assertBytecodeExists(source, "public &getSource()Ljava/lang/String"); + assertBytecodeExists(source, "public &getMulti()I"); + assertBytecodeExists(source, "INVOKEVIRTUAL org/elasticsearch/painless/PainlessScript$Script.&getMulti ()I"); + assertBytecodeExists(source, "public &myCompare(II)I"); + assertBytecodeExists(source, "INVOKEVIRTUAL org/elasticsearch/painless/PainlessScript$Script.&myCompare (II)I"); + } + + public void testChainedUserMethods() { + String source = "int myCompare(int a, int b) { getMulti() * (a - b) }\n" + + "int getMulti() { -1 }\n" + + "List l = [1, 100, -100];\n" + + "l.sort(this::myCompare);\n" + + "l;\n"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + } + + + public void testChainedUserMethodsLambda() { + String source = "int myCompare(int a, int b) { getMulti() * (a - b) }\n" + + "int getMulti() { -1 }\n" + + "List l = [1, 100, -100];\n" + + "l.sort((a, b) -> myCompare(a, b));\n" + + "l;\n"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + } + + public void testChainedUserMethodsDef() { + String source = "int myCompare(int a, int b) { getMulti() * (a - b) }\n" + + "int getMulti() { -1 }\n" + + "def l = [1, 100, -100];\n" + + "l.sort(this::myCompare);\n" + + "l;\n"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + } + + + public void testChainedUserMethodsLambdaDef() { + String source = "int myCompare(int a, int b) { getMulti() * (a - b) }\n" + + "int getMulti() { -1 }\n" + + "def l = [1, 100, -100];\n" + + "l.sort((a, b) -> myCompare(a, b));\n" + + "l;\n"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + } + + public void testChainedUserMethodsLambdaCaptureDef() { + String source = "int myCompare(int a, int b, int x, int m) { getMulti(m) * (a - b + x) }\n" + + "int getMulti(int m) { -1 * m }\n" + + "def l = [1, 100, -100];\n" + + "int cx = 100;\n" + + "int cm = 1;\n" + + "l.sort((a, b) -> myCompare(a, b, cx, cm));\n" + + "l;\n"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + } + + public void testMethodReferenceInUserFunction() { + String source = "int myCompare(int a, int b, String s) { " + + " Map m = ['f': 5];" + + " a - b + m.computeIfAbsent(s, this::getLength) " + + "}\n" + + "int getLength(String s) { 
s.length() }\n" + + "def l = [1, 0, -2];\n" + + "String s = 'g';\n" + + "l.sort((a, b) -> myCompare(a, b, s));\n" + + "l;\n"; + assertEquals(List.of(-2, 1, 0), exec(source, Map.of("a", 1), false)); + } + + public void testUserFunctionVirtual() { + String source = "int myCompare(int x, int y) { return -1 * (x - y) }\n" + + "return myCompare(100, 90);"; + assertEquals(-10, exec(source, Map.of("a", 1), false)); + assertBytecodeExists(source, "INVOKEVIRTUAL org/elasticsearch/painless/PainlessScript$Script.&myCompare (II)I"); + } + + public void testUserFunctionRef() { + String source = "int myCompare(int x, int y) { return -1 * x - y }\n" + + "List l = [1, 100, -100];\n" + + "l.sort(this::myCompare);\n" + + "return l;"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + assertBytecodeExists(source, "public &myCompare(II)I"); + } + + public void testUserFunctionRefEmpty() { + String source = "int myCompare(int x, int y) { return -1 * x - y }\n" + + "[].sort((a, b) -> myCompare(a, b));\n"; + assertNull(exec(source, Map.of("a", 1), false)); + assertBytecodeExists(source, "public &myCompare(II)I"); + assertBytecodeExists(source, "INVOKEVIRTUAL org/elasticsearch/painless/PainlessScript$Script.&myCompare (II)I"); + } + + public void testUserFunctionCallInLambda() { + String source = "int myCompare(int x, int y) { -1 * ( x - y ) }\n" + + "List l = [1, 100, -100];\n" + + "l.sort((a, b) -> myCompare(a, b));\n" + + "return l;"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + assertBytecodeExists(source, "public &myCompare(II)I"); + assertBytecodeExists(source, "INVOKEVIRTUAL org/elasticsearch/painless/PainlessScript$Script.&myCompare (II)I"); + } + + public void testUserFunctionLambdaCapture() { + String source = "int myCompare(Object o, int x, int y) { return o != null ? 
-1 * ( x - y ) : ( x - y ) }\n" + + "List l = [1, 100, -100];\n" + + "Object q = '';\n" + + "l.sort((a, b) -> myCompare(q, a, b));\n" + + "return l;"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + assertBytecodeExists(source, "public &myCompare(Ljava/lang/Object;II)I"); + assertBytecodeExists(source, "INVOKEVIRTUAL org/elasticsearch/painless/PainlessScript$Script.&myCompare (Ljava/lang/Object;II)I"); + } + + public void testLambdaCapture() { + String source = "List l = [1, 100, -100];\n" + + "int q = -1;\n" + + "l.sort((a, b) -> q * ( a - b ));\n" + + "return l;"; + assertEquals(List.of(100, 1, -100), exec(source, Map.of("a", 1), false)); + assertBytecodeExists(source, "public static synthetic lambda$synthetic$0(ILjava/lang/Object;Ljava/lang/Object;)I"); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index 1b528331ea22f..d0fb1d59431f8 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -850,4 +850,11 @@ public void testInvalidNullSafeBehavior() { exec("def test = ['hostname': 'somehostname']; test?.hostname && params.host.hostname != ''")); expectScriptThrows(NullPointerException.class, () -> exec("params?.host?.hostname && params.host?.hostname != ''")); } + + public void testInstanceMethodNotFound() { + IllegalArgumentException iae = expectScriptThrows(IllegalArgumentException.class, () -> exec("doesNotExist()")); + assertEquals(iae.getMessage(), "Unknown call [doesNotExist] with [0] arguments."); + iae = expectScriptThrows(IllegalArgumentException.class, () -> exec("doesNotExist(1, 'string', false)")); + assertEquals(iae.getMessage(), "Unknown call [doesNotExist] with [3] arguments."); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteApiTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteApiTests.java index a91cd4858dff2..1ea75f511a1d1 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteApiTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteApiTests.java @@ -288,6 +288,20 @@ public void testKeywordFieldExecutionContext() throws IOException { assertEquals(Arrays.asList("test", "baz was not here", "Data", "-10", "20", "9"), response.getResult()); } + public void testCompositeExecutionContext() throws IOException { + ScriptService scriptService = getInstanceFromNode(ScriptService.class); + IndexService indexService = createIndex("index", Settings.EMPTY, "doc", "rank", "type=long", "text", "type=keyword"); + + Request.ContextSetup contextSetup = new Request.ContextSetup("index", new BytesArray("{}"), new MatchAllQueryBuilder()); + contextSetup.setXContentType(XContentType.JSON); + Request request = new Request(new Script(ScriptType.INLINE, "painless", + "emit(\"foo\", \"bar\"); emit(\"foo2\", 2);", emptyMap()), "composite_field", contextSetup); + Response response = innerShardOperation(request, scriptService, indexService); + assertEquals(Map.of( + "composite_field.foo", List.of("bar"), + "composite_field.foo2", List.of(2)), response.getResult()); + } + public void testContextWhitelists() throws IOException { ScriptService scriptService = 
getInstanceFromNode(ScriptService.class); // score diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.test similarity index 91% rename from modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test rename to modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.test index 28c032418f373..c6554db9169ac 100644 --- a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/spi/org.elasticsearch.painless.test +++ b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.test @@ -15,6 +15,13 @@ class org.elasticsearch.painless.api.Json { class org.elasticsearch.painless.BindingsTests$BindingsTestScript { } +# Runtime-field-like test objects +class org.elasticsearch.painless.TestFieldScript @no_import { +} +class org.elasticsearch.painless.TestFieldScript$Factory @no_import { +} + + class org.elasticsearch.painless.FeatureTestObject @no_import { int z () @@ -55,4 +62,5 @@ static_import { int classMul(int, int) from_class org.elasticsearch.painless.BindingsTests @compile_time_only int compileTimeBlowUp(int, int) from_class org.elasticsearch.painless.BindingsTests @compile_time_only List fancyConstant(String, String) from_class org.elasticsearch.painless.BindingsTests @compile_time_only + void emit(org.elasticsearch.painless.TestFieldScript, long) bound_to org.elasticsearch.painless.TestFieldScript$Emit } diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.this b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.this new file mode 100644 index 0000000000000..fb5eedf3388c9 --- /dev/null +++ b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.this @@ -0,0 +1,12 @@ +class org.elasticsearch.painless.ThisTests$ThisBaseScript @no_import { + String getBaseString(); + void setBaseString(String); + int getBaseLength(); +} + + +class org.elasticsearch.painless.ThisTests$ThisScript @no_import { + String thisString(); + void thisString(String); + int thisLength(); +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml index 000e1af694d7d..824036e193037 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/100_terms_agg.yml @@ -66,6 +66,24 @@ setup: - is_false: aggregations.str_terms.buckets.1.key_as_string - match: { aggregations.str_terms.buckets.1.doc_count: 1 } +--- +"String Value Script with doc notation (fields api)": + + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "script": { "source": "return field('str').getValue('') + \"1\""} } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.str_terms.buckets: 2 } + - match: { aggregations.str_terms.buckets.0.key: "abc1" } + - is_false: aggregations.str_terms.buckets.0.key_as_string + - match: { aggregations.str_terms.buckets.0.doc_count: 2 } + - match: { aggregations.str_terms.buckets.1.key: "bcd1" } + - is_false: 
aggregations.str_terms.buckets.1.key_as_string + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + --- "Long Value Script with doc notation": @@ -84,6 +102,24 @@ setup: - is_false: aggregations.long_terms.buckets.1.key_as_string - match: { aggregations.long_terms.buckets.1.doc_count: 1 } +--- +"Long Value Script with doc notation (fields api)": + + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { "long_terms" : { "terms" : { "field" : "number", "script": { "source": "return field('number').getValue(0L) + 1"} } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.long_terms.buckets: 2 } + - match: { aggregations.long_terms.buckets.0.key: 2.0 } + - is_false: aggregations.long_terms.buckets.0.key_as_string + - match: { aggregations.long_terms.buckets.0.doc_count: 2 } + - match: { aggregations.long_terms.buckets.1.key: 3.0 } + - is_false: aggregations.long_terms.buckets.1.key_as_string + - match: { aggregations.long_terms.buckets.1.doc_count: 1 } + --- "Double Value Script with doc notation": @@ -102,6 +138,24 @@ setup: - is_false: aggregations.double_terms.buckets.1.key_as_string - match: { aggregations.double_terms.buckets.1.doc_count: 1 } +--- +"Double Value Script with doc notation (fields api)": + + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { "double_terms" : { "terms" : { "field" : "double", "script": { "source": "return field('double').getValue(0.0) + 1"} } } } } + + - match: { hits.total: 3 } + + - length: { aggregations.double_terms.buckets: 2 } + - match: { aggregations.double_terms.buckets.0.key: 2.0 } + - is_false: aggregations.double_terms.buckets.0.key_as_string + - match: { aggregations.double_terms.buckets.0.doc_count: 2 } + - match: { aggregations.double_terms.buckets.1.key: 3.0 } + - is_false: aggregations.double_terms.buckets.1.key_as_string + - match: { aggregations.double_terms.buckets.1.doc_count: 1 } + --- "Bucket script with keys": diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml new file mode 100644 index 0000000000000..60cc31f623cfe --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml @@ -0,0 +1,85 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + double: + type: double + + - do: + cluster.health: + wait_for_status: green + + - do: + index: + index: test + id: 1 + body: + double: 1.0 + + - do: + index: + index: test + id: 2 + body: + double: 1.0 + + - do: + index: + index: test + id: 3 + body: + double: 2.0 + + - do: + indices.refresh: {} + +--- +"Scripted Metric Agg Total": + + - do: + search: + rest_total_hits_as_int: true + body: { + "size": 0, + "aggs": { + "total": { + "scripted_metric": { + "init_script": "state.transactions = []", + "map_script": "state.transactions.add(doc['double'].value)", + "combine_script": "double total = 0.0; for (t in state.transactions) { total += t } return total", + "reduce_script": "double total = 0; for (a in states) { total += a } return total" + } + } + } + } + + - match: { hits.total: 3 } + - match: { aggregations.total.value: 4.0 } + +--- +"Scripted Metric Agg Total (fields api)": + + - do: + search: + rest_total_hits_as_int: true + body: { + "size": 0, + "aggs": { + "total": { + "scripted_metric": { + "init_script": "state.transactions = []", + 
"map_script": "state.transactions.add(field('double').getValue(0.0))", + "combine_script": "double total = 0.0; for (t in state.transactions) { total += t } return total", + "reduce_script": "double total = 0; for (a in states) { total += a } return total" + } + } + } + } + + - match: { hits.total: 3 } + - match: { aggregations.total.value: 4.0 } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml index 3a3e5608e02bf..0ff7c38b77633 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml @@ -44,6 +44,21 @@ setup: - match: { hits.hits.0.fields.bar.0: "aaabbb"} +--- +"Scripted Field (fields api)": + - do: + search: + rest_total_hits_as_int: true + body: + script_fields: + bar: + script: + source: "field('foo').getValue('') + params.x;" + params: + x: "bbb" + + - match: { hits.hits.0.fields.bar.0: "aaabbb"} + --- "Scripted Field Doing Compare": - do: @@ -73,6 +88,35 @@ setup: - match: { hits.hits.0.fields.bar.0: false} +--- +"Scripted Field Doing Compare (fields api)": + - do: + search: + rest_total_hits_as_int: true + body: + script_fields: + bar: + script: + source: "boolean compare(Supplier s, def v) {return s.get() == v;} + compare(() -> { return field('foo').getValue('') }, params.x);" + params: + x: "aaa" + + - match: { hits.hits.0.fields.bar.0: true} + - do: + search: + rest_total_hits_as_int: true + body: + script_fields: + bar: + script: + source: "boolean compare(Supplier s, def v) {return s.get() == v;} + compare(() -> { return doc['foo'].value }, params.x);" + params: + x: "bbb" + + - match: { hits.hits.0.fields.bar.0: false} + --- "Scripted Field with a null safe dereference (non-null)": - do: @@ -116,6 +160,19 @@ setup: - match: { hits.hits.0.fields.bar.0: 7} +--- +"Access a date (fields api)": + - do: + search: + rest_total_hits_as_int: true + body: + script_fields: + bar: + script: + source: "field('date').getValue(new Date()).dayOfWeekEnum.value" + + - match: { hits.hits.0.fields.bar.0: 7} + --- "Access many dates": - do: @@ -134,6 +191,24 @@ setup: - match: { hits.hits.0.fields.bar.0: "7 3 3"} +--- +"Access many dates (fields api)": + - do: + search: + rest_total_hits_as_int: true + body: + script_fields: + bar: + script: + source: > + StringBuilder b = new StringBuilder(); + for (def date : field('dates').getValues()) { + b.append(" ").append(date.getDayOfWeekEnum().value); + } + return b.toString().trim() + + - match: { hits.hits.0.fields.bar.0: "7 3 3"} + --- "Scripted Field with script error": - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml new file mode 100644 index 0000000000000..61749d8333cd4 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml @@ -0,0 +1,160 @@ +# Integration tests for sort script queries using Painless + +setup: +- skip: + version: " - 7.14.99" + reason: "sort script fields api was added in 7.15.0" + +--- +"sort script fields api": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + properties: + dval: + type: double + - do: + index: + index: test + id: d1 + body: {"dval": 10, 
"sval": "f"} + - do: + index: + index: test + id: d2 + body: {} + - do: + index: + index: test + id: d3 + body: {"dval": 5, "sval": "a"} + - do: + indices.refresh: {} + - do: + search: + rest_total_hits_as_int: true + index: test + body: + sort: + _script: + type: number + script: + source: "field('dval').getValue(3)" + - match: { hits.total: 3 } + - match: { hits.hits.0._id: d2 } + - match: { hits.hits.1._id: d3 } + - match: { hits.hits.2._id: d1 } + - do: + search: + rest_total_hits_as_int: true + index: test + body: + sort: + _script: + type: string + script: + source: "field('sval.keyword').getValue('g')" + - match: { hits.total: 3 } + - match: { hits.hits.0._id: d3 } + - match: { hits.hits.1._id: d1 } + - match: { hits.hits.2._id: d2 } + +--- +"script score fields api": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + properties: + dval: + type: double + - do: + index: + index: test + id: d1 + body: {"dval": 10} + - do: + index: + index: test + id: d2 + body: {} + - do: + index: + index: test + id: d3 + body: {"dval": 5} + - do: + indices.refresh: {} + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "field('dval').getValue(3)" + - match: { hits.total: 3 } + - match: { hits.hits.0._id: d1 } + - match: { hits.hits.1._id: d3 } + - match: { hits.hits.2._id: d2 } + +--- +"filter script fields api": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + properties: + dval: + type: double + - do: + index: + index: test + id: d1 + body: {"dval": 10, "sval": "f"} + - do: + index: + index: test + id: d2 + body: {} + - do: + index: + index: test + id: d3 + body: {"dval": 5, "sval": "a"} + - do: + indices.refresh: {} + - do: + search: + rest_total_hits_as_int: true + body: + query: + bool: + filter: + script: + script: "field('dval').getValue(6) > 6" + - match: { hits.total: 1 } + - match: { hits.hits.0._id: d1 } + - do: + search: + rest_total_hits_as_int: true + body: + query: + bool: + filter: + script: + script: "field('sval.keyword').getValue('b') == 'a'" + - match: { hits.total: 1 } + - match: { hits.hits.0._id: d3 } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml index 7c7a7390107ee..29d526a7c7187 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -182,6 +182,20 @@ setup: source: "doc['geo_point'].getDimensionalType()" - match: { hits.hits.0.fields.type.0: 0 } + - do: + search: + rest_total_hits_as_int: true + body: + script_fields: + width: + script: + source: "doc['geo_point'].getMercatorWidth()" + height: + script: + source: "doc['geo_point'].getMercatorHeight()" + - match: { hits.hits.0.fields.width.0: 0.0 } + - match: { hits.hits.0.fields.height.0: 0.0 } + --- "ip": - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml index bb01fc3eca154..abdf09fa76819 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml +++ 
b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml @@ -118,6 +118,39 @@ setup: index: "my-index" - match: { result: [ false, false, true, true ] } +--- +"Execute with boolean field context (single-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('rank').getValue(false) < params.max_rank);" + params: + max_rank: 5.0 + context: "boolean_field" + context_setup: + document: + rank: 4 + index: "my-index" + - match: { result: [ true ] } + + +--- +"Execute with boolean field context (multi-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('rank').getValue(false) < params.max_rank); emit(false); emit(false); emit(true);" + params: + max_rank: 5.0 + context: "boolean_field" + context_setup: + document: + rank: 4 + index: "my-index" + - match: { result: [ false, false, true, true ] } + --- "Execute with date field context (single-value)": - do: @@ -147,6 +180,35 @@ setup: index: "my-index" - match: { result: [ "2015-01-01T12:10:30.000Z", "2010-11-30T13:14:35.000Z" ] } +--- +"Execute with date field context (single-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('date').getValue(0).toInstant().toEpochMilli())" + context: "date_field" + context_setup: + document: + date: "2015-01-01T12:10:30Z" + index: "my-index" + - match: { result: [ "2015-01-01T12:10:30.000Z" ] } + +--- +"Execute with date field context (multi-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('date0').values[0].toInstant().toEpochMilli()); emit(field('date1').values[0].toInstant().toEpochMilli());" + context: "date_field" + context_setup: + document: + date0: "2015-01-01T12:10:30Z" + date1: "2010-11-30T13:14:35Z" + index: "my-index" + - match: { result: [ "2015-01-01T12:10:30.000Z", "2010-11-30T13:14:35.000Z" ] } + --- "Execute with double field context (single-value)": - do: @@ -179,6 +241,38 @@ setup: index: "my-index" - match: { result: [ 20.0, 400.0, 55.0 ] } +--- +"Execute with double field context (single-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('rank').getValue(0.0) * params.max_rank)" + params: + max_rank: 5.0 + context: "double_field" + context_setup: + document: + rank: 4 + index: "my-index" + - match: { result: [ 20.0 ] } + +--- +"Execute with double field context (multi-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('rank').getValue(0.0) * params.max_rank); emit(400.0); emit(55.0)" + params: + max_rank: 5.0 + context: "double_field" + context_setup: + document: + rank: 4 + index: "my-index" + - match: { result: [ 20.0, 400.0, 55.0 ] } + --- "Execute with geo point field context (single-value)": - skip: @@ -219,6 +313,47 @@ setup: - close_to: { result.1.coordinates.1: { value: 41.0, error: 0.00001 } } - match: { result.1.type: "Point" } +--- +"Execute with geo point field context (single-value, fields api)": + - skip: + features: close_to + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('point').getValue(null).lat + 1.0, field('point').getValue(null).lon - 1.0)" + context: "geo_point_field" + context_setup: + document: + point: "30.0,40.0" + index: "my-index" + - close_to: { result.0.coordinates.0: { value: 39.0, error: 0.00001 } } + - close_to: { result.0.coordinates.1: { value: 31.0, error: 0.00001 } } + - match: { result.0.type: "Point" } + 
+--- +"Execute with geo point field context (multi-value, fields api)": + - skip: + features: close_to + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('p0').values[0].lat + 1.0, field('p0').values[0].lon - 1.0); + emit(field('p1').values[0].lat + 1.0, field('p1').values[0].lon - 1.0)" + context: "geo_point_field" + context_setup: + document: + p0: "30.0,40.0" + p1: "40.0,30.0" + index: "my-index" + - close_to: { result.0.coordinates.0: { value: 39.0, error: 0.00001 } } + - close_to: { result.0.coordinates.1: { value: 31.0, error: 0.00001 } } + - match: { result.0.type: "Point" } + - close_to: { result.1.coordinates.0: { value: 29.0, error: 0.00001 } } + - close_to: { result.1.coordinates.1: { value: 41.0, error: 0.00001 } } + - match: { result.1.type: "Point" } + --- "Execute with ip field context (single-value)": - do: @@ -247,6 +382,36 @@ setup: index: "my-index" - match: { result: [ "2001:db8::8a2e:370:7333", "192.168.1.254", "2001:db8::8a2e:370:7334" ] } +--- +"Execute with ip field context (single-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('ip').getValue('0.0.0.0'));" + context: "ip_field" + context_setup: + document: + ip: "192.168.1.254" + index: "my-index" + - match: { result: [ "192.168.1.254" ] } + +--- +"Execute with ip field context (multi-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit('2001:0db8:0000:0000:0000:8a2e:0370:7333'); + emit(field('ip').getValue('0.0.0.0')); + emit('2001:db8::8a2e:370:7334')" + context: "ip_field" + context_setup: + document: + ip: "192.168.1.254" + index: "my-index" + - match: { result: [ "2001:db8::8a2e:370:7333", "192.168.1.254", "2001:db8::8a2e:370:7334" ] } + --- "Execute with long field context (single-value)": - do: @@ -279,6 +444,38 @@ setup: index: "my-index" - match: { result: [ 20, 35, 0, -90, 20 ] } +--- +"Execute with long field context (single-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('rank').getValue(0L) * (long)params.max_rank)" + params: + max_rank: 5.0 + context: "long_field" + context_setup: + document: + rank: 4 + index: "my-index" + - match: { result: [ 20 ] } + +--- +"Execute with long field context (multi-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('rank').getValue(0L) * (long)params.max_rank); emit(35); emit(0); emit(-90); emit(20);" + params: + max_rank: 5.0 + context: "long_field" + context_setup: + document: + rank: 4 + index: "my-index" + - match: { result: [ 20, 35, 0, -90, 20 ] } + --- "Execute with keyword field context (single-value)": - do: @@ -308,6 +505,35 @@ setup: - match: { result.0: "my_keyword" } - match: { result.1: "my_keyword_test" } +--- +"Execute with keyword field context (single-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('keyword').getValue(''));" + context: "keyword_field" + context_setup: + document: + keyword: "my_keyword" + index: "my-index" + - match: { result.0: "my_keyword" } + +--- +"Execute with keyword field context (multi-value, fields api)": + - do: + scripts_painless_execute: + body: + script: + source: "emit(field('keyword').getValue('')); emit(field('keyword').getValue('') + '_test');" + context: "keyword_field" + context_setup: + document: + keyword: "my_keyword" + index: "my-index" + - match: { result.0: "my_keyword" } + - match: { result.1: "my_keyword_test" } + --- "Execute against an empty index with 
no mappings": - do: diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java index 592a2bbce7f76..fff7ac2a86ce5 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -317,7 +317,7 @@ public FieldMapper.Builder getMergeBuilder() { } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { final String value = context.parser().textOrNull(); if (value == null) { diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java index 8c27e3c4fecd6..f67cd6b80a175 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java @@ -134,7 +134,7 @@ public RankFeatureFieldType fieldType() { } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { float value; if (context.parser().currentToken() == Token.VALUE_NULL) { // skip diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java index 73cf6314827b8..c1f54235d0e96 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java @@ -116,7 +116,7 @@ public RankFeaturesFieldType fieldType() { } @Override - public void parse(ParseContext context) throws IOException { + public void parse(DocumentParserContext context) throws IOException { if (context.parser().currentToken() != Token.START_OBJECT) { throw new IllegalArgumentException("[rank_features] fields must be json objects, expected a START_OBJECT but got: " + @@ -148,7 +148,7 @@ public void parse(ParseContext context) throws IOException { } @Override - protected void parseCreateField(ParseContext context) { + protected void parseCreateField(DocumentParserContext context) { throw new AssertionError("parse is implemented directly"); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index 37f349844b0c5..4b3a65d91fd60 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -311,7 +311,7 @@ public FieldMapper.Builder getMergeBuilder() { } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { XContentParser parser = context.parser(); Object value; diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java 
b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java index f53fcafe52461..450b98cc23173 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -416,7 +416,7 @@ FieldType getLuceneFieldType() { } @Override - protected void parseCreateField(ParseContext context) { + protected void parseCreateField(DocumentParserContext context) { throw new UnsupportedOperationException(); } @@ -455,7 +455,7 @@ public ShingleFieldType fieldType() { } @Override - protected void parseCreateField(ParseContext context) { + protected void parseCreateField(DocumentParserContext context) { throw new UnsupportedOperationException(); } @@ -570,7 +570,7 @@ public SearchAsYouTypeFieldMapper(String simpleName, } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { final String value = context.parser().textOrNull(); if (value == null) { return; diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java index 793d60ffdfb9f..8a1ee05873e18 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java @@ -77,7 +77,7 @@ static class TokenCountFieldType extends NumberFieldMapper.NumberFieldType { TokenCountFieldType(String name, boolean isSearchable, boolean isStored, boolean hasDocValues, Number nullValue, Map meta) { - super(name, NumberFieldMapper.NumberType.INTEGER, isSearchable, isStored, hasDocValues, false, nullValue, meta, null); + super(name, NumberFieldMapper.NumberType.INTEGER, isSearchable, isStored, hasDocValues, false, nullValue, meta, null, false); } @Override @@ -110,7 +110,7 @@ protected TokenCountFieldMapper(String simpleName, MappedFieldType defaultFieldT } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { final String value = context.parser().textOrNull(); if (value == null && nullValue == null) { diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java index e7cd312457f4b..bb04cc1d3e200 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java @@ -38,7 +38,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { } @Override - protected void assertExistsQuery(MappedFieldType fieldType, Query query, ParseContext.Document fields) { + protected void assertExistsQuery(MappedFieldType fieldType, Query query, LuceneDocument fields) { assertThat(query, instanceOf(TermQuery.class)); TermQuery termQuery = (TermQuery) query; assertEquals("_feature", termQuery.getTerm().field()); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java 
b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java index 1af19b60d27e1..ae9a1e9941a0f 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java @@ -133,19 +133,19 @@ public TokenStreamComponents createComponents(String fieldName) { public void testParseNullValue() throws Exception { DocumentMapper mapper = createIndexWithTokenCountField(); - ParseContext.Document doc = parseDocument(mapper, createDocument(null)); + LuceneDocument doc = parseDocument(mapper, createDocument(null)); assertNull(doc.getField("test.tc")); } public void testParseEmptyValue() throws Exception { DocumentMapper mapper = createIndexWithTokenCountField(); - ParseContext.Document doc = parseDocument(mapper, createDocument("")); + LuceneDocument doc = parseDocument(mapper, createDocument("")); assertEquals(0, doc.getField("test.tc").numericValue()); } public void testParseNotNullValue() throws Exception { DocumentMapper mapper = createIndexWithTokenCountField(); - ParseContext.Document doc = parseDocument(mapper, createDocument("three tokens string")); + LuceneDocument doc = parseDocument(mapper, createDocument("three tokens string")); assertEquals(3, doc.getField("test.tc").numericValue()); } @@ -173,7 +173,7 @@ private SourceToParse createDocument(String fieldValue) throws Exception { return source(b -> b.field("test", fieldValue)); } - private ParseContext.Document parseDocument(DocumentMapper mapper, SourceToParse request) { + private LuceneDocument parseDocument(DocumentMapper mapper, SourceToParse request) { return mapper.parse(request) .docs().stream().findFirst().orElseThrow(() -> new IllegalStateException("Test object not parsed")); } diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index a892ac569a273..0b3099dc80a6e 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -19,11 +19,3 @@ restResources { include '_common', 'bulk', 'cluster', 'nodes', 'indices', 'index', 'search' } } - -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - '/20_parent_join/Test parent_id query', - '/20_parent_join/Test basic', - '/30_inner_hits/Test two sub-queries with only one having inner_hits' - ].join(',') -} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java index cd07c3fe99bcb..87635140f6f5d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java @@ -16,8 +16,8 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.ValueFetcher; @@ -94,11 +94,11 @@ protected ParentIdFieldMapper(String name, boolean eagerGlobalOrdinals) { } @Override - protected void parseCreateField(ParseContext context) { + protected 
void parseCreateField(DocumentParserContext context) { throw new UnsupportedOperationException("Cannot directly call parse() on a ParentIdFieldMapper"); } - public void indexValue(ParseContext context, String refId) { + public void indexValue(DocumentParserContext context, String refId) { BytesRef binaryValue = new BytesRef(refId); Field field = new Field(fieldType().name(), binaryValue, Defaults.FIELD_TYPE); context.doc().add(field); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index 2b99444203381..a2368c37357e5 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -20,11 +20,11 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; @@ -203,12 +203,12 @@ public Iterator iterator() { } @Override - protected void parseCreateField(ParseContext context) { + protected void parseCreateField(DocumentParserContext context) { throw new UnsupportedOperationException("parsing is implemented in parse(), this method should NEVER be called"); } @Override - public void parse(ParseContext context) throws IOException { + public void parse(DocumentParserContext context) throws IOException { context.path().add(simpleName()); XContentParser.Token token = context.parser().currentToken(); String name = null; diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index dfc32d8b195f4..81936f44a7ec6 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -24,3 +24,7 @@ restResources { include '_common', 'indices', 'index', 'search', 'msearch' } } + +tasks.named("transformV7RestTests").configure({ task -> + task.addAllowedWarningRegex("\\[types removal\\].*") +}) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 75074482ef991..376949c91420c 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -36,33 +36,36 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import 
org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -74,19 +77,30 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.core.RestApiVersion.equalTo; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; public class PercolateQueryBuilder extends AbstractQueryBuilder { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class); + static final String DOCUMENT_TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. " + + "The [document_type] should no longer be specified."; + static final String TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. 
" + + "The [type] of the indexed document should no longer be specified."; + + public static final String NAME = "percolate"; static final ParseField DOCUMENT_FIELD = new ParseField("document"); static final ParseField DOCUMENTS_FIELD = new ParseField("documents"); private static final ParseField NAME_FIELD = new ParseField("name"); private static final ParseField QUERY_FIELD = new ParseField("field"); + private static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type"); + private static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type"); private static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index"); private static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id"); private static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing"); @@ -287,6 +301,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep if (indexedDocumentIndex != null) { builder.field(INDEXED_DOCUMENT_FIELD_INDEX.getPreferredName(), indexedDocumentIndex); } + if (builder.getRestApiVersion() == RestApiVersion.V_7) { + builder.field(INDEXED_DOCUMENT_FIELD_TYPE.getPreferredName(), MapperService.SINGLE_MAPPING_NAME); + } if (indexedDocumentId != null) { builder.field(INDEXED_DOCUMENT_FIELD_ID.getPreferredName(), indexedDocumentId); } @@ -338,6 +355,14 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep DOCUMENTS_FIELD.getPreferredName(), INDEXED_DOCUMENT_FIELD_ID.getPreferredName()); PARSER.declareExclusiveFieldSet(DOCUMENT_FIELD.getPreferredName(), DOCUMENTS_FIELD.getPreferredName(), INDEXED_DOCUMENT_FIELD_ID.getPreferredName()); + PARSER.declareString(deprecateAndIgnoreType("percolate_with_type", TYPE_DEPRECATION_MESSAGE), + INDEXED_DOCUMENT_FIELD_TYPE.forRestApiVersion(equalTo(RestApiVersion.V_7))); + PARSER.declareString(deprecateAndIgnoreType("percolate_with_document_type", DOCUMENT_TYPE_DEPRECATION_MESSAGE), + DOCUMENT_TYPE_FIELD.forRestApiVersion(equalTo(RestApiVersion.V_7))); + } + + private static BiConsumer deprecateAndIgnoreType(String key, String message) { + return (target, type) -> deprecationLogger.compatibleApiWarning(key, message); } private static BytesReference parseDocument(XContentParser parser) throws IOException { @@ -509,7 +534,7 @@ static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, Collection
<ParsedDocument> docs) { - Iterable<ParseContext.Document>
iterable = () -> docs.stream() + Iterable iterable = () -> docs.stream() .map(ParsedDocument::docs) .flatMap(Collection::stream) .iterator(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 19e6a434e3404..3df7ee04f15ca 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.lucene.search.Queries; @@ -42,15 +41,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MappingParserContext; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.mapper.SourceValueFetcher; @@ -61,9 +63,9 @@ import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.DisMaxQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import java.io.ByteArrayOutputStream; @@ -173,7 +175,7 @@ static NumberFieldMapper createMinimumShouldMatchField(ContentPath contentPath) static class TypeParser implements Mapper.TypeParser { @Override - public Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { return new Builder(name, parserContext.searchExecutionContext(), getMapUnmappedFieldAsText(parserContext.getSettings())); } } @@ -331,7 +333,7 @@ Tuple, Map>> extractTermsAndRanges(IndexRead } @Override - public void parse(ParseContext context) throws IOException { + public void parse(DocumentParserContext context) throws IOException { SearchExecutionContext searchExecutionContext = this.searchExecutionContext.get(); if (context.doc().getField(queryBuilderField.name()) != null) { // If a percolator query has been defined in an array object then multiple percolator queries @@ -361,7 +363,7 @@ public void 
parse(ParseContext context) throws IOException { } static void createQueryBuilderField(Version indexVersion, BinaryFieldMapper qbField, - QueryBuilder queryBuilder, ParseContext context) throws IOException { + QueryBuilder queryBuilder, DocumentParserContext context) throws IOException { try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) { try (OutputStreamStreamOutput out = new OutputStreamStreamOutput(stream)) { out.setVersion(indexVersion); @@ -379,8 +381,8 @@ static void createQueryBuilderField(Version indexVersion, BinaryFieldMapper qbFi INDEXED_KEYWORD.freeze(); } - void processQuery(Query query, ParseContext context) { - ParseContext.Document doc = context.doc(); + void processQuery(Query query, DocumentParserContext context) { + LuceneDocument doc = context.doc(); PercolatorFieldType pft = (PercolatorFieldType) this.fieldType(); QueryAnalyzer.Result result; result = QueryAnalyzer.analyze(query); @@ -450,7 +452,7 @@ public Iterator iterator() { } @Override - protected void parseCreateField(ParseContext context) { + protected void parseCreateField(DocumentParserContext context) { throw new UnsupportedOperationException("should not be invoked"); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 3b7995790dd19..7f9c0ef0e62ec 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -66,7 +66,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; -import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -75,12 +74,14 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentParserContext; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.TestDocumentParserContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -109,7 +110,6 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { private Directory directory; private IndexWriter indexWriter; - private DocumentMapper documentMapper; private DirectoryReader directoryReader; private IndexService indexService; private MapperService mapperService; @@ -146,7 +146,7 @@ public void init() throws Exception { .startObject("ip_field").field("type", "ip").endObject() .startObject("field").field("type", "keyword").endObject() .endObject().endObject().endObject()); - documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("type", new CompressedXContent(mapper), 
MapperService.MergeReason.MAPPING_UPDATE); String queryField = "query_field"; String percolatorMapper = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") @@ -230,7 +230,7 @@ public void testDuel() throws Exception { return new FunctionScoreQuery(innerQuery, minScore, 1f); }); - List documents = new ArrayList<>(); + List documents = new ArrayList<>(); for (Supplier queryFunction : queryFunctions) { Query query = queryFunction.get(); addQuery(query, documents); @@ -326,7 +326,7 @@ public void testDuel2() throws Exception { ranges.add(new int[]{15, 50}); SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - List documents = new ArrayList<>(); + List documents = new ArrayList<>(); { addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); } @@ -490,7 +490,7 @@ public void testDuelIdBased() throws Exception { queryFunctions.add((id) -> new MatchNoDocsQuery("no reason at all")); int numDocs = randomIntBetween(queryFunctions.size(), queryFunctions.size() * 3); - List documents = new ArrayList<>(); + List documents = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); Query query = queryFunctions.get(i % queryFunctions.size()).apply(id); @@ -520,7 +520,7 @@ public void testDuelIdBased() throws Exception { } public void testDuelSpecificQueries() throws Exception { - List documents = new ArrayList<>(); + List documents = new ArrayList<>(); BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(new Term[]{new Term("field", "quick"), new Term("field", "brown"), new Term("field", "fox")}, 1.0f); @@ -567,7 +567,7 @@ public void testDuelSpecificQueries() throws Exception { } public void testRangeQueries() throws Exception { - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); addQuery(IntPoint.newRangeQuery("int_field", 0, 5), docs); addQuery(LongPoint.newRangeQuery("long_field", 5L, 10L), docs); addQuery(HalfFloatPoint.newRangeQuery("half_float_field", 10, 15), docs); @@ -634,7 +634,7 @@ public void testRangeQueries() throws Exception { } public void testDuelRangeQueries() throws Exception { - List documents = new ArrayList<>(); + List documents = new ArrayList<>(); int lowerInt = randomIntBetween(0, 256); int upperInt = lowerInt + randomIntBetween(0, 32); @@ -737,7 +737,7 @@ public void testDuelRangeQueries() throws Exception { } public void testPercolateMatchAll() throws Exception { - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); addQuery(new MatchAllDocsQuery(), docs); BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(new TermQuery(new Term("field", "value1")), Occur.MUST); @@ -783,7 +783,7 @@ public void testPercolateMatchAll() throws Exception { } public void testFunctionScoreQuery() throws Exception { - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); addQuery(new FunctionScoreQuery(new TermQuery(new Term("field", "value")), null, 1f), docs); addQuery(new FunctionScoreQuery(new TermQuery(new Term("field", "value")), 10f, 1f), docs); addQuery(new FunctionScoreQuery(new MatchAllDocsQuery(), null, 1f), docs); @@ -808,7 +808,7 @@ public void testFunctionScoreQuery() throws Exception { } public void testPercolateSmallAndLargeDocument() throws Exception { - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(new TermQuery(new Term("field", "value1")), Occur.MUST); builder.add(new TermQuery(new 
Term("field", "value2")), Occur.MUST); @@ -868,10 +868,13 @@ public void testPercolateSmallAndLargeDocument() throws Exception { } // This will trigger using the TermsQuery instead of individual term query clauses in the CoveringQuery: + int origMaxClauseCount = BooleanQuery.getMaxClauseCount(); try (Directory directory = new ByteBuffersDirectory()) { + final int maxClauseCount = 100; + BooleanQuery.setMaxClauseCount(maxClauseCount); try (IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig())) { Document document = new Document(); - for (int i = 0; i < 1024; i++) { + for (int i = 0; i < maxClauseCount; i++) { int fieldNumber = 2 + i; document.add(new StringField("field", "value" + fieldNumber, Field.Store.NO)); } @@ -897,11 +900,13 @@ public void testPercolateSmallAndLargeDocument() throws Exception { assertEquals(1, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); } + } finally { + BooleanQuery.setMaxClauseCount(origMaxClauseCount); } } public void testDuplicatedClauses() throws Exception { - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); BooleanQuery.Builder builder = new BooleanQuery.Builder(); BooleanQuery.Builder builder1 = new BooleanQuery.Builder(); @@ -950,7 +955,7 @@ public void testDuplicatedClauses() throws Exception { } public void testDuplicatedClauses2() throws Exception { - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.setMinimumNumberShouldMatch(3); @@ -1001,7 +1006,7 @@ public void testDuplicatedClauses2() throws Exception { public void testMsmAndRanges_disjunction() throws Exception { // Recreates a similar scenario that made testDuel() fail randomly: // https://github.com/elastic/elasticsearch/issues/29393 - List docs = new ArrayList<>(); + List docs = new ArrayList<>(); BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.setMinimumNumberShouldMatch(2); @@ -1101,11 +1106,10 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd } } - private void addQuery(Query query, List docs) { - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext( - documentMapper.mappers(), indexService.getIndexSettings(), indexService.getIndexAnalyzers(), null, null, null); - fieldMapper.processQuery(query, parseContext); - ParseContext.Document queryDocument = parseContext.doc(); + private void addQuery(Query query, List docs) { + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(query, documentParserContext); + LuceneDocument queryDocument = documentParserContext.doc(); // Add to string representation of the query to make debugging easier: queryDocument.add(new StoredField("query_to_string", query.toString())); docs.add(queryDocument); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index c30ff710ee77d..22cdb91966264 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -22,7 +22,10 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilder; @@ -40,6 +43,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -185,10 +189,12 @@ public void testIndexedDocumentDoesNotExist() throws IOException { } @Override - protected Set getObjectsHoldingArbitraryContent() { + protected Map getObjectsHoldingArbitraryContent() { //document contains arbitrary content, no error expected when an object is added to it - return new HashSet<>(Arrays.asList(PercolateQueryBuilder.DOCUMENT_FIELD.getPreferredName(), - PercolateQueryBuilder.DOCUMENTS_FIELD.getPreferredName())); + final Map objects = new HashMap<>(); + objects.put(PercolateQueryBuilder.DOCUMENT_FIELD.getPreferredName(), null); + objects.put(PercolateQueryBuilder.DOCUMENTS_FIELD.getPreferredName(), null); + return objects; } public void testRequiredParameters() { @@ -346,4 +352,29 @@ public void testDisallowExpensiveQueries() { assertEquals("[percolate] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testFromJsonWithDocumentType() throws IOException { + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + String queryAsString = "{\"percolate\" : { \"document\": {}, \"document_type\":\"" + docType + "\", \"field\":\"" + + queryField + "\"}}"; + XContentParser parser = createParserWithCompatibilityFor(JsonXContent.jsonXContent, queryAsString, RestApiVersion.V_7); + QueryBuilder queryBuilder = parseQuery(parser); + queryBuilder.toQuery(searchExecutionContext); + assertWarnings(PercolateQueryBuilder.DOCUMENT_TYPE_DEPRECATION_MESSAGE); + } + + public void testFromJsonWithType() throws IOException { + indexedDocumentIndex = randomAlphaOfLength(4); + indexedDocumentId = randomAlphaOfLength(4); + indexedDocumentVersion = Versions.MATCH_ANY; + documentSource = Collections.singletonList(randomSource(new HashSet<>())); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + + String queryAsString = "{\"percolate\" : { \"index\": \"" + indexedDocumentIndex + + "\", \"type\": \"_doc\", \"id\": \"" + indexedDocumentId + "\", \"field\":\"" + queryField + "\"}}"; + XContentParser parser = createParserWithCompatibilityFor(JsonXContent.jsonXContent, queryAsString, RestApiVersion.V_7); + QueryBuilder queryBuilder = parseQuery(parser); + rewriteAndFetch(queryBuilder, searchExecutionContext).toQuery(searchExecutionContext); + assertWarnings(PercolateQueryBuilder.TYPE_DEPRECATION_MESSAGE); + } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 5fb65cd2a1653..3b2210676cc27 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.Tuple; import 
org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -46,13 +45,16 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentParserContext; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.TestDocumentParserContext; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; @@ -170,10 +172,9 @@ public void testExtractTerms() throws Exception { DocumentMapper documentMapper = mapperService.documentMapper(); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(documentMapper.mappers(), - mapperService.getIndexSettings(), null, null, null, null); - fieldMapper.processQuery(bq.build(), parseContext); - ParseContext.Document document = parseContext.doc(); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + LuceneDocument document = documentParserContext.doc(); PercolatorFieldMapper.PercolatorFieldType fieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); assertThat(document.getField(fieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_COMPLETE)); @@ -192,10 +193,9 @@ public void testExtractTerms() throws Exception { bq.add(termQuery1, Occur.MUST); bq.add(termQuery2, Occur.MUST); - parseContext = new ParseContext.InternalParseContext(documentMapper.mappers(), mapperService.getIndexSettings(), - null, null, null, null); - fieldMapper.processQuery(bq.build(), parseContext); - document = parseContext.doc(); + documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + document = documentParserContext.doc(); assertThat(document.getField(fieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_COMPLETE)); fields = new ArrayList<>(Arrays.asList(document.getFields(fieldType.queryTermsField.name()))); @@ -222,10 +222,9 @@ public void testExtractRanges() throws Exception { DocumentMapper documentMapper = mapperService.documentMapper(); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(documentMapper.mappers(), - mapperService.getIndexSettings(), null, null, null, null); - fieldMapper.processQuery(bq.build(), parseContext); - ParseContext.Document document = parseContext.doc(); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + LuceneDocument document = 
documentParserContext.doc(); PercolatorFieldMapper.PercolatorFieldType fieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); assertThat(document.getField(fieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); @@ -248,10 +247,9 @@ public void testExtractRanges() throws Exception { .rangeQuery(15, 20, true, true, null, null, null, context); bq.add(rangeQuery2, Occur.MUST); - parseContext = new ParseContext.InternalParseContext(documentMapper.mappers(), mapperService.getIndexSettings(), - null, null, null, null); - fieldMapper.processQuery(bq.build(), parseContext); - document = parseContext.doc(); + documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + document = documentParserContext.doc(); assertThat(document.getField(fieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); fields = new ArrayList<>(Arrays.asList(document.getFields(fieldType.rangeField.name()))); @@ -272,10 +270,9 @@ public void testExtractTermsAndRanges_failed() throws Exception { TermRangeQuery query = new TermRangeQuery("field1", new BytesRef("a"), new BytesRef("z"), true, true); DocumentMapper documentMapper = mapperService.documentMapper(); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(documentMapper.mappers(), - mapperService.getIndexSettings(), null, null, null, null); - fieldMapper.processQuery(query, parseContext); - ParseContext.Document document = parseContext.doc(); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(query, documentParserContext); + LuceneDocument document = documentParserContext.doc(); PercolatorFieldMapper.PercolatorFieldType fieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); assertThat(document.getFields().size(), equalTo(1)); @@ -287,10 +284,9 @@ public void testExtractTermsAndRanges_partial() throws Exception { PhraseQuery phraseQuery = new PhraseQuery("field", "term"); DocumentMapper documentMapper = mapperService.documentMapper(); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(documentMapper.mappers(), - mapperService.getIndexSettings(), null, null, null, null); - fieldMapper.processQuery(phraseQuery, parseContext); - ParseContext.Document document = parseContext.doc(); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(phraseQuery, documentParserContext); + LuceneDocument document = documentParserContext.doc(); PercolatorFieldMapper.PercolatorFieldType fieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); assertThat(document.getFields().size(), equalTo(3)); @@ -341,33 +337,40 @@ public void testExtractTermsAndRanges() throws Exception { public void testCreateCandidateQuery() throws Exception { - addQueryFieldMappings(); - - MemoryIndex memoryIndex = new MemoryIndex(false); - StringBuilder text = new StringBuilder(); - for (int i = 0; i < 1022; i++) { - text.append(i).append(' '); + int origMaxClauseCount = BooleanQuery.getMaxClauseCount(); + try { + final int maxClauseCount = 100; + BooleanQuery.setMaxClauseCount(maxClauseCount); + addQueryFieldMappings(); + + MemoryIndex 
memoryIndex = new MemoryIndex(false); + StringBuilder text = new StringBuilder(); + for (int i = 0; i < maxClauseCount - 2; i++) { + text.append(i).append(' '); + } + memoryIndex.addField("field1", text.toString(), new WhitespaceAnalyzer()); + memoryIndex.addField(new LongPoint("field2", 10L), new WhitespaceAnalyzer()); + IndexReader indexReader = memoryIndex.createSearcher().getIndexReader(); + + Tuple t = fieldType.createCandidateQuery(indexReader, Version.CURRENT); + assertTrue(t.v2()); + assertEquals(2, t.v1().clauses().size()); + assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(CoveringQuery.class)); + assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class)); + + // Now push it over the edge, so that it falls back using TermInSetQuery + memoryIndex.addField("field2", "value", new WhitespaceAnalyzer()); + indexReader = memoryIndex.createSearcher().getIndexReader(); + t = fieldType.createCandidateQuery(indexReader, Version.CURRENT); + assertFalse(t.v2()); + assertEquals(3, t.v1().clauses().size()); + TermInSetQuery terms = (TermInSetQuery) t.v1().clauses().get(0).getQuery(); + assertEquals(maxClauseCount - 1, terms.getTermData().size()); + assertThat(t.v1().clauses().get(1).getQuery().toString(), containsString(fieldName + ".range_field: t = fieldType.createCandidateQuery(indexReader, Version.CURRENT); - assertTrue(t.v2()); - assertEquals(2, t.v1().clauses().size()); - assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(CoveringQuery.class)); - assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class)); - - // Now push it over the edge, so that it falls back using TermInSetQuery - memoryIndex.addField("field2", "value", new WhitespaceAnalyzer()); - indexReader = memoryIndex.createSearcher().getIndexReader(); - t = fieldType.createCandidateQuery(indexReader, Version.CURRENT); - assertFalse(t.v2()); - assertEquals(3, t.v1().clauses().size()); - TermInSetQuery terms = (TermInSetQuery) t.v1().clauses().get(0).getQuery(); - assertEquals(1023, terms.getTermData().size()); - assertThat(t.v1().clauses().get(1).getQuery().toString(), containsString(fieldName + ".range_field: + task.addAllowedWarningRegex("\\[types removal\\].*") +}) + +def v7compatibilityNotSupportedTests = { + return [ + 'reindex/20_validation/reindex without source gives useful error message', // exception with a type. Not much benefit adding _doc there. + 'update_by_query/20_validation/update_by_query without source gives useful error message' // exception with a type. Not much benefit adding _doc there. 
+ ] +} tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - /*type related failures */ - 'reindex/20_validation/reindex without source gives useful error message', - 'reindex/85_scripting/Reindex all docs with one doc deletion', - 'delete_by_query/10_basic/Response for version conflict (seq no powered)', - 'update_by_query/10_basic/Response for version conflict (seq no powered)', - 'update_by_query/20_validation/update_by_query without source gives useful error message' - ].join(',') + systemProperty 'tests.rest.blacklist', v7compatibilityNotSupportedTests().join(',') } tasks.named('splitPackagesAudit').configure { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index ba0f5c3cc7e94..32a9567204328 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -353,8 +353,9 @@ void prepareBulkRequest(long thisBatchStartTimeNS, ScrollConsumableHitsResponse * Send a bulk request, handling retries. */ void sendBulkRequest(BulkRequest request, Runnable onSuccess) { + final int requestSize = request.requests().size(); if (logger.isDebugEnabled()) { - logger.debug("[{}]: sending [{}] entry, [{}] bulk request", task.getId(), request.requests().size(), + logger.debug("[{}]: sending [{}] entry, [{}] bulk request", task.getId(), requestSize, new ByteSizeValue(request.estimatedSizeInBytes())); } if (task.isCancelled()) { @@ -365,6 +366,7 @@ void sendBulkRequest(BulkRequest request, Runnable onSuccess) { bulkRetry.withBackoff(bulkClient::bulk, request, new ActionListener() { @Override public void onResponse(BulkResponse response) { + logger.debug("[{}]: completed [{}] entry bulk request", task.getId(), requestSize); onBulkResponse(response, onSuccess); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexSslConfig.java index f6ceb039aae8c..71fef44de4666 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexSslConfig.java @@ -80,6 +80,10 @@ public static List> getSettings() { public ReindexSslConfig(Settings settings, Environment environment, ResourceWatcherService resourceWatcher) { final SslConfigurationLoader loader = new SslConfigurationLoader("reindex.ssl.") { + @Override + protected boolean hasSettings(String prefix) { + return settings.getAsSettings(prefix).isEmpty() == false; + } @Override protected String getSettingAsString(String key) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 7cc9b12e2ca15..0e4fd436f4307 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -277,7 +277,7 @@ public void testBulkResponseSetsLotsOfStatus() throws Exception { ShardId shardId = new ShardId(new Index("name", "uid"), 0); if (rarely()) { versionConflicts++; - responses[i] = new BulkItemResponse(i, 
randomFrom(DocWriteRequest.OpType.values()), + responses[i] = BulkItemResponse.failure(i, randomFrom(DocWriteRequest.OpType.values()), new Failure(shardId.getIndexName(), "id" + i, new VersionConflictEngineException(shardId, "id", "test"))); continue; @@ -307,7 +307,7 @@ public void testBulkResponseSetsLotsOfStatus() throws Exception { final int primaryTerm = randomIntBetween(1, 16); final IndexResponse response = new IndexResponse(shardId, "id" + i, seqNo, primaryTerm, randomInt(), createdResponse); - responses[i] = new BulkItemResponse(i, opType, response); + responses[i] = BulkItemResponse.success(i, opType, response); } assertExactlyOnce(onSuccess -> new DummyAsyncBulkByScrollAction().onBulkResponse(new BulkResponse(responses, 0), @@ -389,8 +389,10 @@ public void testSearchTimeoutsAbortRequest() throws Exception { public void testBulkFailuresAbortRequest() throws Exception { Failure failure = new Failure("index", "id", new RuntimeException("test")); DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction(); - BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] - {new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure)}, randomLong()); + BulkResponse bulkResponse = new BulkResponse( + new BulkItemResponse[] { BulkItemResponse.failure(0, DocWriteRequest.OpType.CREATE, failure) }, + randomLong() + ); action.onBulkResponse(bulkResponse, Assert::fail); BulkByScrollResponse response = listener.get(); assertThat(response.getBulkFailures(), contains(failure)); @@ -974,10 +976,10 @@ void doExecute(ActionType action, Request request, ActionListener caList = Collections.singletonList(ca.toString()); + final X509ExtendedTrustManager trustManager = new PemTrustConfig(caList, configPath).createTrustManager(); sslContext.init(new KeyManager[] { keyManager }, new TrustManager[] { trustManager }, null); return sslContext; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 524bc5617ba3b..7b9bf54dfef8f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -108,15 +108,16 @@ public void testInitialSearchParamsFields() { // Test stored_fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_source").storedField("_id"); - // V_5_0_0_alpha4 => current - remoteVersion = Version.fromId(between(5000004, Version.CURRENT.id)); + // V_5_0_0 (final) => current + int minStoredFieldsVersion = 5000099; + remoteVersion = Version.fromId(randomBoolean() ? minStoredFieldsVersion : between(minStoredFieldsVersion, Version.CURRENT.id)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("stored_fields", "_source,_id")); // Test fields for versions that support it searchRequest = new SearchRequest().source(new SearchSourceBuilder()); searchRequest.source().storedField("_source").storedField("_id"); // V_2_0_0 => V_5_0_0_alpha3 - remoteVersion = Version.fromId(between(2000099, 5000003)); + remoteVersion = Version.fromId(randomBoolean() ? 
minStoredFieldsVersion - 1 : between(2000099, minStoredFieldsVersion - 1)); assertThat(initialSearch(searchRequest, query, remoteVersion).getParameters(), hasEntry("fields", "_source,_id")); // Test extra fields for versions that need it diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index d003380519512..0245a3ea69b92 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -63,10 +63,3 @@ testClusters.all { }, PropertyNormalization.IGNORE_VALUE } -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'repository_url/10_basic/Get a non existing snapshot', - 'repository_url/10_basic/Restore with repository-url using http://', - 'repository_url/10_basic/Restore with repository-url using file://' - ].join(',') -} diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java index f39c4ab1ee274..cc130ca913b42 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java @@ -28,7 +28,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class URLSnapshotRestoreIT extends ESIntegTestCase { @@ -77,7 +76,7 @@ public void testUrlRepository() throws Exception { .prepareGetSnapshots("test-repo") .setSnapshots("test-snap") .get() - .getSnapshots("test-repo") + .getSnapshots() .get(0) .state(); assertThat(state, equalTo(SnapshotState.SUCCESS)); @@ -105,8 +104,7 @@ public void testUrlRepository() throws Exception { logger.info("--> list available shapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); - assertThat(getSnapshotsResponse.getSnapshots("url-repo"), notNullValue()); - assertThat(getSnapshotsResponse.getSnapshots("url-repo").size(), equalTo(1)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); logger.info("--> delete snapshot"); AcknowledgedResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get(); @@ -114,7 +112,6 @@ public void testUrlRepository() throws Exception { logger.info("--> list available shapshot again, no snapshots should be returned"); getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); - assertThat(getSnapshotsResponse.getSnapshots("url-repo"), notNullValue()); - assertThat(getSnapshotsResponse.getSnapshots("url-repo").size(), equalTo(0)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0)); } } diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java index 0d8e6806a43e3..db0b92a894053 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java @@ -8,6 +8,7 @@ package 
org.elasticsearch.common.blobstore.url; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetadata; @@ -20,6 +21,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.URL; import java.nio.file.NoSuchFileException; import java.security.AccessController; @@ -121,6 +123,14 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b throw new UnsupportedOperationException("URL repository doesn't support this operation"); } + @Override + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + boolean atomic, + CheckedConsumer writer) throws IOException { + throw new UnsupportedOperationException("URL repository doesn't support this operation"); + } + @Override public void writeBlobAtomic(String blobName, BytesReference bytes, boolean failIfAlreadyExists) throws IOException { throw new UnsupportedOperationException("URL repository doesn't support this operation"); diff --git a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/URLBlobContainerRetriesTests.java b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/URLBlobContainerRetriesTests.java index a08fd0eda619b..ad173b209b4f8 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/URLBlobContainerRetriesTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/URLBlobContainerRetriesTests.java @@ -24,7 +24,6 @@ import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.SocketTimeoutException; @@ -43,13 +42,13 @@ public static void setUpHttpClient() { } @AfterClass - public static void tearDownHttpClient() throws IOException { + public static void tearDownHttpClient() { factory.close(); } @Override - protected String downloadStorageEndpoint(String blob) { - return "/" + blob; + protected String downloadStorageEndpoint(BlobContainer container, String blob) { + return "/" + container.path().buildAsString() + blob; } @Override diff --git a/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml b/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml index fb49f3da24af5..b932f0d53caad 100644 --- a/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml +++ b/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml @@ -112,9 +112,6 @@ teardown: --- "Restore with repository-url using http://": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Ensure that the URL repository is registered - do: @@ -129,9 +126,9 @@ teardown: repository: repository-url snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -177,9 +174,6 @@ teardown: --- "Restore with repository-url using file://": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Ensure that the 
URL repository is registered - do: @@ -194,9 +188,9 @@ teardown: repository: repository-file snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -242,18 +236,13 @@ teardown: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository-url snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non existing snapshot": diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/110_composite.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/110_composite.yml new file mode 100644 index 0000000000000..2c80545050bd8 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/110_composite.yml @@ -0,0 +1,104 @@ +--- +setup: + - do: + indices.create: + index: http_logs + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + http: + type: composite + script: + source: | + emit(grok('%{COMMONAPACHELOG}').extract(doc["message"].value)); + fields: + clientip: + type: ip + verb: + type: keyword + response: + type: long + properties: + timestamp: + type: date + message: + type: keyword + - do: + bulk: + index: http_logs + refresh: true + body: | + {"index":{}} + {"timestamp": "1998-04-30T14:30:17-05:00", "message" : "40.135.0.0 - - [30/Apr/1998:14:30:17 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736"} + {"index":{}} + {"timestamp": "1998-04-30T14:30:53-05:00", "message" : "232.0.0.0 - - [30/Apr/1998:14:30:53 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736"} + {"index":{}} + {"timestamp": "1998-04-30T14:31:12-05:00", "message" : "26.1.0.0 - - [30/Apr/1998:14:31:12 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736"} + {"index":{}} + {"timestamp": "1998-04-30T14:31:19-05:00", "message" : "247.37.0.0 - - [30/Apr/1998:14:31:19 -0500] \"GET /french/splash_inet.html HTTP/1.0\" 200 3781"} + {"index":{}} + {"timestamp": "1998-04-30T14:31:22-05:00", "message" : "247.37.0.0 - - [30/Apr/1998:14:31:22 -0500] \"GET /images/hm_nbg.jpg HTTP/1.0\" 304 0"} + {"index":{}} + {"timestamp": "1998-04-30T14:31:27-05:00", "message" : "252.0.0.0 - - [30/Apr/1998:14:31:27 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736"} + {"index":{}} + {"timestamp": "1998-04-30T14:31:28-05:00", "message" : "not a valid apache log"} + +--- +fetch: + - do: + search: + index: http_logs + body: + sort: timestamp + fields: + - http.clientip + - http.verb + - http.response + - match: {hits.total.value: 7} + - match: {hits.hits.0.fields.http\.clientip: [40.135.0.0] } + - match: {hits.hits.0.fields.http\.verb: [GET] } + - match: {hits.hits.0.fields.http\.response: [200] } + - is_false: hits.hits.6.fields.http\.clientip + - is_false: hits.hits.6.fields.http\.verb + - is_false: hits.hits.6.fields.http\.response + +--- +query: + - do: + search: + index: http_logs + body: + query: + term: + http.verb: GET + - match: { hits.total.value: 6 } + + - do: + search: + index: http_logs + body: + query: + range: + http.clientip: + from: 232.0.0.0 + to: 253.0.0.0 + - match: { hits.total.value: 4 } + 
+--- +"terms agg": + - do: + search: + index: http_logs + body: + aggs: + response: + terms: + field: http.response + - match: {hits.total.value: 7} + - match: {aggregations.response.buckets.0.key: 200 } + - match: {aggregations.response.buckets.0.doc_count: 5 } + - match: {aggregations.response.buckets.1.key: 304 } + - match: {aggregations.response.buckets.1.doc_count: 1 } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 5f91f2baba235..9cd1db75e86d0 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -49,31 +49,13 @@ tasks.named("dependencyLicenses").configure { mapping from: /netty-.*/, to: 'netty' } -tasks.named("test").configure { - /* - * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each - * other if we allow them to set the number of available processors as it's set-once in Netty. - */ - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} - -tasks.named("internalClusterTest").configure { - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} - -tasks.named("javaRestTest").configure { - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} - TaskProvider pooledTest = tasks.register("pooledTest", Test) { include '**/*Tests.class' - systemProperty 'es.set.netty.runtime.available.processors', 'false' systemProperty 'es.use_unpooled_allocator', 'false' } TaskProvider pooledInternalClusterTest = tasks.register("pooledInternalClusterTest", Test) { include '**/*IT.class' - systemProperty 'es.set.netty.runtime.available.processors', 'false' systemProperty 'es.use_unpooled_allocator', 'false' SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet internalTestSourceSet = sourceSets.getByName(InternalClusterTestPlugin.SOURCE_SET_NAME) @@ -82,7 +64,6 @@ TaskProvider pooledInternalClusterTest = tasks.register("pooledInternalClu } TaskProvider pooledJavaRestTest = tasks.register("pooledJavaRestTest", RestIntegTestTask) { - systemProperty 'es.set.netty.runtime.available.processors', 'false' SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet javaRestTestSourceSet = sourceSets.getByName(JavaRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(javaRestTestSourceSet.getOutput().getClassesDirs()) @@ -101,6 +82,12 @@ tasks.named("check").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( // classes are missing + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.Encoder', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', // from io.netty.handler.codec.protobuf.ProtobufDecoder (netty) 'com.google.protobuf.ExtensionRegistry', @@ -161,6 +148,7 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.MessageLiteOrBuilder', 'com.google.protobuf.nano.CodedOutputByteBufferNano', 'com.google.protobuf.nano.MessageNano', + 'com.github.luben.zstd.Zstd', 'com.jcraft.jzlib.Deflater', 'com.jcraft.jzlib.Inflater', 'com.jcraft.jzlib.JZlib$WrapperType', @@ -173,11 +161,8 @@ tasks.named("thirdPartyAudit").configure { 'com.ning.compress.lzf.util.ChunkDecoderFactory', 'com.ning.compress.lzf.util.ChunkEncoderFactory', 'lzma.sdk.lzma.Encoder', - 
'net.jpountz.lz4.LZ4Compressor', - 'net.jpountz.lz4.LZ4Factory', - 'net.jpountz.lz4.LZ4FastDecompressor', - 'net.jpountz.xxhash.XXHash32', - 'net.jpountz.xxhash.XXHashFactory', + 'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', 'io.netty.internal.tcnative.CertificateCallback', 'io.netty.internal.tcnative.CertificateVerifier', 'io.netty.internal.tcnative.SessionTicketKey', @@ -185,6 +170,7 @@ tasks.named("thirdPartyAudit").configure { 'io.netty.internal.tcnative.SSL', 'io.netty.internal.tcnative.SSLSession', 'io.netty.internal.tcnative.SSLSessionCache', + 'io.netty.internal.tcnative.ResultCallback', 'org.eclipse.jetty.alpn.ALPN$ClientProvider', 'org.eclipse.jetty.alpn.ALPN$ServerProvider', 'org.eclipse.jetty.alpn.ALPN', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.63.Final.jar.sha1 deleted file mode 100644 index d472369d69bc0..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40028ce5ac7c43f1c9a1439f74637cad04013e23 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..973ba015d2079 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +8d4be9506ea5f54af58bcd596ba3fe2fc5036413 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.63.Final.jar.sha1 deleted file mode 100644 index 8bfbe331c55c9..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4d2fccea88c80e56d59ce1053c53df0f9f4f5db \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..ae8837c2664a8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +e7cfff848e6c1294645638d74fce6ad89cc6f3f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.63.Final.jar.sha1 deleted file mode 100644 index 0279e286e318d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8c9b159dcb76452dc98a370a5511ff993670419 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..74435145e041c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +15fff6bae9e4b09ba5d48a70bb88841c9fc22a32 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.63.Final.jar.sha1 deleted file mode 100644 index 54e103f1d8b5f..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1206b46384d4dcbecee2901f18ce65ecf02e8a4 \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-common-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..164add2d48e57 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +d1c4eda38f525a02fb1ea8d94a8d98dc2935fd02 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.63.Final.jar.sha1 deleted file mode 100644 index ae180d9ae4016..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -879a43c2325b08e92e8967218b6ddb0ed4b7a0d3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..657b3ad736c1e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +1e6ec9b58725a96b2bd0f173709b59c79175225c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.63.Final.jar.sha1 deleted file mode 100644 index eb6858e75cc21..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d07cd47c101dfa655d6d5cc304d523742fd78ca8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..4a085c20c9ec0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +2f018d8df6f533c3d75dc5fdb11071bc2e7b591b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.63.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.63.Final.jar.sha1 deleted file mode 100644 index c41cdc86c51c8..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09a8bbe1ba082c9434e6f524d3864a53f340f2df \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.66.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..c21ce614d86e9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +3511bc4e13198de644eefe4c8c758245145da128 \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java index 1eae706ba9377..35d88842ec246 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java @@ -99,9 +99,8 @@ public void channelWritabilityChanged(ChannelHandlerContext ctx) { @Override public void flush(ChannelHandlerContext ctx) { assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); - Channel channel = ctx.channel(); - if (channel.isWritable() || channel.isActive() == false) { - doFlush(ctx); 
+ if (doFlush(ctx) == false) { + ctx.flush(); } } @@ -113,16 +112,14 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { super.channelInactive(ctx); } - private void doFlush(ChannelHandlerContext ctx) { + private boolean doFlush(ChannelHandlerContext ctx) { assert ctx.executor().inEventLoop(); final Channel channel = ctx.channel(); if (channel.isActive() == false) { - if (currentWrite != null) { - currentWrite.promise.tryFailure(new ClosedChannelException()); - } failQueuedWrites(); - return; + return false; } + boolean needsFlush = true; while (channel.isWritable()) { if (currentWrite == null) { currentWrite = queuedWrites.poll(); @@ -131,11 +128,6 @@ private void doFlush(ChannelHandlerContext ctx) { break; } final WriteOperation write = currentWrite; - if (write.buf.readableBytes() == 0) { - write.promise.trySuccess(); - currentWrite = null; - continue; - } final int readableBytes = write.buf.readableBytes(); final int bufferSize = Math.min(readableBytes, 1 << 18); final int readerIndex = write.buf.readerIndex(); @@ -148,7 +140,8 @@ private void doFlush(ChannelHandlerContext ctx) { writeBuffer = write.buf; } final ChannelFuture writeFuture = ctx.write(writeBuffer); - if (sliced == false || write.buf.readableBytes() == 0) { + needsFlush = true; + if (sliced == false) { currentWrite = null; writeFuture.addListener(future -> { assert ctx.executor().inEventLoop(); @@ -166,18 +159,30 @@ private void doFlush(ChannelHandlerContext ctx) { } }); } - ctx.flush(); - if (channel.isActive() == false) { - failQueuedWrites(); - return; + if (channel.isWritable() == false) { + // try flushing to make channel writable again, loop will only continue if channel becomes writable again + ctx.flush(); + needsFlush = false; } } + if (needsFlush) { + ctx.flush(); + } + if (channel.isActive() == false) { + failQueuedWrites(); + } + return true; } private void failQueuedWrites() { + if (currentWrite != null) { + final WriteOperation current = currentWrite; + currentWrite = null; + current.failAsClosedChannel(); + } WriteOperation queuedWrite; while ((queuedWrite = queuedWrites.poll()) != null) { - queuedWrite.promise.tryFailure(new ClosedChannelException()); + queuedWrite.failAsClosedChannel(); } } @@ -191,5 +196,10 @@ private static final class WriteOperation { this.buf = buf; this.promise = promise; } + + void failAsClosedChannel() { + promise.tryFailure(new ClosedChannelException()); + buf.release(); + } } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index c4ab52601c25a..4139db762c6fb 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -87,6 +87,7 @@ public List get(SocketAddress remoteAddress, String... 
uris) t final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); httpRequest.headers().add(HOST, "localhost"); httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"); requests.add(httpRequest); } return sendRequests(remoteAddress, requests); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index f5607a39f5d7a..d33b3eab2e755 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.AbstractHttpServerTransportTestCase; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.CorsHandler; import org.elasticsearch.http.HttpServerTransport; @@ -55,7 +56,6 @@ import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -84,7 +84,7 @@ /** * Tests for the {@link Netty4HttpServerTransport} class. */ -public class Netty4HttpServerTransportTests extends ESTestCase { +public class Netty4HttpServerTransportTests extends AbstractHttpServerTransportTestCase { private NetworkService networkService; private ThreadPool threadPool; @@ -96,7 +96,7 @@ public void setup() throws Exception { networkService = new NetworkService(Collections.emptyList()); threadPool = new TestThreadPool("test"); bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings = randomClusterSettings(); } @After @@ -360,7 +360,7 @@ public void dispatchBadRequest(final RestChannel channel, .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "elastic.co").build(); try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, - xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + xContentRegistry(), dispatcher, randomClusterSettings(), new SharedGroupFactory(settings))) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -423,7 +423,7 @@ public void dispatchBadRequest(final RestChannel channel, NioEventLoopGroup group = new NioEventLoopGroup(); try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, - xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + xContentRegistry(), dispatcher, randomClusterSettings(), new SharedGroupFactory(settings))) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); diff --git 
a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index b41cd5b93f923..72425fe1c1784 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -36,11 +36,15 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } +def v7compatibilityNotSupportedTests = { + return [ + //marked as not needing compatible api + 'analysis_icu/10_basic/Normalization with deprecated unicodeSetFilter' // Cleanup versioned deprecations in analysis #41560 + ] +} tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'analysis_icu/10_basic/Normalization with deprecated unicodeSetFilter' - ].join(',') + systemProperty 'tests.rest.blacklist', v7compatibilityNotSupportedTests().join(',') } tasks.named('splitPackagesAudit').configure { diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 1b024f3887111..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8280f1cccdbca043f45d9b8d002f0d9bc76cf0c \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.9.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..8862b76301215 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.9.0.jar.sha1 @@ -0,0 +1 @@ +390ebbb1cdb3ab1e8e5286ae4d77d336fd3895ba \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index 51bc1d5233e0d..b66e56d7e0d56 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -20,11 +20,11 @@ import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.analysis.IndexableBinaryStringTools; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -435,7 +435,7 @@ public FieldMapper.Builder getMergeBuilder() { } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { final String value; XContentParser parser = context.parser(); if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { @@ -444,7 +444,12 @@ protected void parseCreateField(ParseContext context) throws IOException { value = parser.textOrNull(); } - if (value == null || value.length() > ignoreAbove) { + if (value == null) { + return; + } + + if (value.length() > ignoreAbove) { + context.addIgnoredField(name()); return; } diff --git 
a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index a71ecf1b65aef..b4ddc8a2b9d79 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -267,10 +267,15 @@ public void testIgnoreAbove() throws IOException { ParsedDocument doc = mapper.parse(source(b -> b.field("field", "elk"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); + fields = doc.rootDoc().getFields("_ignored"); + assertEquals(0, fields.length); doc = mapper.parse(source(b -> b.field("field", "elasticsearch"))); fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); + fields = doc.rootDoc().getFields("_ignored"); + assertEquals(1, fields.length); + assertEquals("field", fields[0].stringValue()); } public void testUpdateIgnoreAbove() throws IOException { diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 4fb367a6032d0..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c7904158a18d2ffd5d19bdcca177dabb23cde108 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.9.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..1b60c193938a9 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.9.0.jar.sha1 @@ -0,0 +1 @@ +d7553997ad55671bb8c4c023f607f71463da4d0f \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 77911b66d03a9..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e9082976506bf9cae24c3f96fcf0559073086b9 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.9.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..b9ea18c6de216 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.9.0.jar.sha1 @@ -0,0 +1 @@ +99ae12d053d27e2b8d78d084e1bf1c893b896554 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index d1fbb62a830b7..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee1efd5a2ddce917426013e60a5dec3723fde2ba \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.9.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..19d4411ac82f5 --- /dev/null +++ 
b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.9.0.jar.sha1 @@ -0,0 +1 @@ +144cbfb8c5a66ae9ac7abddb8855974599d1ea67 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 59697d250b018..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01cf2b26a9896ae62b37c40866ae5cc8a36fec59 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.9.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..81834aaa9b9e6 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.9.0.jar.sha1 @@ -0,0 +1 @@ +35f24955ae3d079a262a3aa7ac2ca2449c6d10ee \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 0b969ccfeb362..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3695fb069b0062254ea37356396cd89c8b9b78d9 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.9.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..ff6375c1634ab --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.9.0.jar.sha1 @@ -0,0 +1 @@ +d2a6fb6327e54c4eccfaa0f8e74953cd69a31e07 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 237908311bbbc..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ca46a2556e2cdb6f3d8eb3e8b4655ccfe45432fc \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.9.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..f901aa40fa5a2 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.9.0.jar.sha1 @@ -0,0 +1 @@ +37fd6e23bbb1c146c729bb32b6c043ed0642a73e \ No newline at end of file diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index be31c77b92896..5d87ad59fef1f 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -6,8 +6,7 @@ * Side Public License, v 1. 
*/ - -import org.elasticsearch.gradle.internal.MavenFilteringHack +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.internal.test.RestIntegTestTask @@ -35,7 +34,7 @@ Map expansions = [ tasks.named("processYamlRestTestResources").configure { inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) + filter("tokens" : expansions.collectEntries {k, v -> [k, v.toString()]} /* must be a map of strings */, ReplaceTokens.class) } // disable default yamlRestTest task, use spezialized ones below diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml index 682327b72dd9e..48afe1a334bf8 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml @@ -3,7 +3,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: ${expected_nodes} + wait_for_nodes: @expected_nodes@ --- "All nodes are correctly discovered": @@ -12,4 +12,4 @@ setup: nodes.info: metric: [ transport ] - - match: { _nodes.total: ${expected_nodes} } + - match: { _nodes.total: @expected_nodes@ } diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 4b05eae7b8689..738929e6f2b26 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -17,8 +17,6 @@ dependencies { api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:1.3.9' - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" @@ -53,6 +51,55 @@ tasks.named("thirdPartyAudit").configure { 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', - 'org.apache.log.Logger' + 'org.apache.log.Logger', + 'org.apache.http.ConnectionReuseStrategy', + 'org.apache.http.Header', + 'org.apache.http.HttpEntity', + 'org.apache.http.HttpEntityEnclosingRequest', + 'org.apache.http.HttpHost', + 'org.apache.http.HttpRequest', + 'org.apache.http.HttpResponse', + 'org.apache.http.HttpVersion', + 'org.apache.http.RequestLine', + 'org.apache.http.StatusLine', + 'org.apache.http.client.AuthenticationHandler', + 'org.apache.http.client.HttpClient', + 'org.apache.http.client.HttpRequestRetryHandler', + 'org.apache.http.client.RedirectHandler', + 'org.apache.http.client.RequestDirector', + 'org.apache.http.client.UserTokenHandler', + 'org.apache.http.client.methods.HttpDelete', + 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', + 'org.apache.http.client.methods.HttpGet', + 'org.apache.http.client.methods.HttpHead', + 'org.apache.http.client.methods.HttpOptions', + 'org.apache.http.client.methods.HttpPost', + 'org.apache.http.client.methods.HttpPut', + 'org.apache.http.client.methods.HttpRequestBase', + 'org.apache.http.client.methods.HttpTrace', + 
'org.apache.http.conn.ClientConnectionManager', + 'org.apache.http.conn.ConnectionKeepAliveStrategy', + 'org.apache.http.conn.params.ConnManagerParams', + 'org.apache.http.conn.params.ConnPerRouteBean', + 'org.apache.http.conn.params.ConnRouteParams', + 'org.apache.http.conn.routing.HttpRoutePlanner', + 'org.apache.http.conn.scheme.PlainSocketFactory', + 'org.apache.http.conn.scheme.Scheme', + 'org.apache.http.conn.scheme.SchemeRegistry', + 'org.apache.http.conn.ssl.SSLSocketFactory', + 'org.apache.http.conn.ssl.X509HostnameVerifier', + 'org.apache.http.entity.AbstractHttpEntity', + 'org.apache.http.impl.client.DefaultHttpClient', + 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', + 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', + 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', + 'org.apache.http.message.BasicHttpResponse', + 'org.apache.http.params.BasicHttpParams', + 'org.apache.http.params.HttpConnectionParams', + 'org.apache.http.params.HttpParams', + 'org.apache.http.params.HttpProtocolParams', + 'org.apache.http.protocol.HttpContext', + 'org.apache.http.protocol.HttpProcessor', + 'org.apache.http.protocol.HttpRequestExecutor' ) } diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.10.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.10.jar.sha1 deleted file mode 100644 index b708efd0dd57f..0000000000000 --- a/plugins/discovery-gce/licenses/httpclient-4.5.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-LICENSE.txt b/plugins/discovery-gce/licenses/httpclient-LICENSE.txt deleted file mode 100644 index 32f01eda18fe9..0000000000000 --- a/plugins/discovery-gce/licenses/httpclient-LICENSE.txt +++ /dev/null @@ -1,558 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -========================================================================= - -This project includes Public Suffix List copied from - -licensed under the terms of the Mozilla Public License, v. 2.0 - -Full license text: - -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. 
"Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. 
- -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. 
Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 deleted file mode 100644 index 3c046171b30da..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-LICENSE.txt b/plugins/discovery-gce/licenses/httpcore-LICENSE.txt deleted file mode 100644 index 72819a9f06f2a..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-LICENSE.txt +++ /dev/null @@ -1,241 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - -========================================================================= - -This project contains annotations in the package org.apache.http.annotation -which are derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) -Full text: http://creativecommons.org/licenses/by/2.5/legalcode - -License - -THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. - -BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. - -1. Definitions - - "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. - "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
- "Licensor" means the individual or entity that offers the Work under the terms of this License. - "Original Author" means the individual or entity who created the Work. - "Work" means the copyrightable work of authorship offered under the terms of this License. - "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. - -2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. - -3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: - - to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; - to create and reproduce Derivative Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; - to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. - - For the avoidance of doubt, where the work is a musical composition: - Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. - Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). - Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). - -The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. - -4. 
Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: - - You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. - If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. - -5. Representations, Warranties and Disclaimer - -UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. - -6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. Termination - - This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. - Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. - -8. Miscellaneous - - Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. - Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. - If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. - This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/discovery-gce/licenses/httpcore-NOTICE.txt b/plugins/discovery-gce/licenses/httpcore-NOTICE.txt deleted file mode 100644 index c0be50a505ec1..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-NOTICE.txt +++ /dev/null @@ -1,8 +0,0 @@ -Apache HttpComponents Core -Copyright 2005-2014 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 061c755a885a0..86fbf30963461 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -7,7 +7,7 @@ */ -import org.elasticsearch.gradle.internal.MavenFilteringHack +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture @@ -41,7 +41,7 @@ Map expansions = [ tasks.named("processYamlRestTestResources").configure { inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) + filter("tokens" : expansions.collectEntries {k, v -> [k, v.toString()]} /* must be a map of strings */, ReplaceTokens.class) } tasks.named("yamlRestTest").configure { diff --git a/plugins/discovery-gce/qa/gce/src/yamlRestTest/resources/rest-api-spec/test/discovery_gce/10_basic.yml b/plugins/discovery-gce/qa/gce/src/yamlRestTest/resources/rest-api-spec/test/discovery_gce/10_basic.yml index 562d69a7a386c..62939470d686b 100644 --- a/plugins/discovery-gce/qa/gce/src/yamlRestTest/resources/rest-api-spec/test/discovery_gce/10_basic.yml +++ b/plugins/discovery-gce/qa/gce/src/yamlRestTest/resources/rest-api-spec/test/discovery_gce/10_basic.yml @@ -3,7 +3,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: ${expected_nodes} + wait_for_nodes: @expected_nodes@ --- "All nodes are correctly discovered": @@ -12,4 +12,4 @@ setup: nodes.info: metric: [ transport ] - - match: { _nodes.total: ${expected_nodes} } + - match: { _nodes.total: @expected_nodes@ } diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index 2bead2971bf35..e3bbc6cb22b34 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -1,15 +1,13 @@ import org.elasticsearch.gradle.internal.info.BuildParams // Subprojects aren't published so do not assemble -gradle.projectsEvaluated { - subprojects { - project.tasks.matching { it.name.equals('assemble') }.configureEach { +subprojects { + project.tasks.matching { it.name.equals('assemble') }.configureEach { + enabled = false + } + if (BuildParams.inFipsJvm) { + project.tasks.configureEach { enabled = false } - if (BuildParams.inFipsJvm) { - project.tasks.configureEach { - enabled = false - } - } } } diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 7281c6b2be065..894f4ebe4bc54 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -8,12 +8,13 @@ package org.elasticsearch.example.expertscript; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.DocReader; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScoreScript.LeafFactory; import org.elasticsearch.script.ScriptContext; @@ -127,16 +128,17 @@ public boolean needs_score() { } @Override - 
public ScoreScript newInstance(LeafReaderContext context) + public ScoreScript newInstance(DocReader docReader) throws IOException { - PostingsEnum postings = context.reader().postings( - new Term(field, term)); + DocValuesDocReader dvReader = ((DocValuesDocReader) docReader); + PostingsEnum postings = dvReader.getLeafReaderContext() + .reader().postings(new Term(field, term)); if (postings == null) { /* * the field and/or term don't exist in this segment, * so always return 0 */ - return new ScoreScript(params, lookup, context) { + return new ScoreScript(params, lookup, docReader) { @Override public double execute( ExplanationHolder explanation @@ -145,7 +147,7 @@ public double execute( } }; } - return new ScoreScript(params, lookup, context) { + return new ScoreScript(params, lookup, docReader) { int currentDocid = -1; @Override public void setDocument(int docid) { diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 03a5e5f717213..0f5f7a4031789 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -16,10 +16,10 @@ esplugin { } versions << [ - 'tika' : '1.24', - 'pdfbox': '2.0.19', + 'tika' : '1.27', + 'pdfbox': '2.0.24', 'poi' : '4.1.2', - 'mime4j': '0.8.3' + 'mime4j': '0.8.5' ] dependencies { diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 deleted file mode 100644 index 464a34dd97643..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1179b56c9919c1a8e20d3a528ee4c6cee19bcbe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 new file mode 100644 index 0000000000000..f73bbd03803c3 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.5.jar.sha1 @@ -0,0 +1 @@ +0fc7258f948358c8caace27b9b191437a50a7ecc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 deleted file mode 100644 index 4f98753aa0af4..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e80733714eb6a70895bfc74a9528c658504c2c83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 new file mode 100644 index 0000000000000..1625e7d33617e --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.5.jar.sha1 @@ -0,0 +1 @@ +6808f50c447fb033b334ca5ca25830647d85abe1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.19.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.19.jar.sha1 deleted file mode 100644 index ac83b56e463ad..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8b3873aacde51f1a3f4a052b236de828867d000 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.24.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.24.jar.sha1 new file mode 100644 index 0000000000000..1f6388867917a --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.24.jar.sha1 @@ -0,0 +1 @@ +df8ecb3006dfcd52355a5902096e5ec34f06112e \ No newline at end of file diff 
--git a/plugins/ingest-attachment/licenses/pdfbox-2.0.19.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.19.jar.sha1 deleted file mode 100644 index 14757fabc207c..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dfcfc278b4b66e1a8b0e9e681b84ffe48da2c21 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.24.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.24.jar.sha1 new file mode 100644 index 0000000000000..2eb2e357cbf1c --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.24.jar.sha1 @@ -0,0 +1 @@ +cb562ee5f43e29415af4477e62fbe668ef88d18b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.24.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.24.jar.sha1 deleted file mode 100644 index 6579753564ddd..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-1.24.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed2e85f034743ea8e7ff25ab594caf361c408a44 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.27.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.27.jar.sha1 new file mode 100644 index 0000000000000..2cd08a1849a31 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-1.27.jar.sha1 @@ -0,0 +1 @@ +079ad0f72558b8fbce947147959e2faff8b7b70a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.24.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.24.jar.sha1 deleted file mode 100644 index 6ce939379f830..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-1.24.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -282c0ecd31cb235f7d96bef18b8cdcb56573a195 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.27.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.27.jar.sha1 new file mode 100644 index 0000000000000..218f8a037e7c3 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-1.27.jar.sha1 @@ -0,0 +1 @@ +269e021ed326239fd8f62e3ff85f4b4e8dbd6ec9 \ No newline at end of file diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 3bb3ed7816728..da05111e20fcc 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -25,8 +25,8 @@ import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TextParams; import org.elasticsearch.index.mapper.TextSearchInfo; @@ -516,7 +516,7 @@ protected AnnotatedTextFieldMapper(String simpleName, FieldType fieldType, Annot } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { final String value = context.parser().textOrNull(); if (value == null) { diff --git 
a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 00adef515b82a..5e9709d1ff4dc 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -19,9 +19,9 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.ValueFetcher; @@ -121,7 +121,7 @@ protected String contentType() { } @Override - protected void parseCreateField(ParseContext context) + protected void parseCreateField(DocumentParserContext context) throws IOException { final String value = context.parser().textOrNull(); if (value != null) { diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index 2ea7a77f27572..f9334ba7c5a74 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -10,12 +10,12 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.index.mapper.DocValueFetcher; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; @@ -85,7 +85,7 @@ public boolean enabled() { } @Override - public void postParse(ParseContext context) throws IOException { + public void postParse(DocumentParserContext context) throws IOException { // we post parse it so we get the size stored, possibly compressed (source will be preParse) if (enabled.value() == false) { return; diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index d808b58b45c96..82459bd6f78ca 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -1,4 +1,5 @@ -import org.elasticsearch.gradle.internal.MavenFilteringHack +import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.PropertyNormalization import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin @@ -112,6 +113,13 @@ tasks.named("dependencyLicenses").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 
'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.Encoder', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // from reactory-netty metric collection 'io.micrometer.core.instrument.Clock', 'io.micrometer.core.instrument.Counter', @@ -197,6 +205,8 @@ tasks.named("thirdPartyAudit").configure { 'com.jcraft.jzlib.JZlib$WrapperType', 'com.jcraft.jzlib.JZlib', + 'com.github.luben.zstd.Zstd', + // from io.netty.handler.codec.compression.LzfDecoder // from io.netty.handler.codec.compression.LzfEncoder (netty-codec) 'com.ning.compress.BufferRecycler', @@ -210,15 +220,6 @@ tasks.named("thirdPartyAudit").configure { // from io.netty.handler.codec.compression.LzmaFrameEncoder (netty-codec) 'lzma.sdk.lzma.Encoder', - // from io.netty.handler.codec.compression.Lz4FrameDecoder (netty-codec) - 'net.jpountz.lz4.LZ4Compressor', - 'net.jpountz.lz4.LZ4Factory', - 'net.jpountz.lz4.LZ4FastDecompressor', - - // from io.netty.handler.codec.compression.Lz4XXHash32 (netty-codec) - 'net.jpountz.xxhash.XXHash32', - 'net.jpountz.xxhash.XXHashFactory', - // from io.netty.handler.ssl.JettyAlpnSslEngin (netty-handler optional dependency) 'org.eclipse.jetty.alpn.ALPN$ClientProvider', 'org.eclipse.jetty.alpn.ALPN$ServerProvider', @@ -231,6 +232,8 @@ tasks.named("thirdPartyAudit").configure { 'org.conscrypt.HandshakeListener', // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', 'io.netty.internal.tcnative.Buffer', 'io.netty.internal.tcnative.Library', 'io.netty.internal.tcnative.SSL', @@ -238,6 +241,7 @@ tasks.named("thirdPartyAudit").configure { 'io.netty.internal.tcnative.SSLPrivateKeyMethod', 'io.netty.internal.tcnative.CertificateCallback', 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', 'io.netty.internal.tcnative.SessionTicketKey', 'io.netty.internal.tcnative.SniHostNameMatcher', 'io.netty.internal.tcnative.SSLSession', @@ -333,14 +337,14 @@ if (!azureAccount && !azureKey && !azureContainer && !azureBasePath && !azureSas testFixtures.useFixture ':test:fixtures:azure-fixture', 'azure-fixture' } -Map expansions = [ +Map expansions = [ 'container': azureContainer, 'base_path': azureBasePath + "_integration_tests" ] tasks.named("processYamlRestTestResources").configure { inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) + filter("tokens" : expansions, ReplaceTokens.class) } tasks.named("internalClusterTest").configure { @@ -359,7 +363,9 @@ testClusters.matching { it.name == "yamlRestTest" }.configureEach { if (useFixture) { setting 'azure.client.integration_test.endpoint_suffix', azureAddress String firstPartOfSeed = BuildParams.testSeed.tokenize(':').get(0) - setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString(), System.getProperty('ignore.tests.seed') == null ? DEFAULT : IGNORE_VALUE + + def ignoreTestSeed = providers.systemProperty('ignore.tests.seed').forUseAtConfigurationTime().isPresent() ? 
PropertyNormalization.IGNORE_VALUE : PropertyNormalization.DEFAULT + setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString(), ignoreTestSeed } } diff --git a/plugins/repository-azure/licenses/netty-buffer-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-buffer-4.1.63.Final.jar.sha1 deleted file mode 100644 index d472369d69bc0..0000000000000 --- a/plugins/repository-azure/licenses/netty-buffer-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40028ce5ac7c43f1c9a1439f74637cad04013e23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-buffer-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-buffer-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..973ba015d2079 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-buffer-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +8d4be9506ea5f54af58bcd596ba3fe2fc5036413 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-4.1.63.Final.jar.sha1 deleted file mode 100644 index 8bfbe331c55c9..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4d2fccea88c80e56d59ce1053c53df0f9f4f5db \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..ae8837c2664a8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +e7cfff848e6c1294645638d74fce6ad89cc6f3f3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http-4.1.63.Final.jar.sha1 deleted file mode 100644 index 0279e286e318d..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8c9b159dcb76452dc98a370a5511ff993670419 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..74435145e041c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +15fff6bae9e4b09ba5d48a70bb88841c9fc22a32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.63.Final.jar.sha1 deleted file mode 100644 index ad44612f004a3..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -294e90696d8d6e20c889511d2484b37158cb9caa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..3b563c112dcc5 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +83f51766236096bd6d493a9f858711fd7974268e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.63.Final.jar.sha1 deleted file mode 100644 index 8a7f988798303..0000000000000 
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97707b764c9287836dcf626dd03c81f3bbfc86c6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..8337fe0cd47cb --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +8f728166fab4e808c2154a44aa0297d06c2b807a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-common-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-common-4.1.63.Final.jar.sha1 deleted file mode 100644 index 54e103f1d8b5f..0000000000000 --- a/plugins/repository-azure/licenses/netty-common-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1206b46384d4dcbecee2901f18ce65ecf02e8a4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-common-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-common-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..164add2d48e57 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-common-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +d1c4eda38f525a02fb1ea8d94a8d98dc2935fd02 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-4.1.63.Final.jar.sha1 deleted file mode 100644 index ae180d9ae4016..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -879a43c2325b08e92e8967218b6ddb0ed4b7a0d3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..657b3ad736c1e --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +1e6ec9b58725a96b2bd0f173709b59c79175225c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.63.Final.jar.sha1 deleted file mode 100644 index bede283973185..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8cbdc537d75f219c04a057b984b2f0b55c1dbff \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..4e6c1b41553cd --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +1ad345d6e4342ff31f4387ec8b4c52593c490c37 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-4.1.63.Final.jar.sha1 deleted file mode 100644 index eb6858e75cc21..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d07cd47c101dfa655d6d5cc304d523742fd78ca8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..4a085c20c9ec0 --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-resolver-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +2f018d8df6f533c3d75dc5fdb11071bc2e7b591b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-4.1.63.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-4.1.63.Final.jar.sha1 deleted file mode 100644 index c41cdc86c51c8..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09a8bbe1ba082c9434e6f524d3864a53f340f2df \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-4.1.66.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..c21ce614d86e9 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +3511bc4e13198de644eefe4c8c758245145da128 \ No newline at end of file diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 4ecf0012e77ff..d293ad7c6c2d9 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -7,14 +7,15 @@ */ package org.elasticsearch.repositories.azure; +import fixture.azure.AzureHttpHandler; + import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import fixture.azure.AzureHttpHandler; + import org.elasticsearch.common.Randomness; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -24,12 +25,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; @@ -40,6 +43,7 @@ import java.util.regex.Pattern; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") @@ -243,4 +247,12 @@ public void testDeleteBlobsIgnoringIfNotExists() throws Exception { assertThat(container.listBlobs(), is(anEmptyMap())); } } + + public void testNotFoundErrorMessageContainsFullKey() throws Exception { + try (BlobStore store = newBlobStore()) { + BlobContainer container = store.blobContainer(BlobPath.EMPTY.add("nested").add("dir")); + NoSuchFileException exception = expectThrows(NoSuchFileException.class, () -> container.readBlob("blob")); + assertThat(exception.getMessage(), 
containsString("nested/dir/blob] not found")); + } + } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 6365b248470b2..7bd91c2daef84 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.util.Throwables; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetadata; @@ -22,6 +23,7 @@ import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.file.NoSuchFileException; import java.util.Iterator; import java.util.Map; @@ -45,6 +47,7 @@ public boolean blobExists(String blobName) throws IOException { } private InputStream openInputStream(String blobName, long position, @Nullable Long length) throws IOException { + String blobKey = buildKey(blobName); logger.trace("readBlob({}) from position [{}] with length [{}]", blobName, position, length != null ? length : "unlimited"); if (blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY && blobExists(blobName) == false) { // On Azure, if the location path is a secondary location, and the blob does not @@ -53,18 +56,18 @@ private InputStream openInputStream(String blobName, long position, @Nullable Lo // before throwing a storage exception. This can cause long delays in retrieving // snapshots, so we first check if the blob exists before trying to open an input // stream to it. 
- throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); + throw new NoSuchFileException("Blob [" + blobKey + "] not found"); } try { - return blobStore.getInputStream(buildKey(blobName), position, length); + return blobStore.getInputStream(blobKey, position, length); } catch (Exception e) { Throwable rootCause = Throwables.getRootCause(e); if (rootCause instanceof BlobStorageException) { if (((BlobStorageException) rootCause).getStatusCode() == 404) { - throw new NoSuchFileException("Blob [" + blobName + "] not found"); + throw new NoSuchFileException("Blob [" + blobKey + "] not found"); } } - throw new IOException("Unable to get input stream for blob [" + blobName + "]", e); + throw new IOException("Unable to get input stream for blob [" + blobKey + "]", e); } } @@ -99,6 +102,14 @@ public void writeBlob(String blobName, BytesReference bytes, boolean failIfAlrea blobStore.writeBlob(buildKey(blobName), bytes, failIfAlreadyExists); } + @Override + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + boolean atomic, + CheckedConsumer writer) throws IOException { + blobStore.writeBlob(buildKey(blobName), failIfAlreadyExists, writer); + } + @Override public DeleteResult delete() throws IOException { return blobStore.deleteBlobDirectory(keyPath); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index e7683fb959e25..35482bbe93fa0 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -33,6 +33,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -46,6 +48,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.azure.AzureRepository.Repository; +import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.core.scheduler.Schedulers; @@ -53,6 +56,7 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; @@ -82,6 +86,8 @@ public class AzureBlobStore implements BlobStore { private final AzureStorageService service; + private final BigArrays bigArrays; + private final String clientName; private final String container; private final LocationMode locationMode; @@ -90,10 +96,11 @@ public class AzureBlobStore implements BlobStore { private final Stats stats = new Stats(); private final BiConsumer statsConsumer; - public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service) { + public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service, BigArrays bigArrays) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; + 
this.bigArrays = bigArrays; // locationMode is set per repository, not per client this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); this.maxSinglePartUploadSize = Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.get(metadata.settings()); @@ -383,6 +390,49 @@ public void writeBlob(String blobName, BytesReference bytes, boolean failIfAlrea executeSingleUpload(blobName, byteBufferFlux, bytes.length(), failIfAlreadyExists); } + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + CheckedConsumer<OutputStream, IOException> writer) throws IOException { + final BlockBlobAsyncClient blockBlobAsyncClient = asyncClient().getBlobContainerAsyncClient(container) + .getBlobAsyncClient(blobName).getBlockBlobAsyncClient(); + try (ChunkedBlobOutputStream<String> out = new ChunkedBlobOutputStream<>(bigArrays, getUploadBlockSize()) { + + @Override + protected void flushBuffer() { + if (buffer.size() == 0) { + return; + } + final String blockId = makeMultipartBlockId(); + SocketAccess.doPrivilegedVoidException(() -> blockBlobAsyncClient.stageBlock( + blockId, + Flux.fromArray(BytesReference.toByteBuffers(buffer.bytes())), + buffer.size() + ).block()); + finishPart(blockId); + } + + @Override + protected void onCompletion() { + if (flushedBytes == 0L) { + writeBlob(blobName, buffer.bytes(), failIfAlreadyExists); + } else { + flushBuffer(); + SocketAccess.doPrivilegedVoidException( + () -> blockBlobAsyncClient.commitBlockList(parts, failIfAlreadyExists == false).block()); + } + } + + @Override + protected void onFailure() { + // Nothing to do here, already uploaded blocks will be GCed by Azure after a week. + // see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block#remarks + } + }) { + writer.accept(out); + out.markSuccess(); + } + } + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { assert inputStream.markSupported() : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken"; @@ -439,13 +489,11 @@ private void executeMultipartUpload(String blobName, InputStream inputStream, lo assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes"; final List<String> blockIds = new ArrayList<>(nbParts); - final Base64.Encoder base64Encoder = Base64.getEncoder().withoutPadding(); - final Base64.Decoder base64UrlDecoder = Base64.getUrlDecoder(); for (int i = 0; i < nbParts; i++) { final long length = i < nbParts - 1 ? partSize : lastPartSize; Flux<ByteBuffer> byteBufferFlux = convertStreamToByteBuffer(inputStream, length, DEFAULT_UPLOAD_BUFFERS_SIZE); - final String blockId = base64Encoder.encodeToString(base64UrlDecoder.decode(UUIDs.base64UUID())); + final String blockId = makeMultipartBlockId(); blockBlobAsyncClient.stageBlock(blockId, byteBufferFlux, length).block(); blockIds.add(blockId); } @@ -454,6 +502,13 @@ private void executeMultipartUpload(String blobName, InputStream inputStream, lo }); } + private static final Base64.Encoder base64Encoder = Base64.getEncoder().withoutPadding(); + private static final Base64.Decoder base64UrlDecoder = Base64.getUrlDecoder(); + + private String makeMultipartBlockId() { + return base64Encoder.encodeToString(base64UrlDecoder.decode(UUIDs.base64UUID())); + } + /** * Converts the provided input stream into a Flux of ByteBuffer. To avoid having large amounts of outstanding * memory this Flux reads the InputStream into ByteBuffers of {@code chunkSize} size.
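The new AzureBlobStore#writeBlob above streams the consumer's output as staged blocks and only commits the block list once the consumer finishes successfully; if the upload fails, nothing is committed and Azure garbage-collects the uncommitted blocks, which is why onFailure is a no-op. A minimal, self-contained sketch of the same staged-block pattern against the synchronous azure-storage-blob v12 BlockBlobClient (illustrative only: the plugin itself goes through ChunkedBlobOutputStream and the async client, and the uploadInBlocks name is made up here):

import com.azure.storage.blob.specialized.BlockBlobClient;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.UUID;

class StagedBlockUploadSketch {

    // Uploads 'size' bytes from 'in' as fixed-size staged blocks, then commits the block list.
    static void uploadInBlocks(BlockBlobClient blob, InputStream in, long size, int blockSize) throws IOException {
        final List<String> blockIds = new ArrayList<>();
        final byte[] chunk = new byte[blockSize];
        long remaining = size;
        while (remaining > 0) {
            final int len = (int) Math.min(blockSize, remaining);
            int read = 0;
            while (read < len) {
                final int n = in.read(chunk, read, len - read);
                if (n < 0) {
                    throw new IOException("unexpected end of stream");
                }
                read += n;
            }
            // Block ids must be Base64 strings, and all ids of one blob must have the same length.
            final String blockId = Base64.getEncoder()
                .encodeToString(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
            blob.stageBlock(blockId, new ByteArrayInputStream(chunk, 0, len), len);
            blockIds.add(blockId);
            remaining -= len;
        }
        // Nothing becomes visible until the block list is committed.
        // overwrite=true corresponds to failIfAlreadyExists == false in the plugin code.
        blob.commitBlockList(blockIds, true);
    }
}

If the commit never happens (the onFailure path above), the staged blocks simply expire on the service side, so there is nothing to clean up from the client.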
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index d40f76b7bb138..777d0c14ac50a 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -124,7 +124,7 @@ protected BlobStore getBlobStore() { @Override protected AzureBlobStore createBlobStore() { - final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); + final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService, bigArrays); logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java index 29dde60d590e7..0909fc7a5e237 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -15,6 +15,7 @@ import fixture.azure.AzureHttpHandler; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -58,6 +59,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -172,7 +174,7 @@ int getMaxReadRetries(String clientName) { .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB)) .build()); - return new AzureBlobContainer(BlobPath.EMPTY, new AzureBlobStore(repositoryMetadata, service)); + return new AzureBlobContainer(BlobPath.EMPTY, new AzureBlobStore(repositoryMetadata, service, BigArrays.NON_RECYCLING_INSTANCE)); } public void testReadNonexistentBlobThrowsNoSuchFileException() { @@ -391,6 +393,82 @@ public void testWriteLargeBlob() throws Exception { assertThat(blocks.isEmpty(), is(true)); } + public void testWriteLargeBlobStreaming() throws Exception { + final int maxRetries = randomIntBetween(2, 5); + + final int blobSize = (int) ByteSizeUnit.MB.toBytes(10); + final byte[] data = randomBytes(blobSize); + + final int nbErrors = 2; // we want all requests to fail at least once + final AtomicInteger counterUploads = new AtomicInteger(0); + final AtomicLong bytesReceived = new AtomicLong(0L); + final CountDown countDownComplete = new CountDown(nbErrors); + + final Map blocks = new ConcurrentHashMap<>(); + httpServer.createContext("/account/container/write_large_blob_streaming", exchange -> { + + if ("PUT".equals(exchange.getRequestMethod())) { + final Map params = new HashMap<>(); + RestUtils.decodeQueryString(exchange.getRequestURI().getRawQuery(), 0, params); + + final String blockId = params.get("blockid"); + assert Strings.hasText(blockId) == false || AzureFixtureHelper.assertValidBlockId(blockId); + + if 
(Strings.hasText(blockId) && (counterUploads.incrementAndGet() % 2 == 0)) { + final BytesReference blockData = Streams.readFully(exchange.getRequestBody()); + blocks.put(blockId, blockData); + bytesReceived.addAndGet(blockData.length()); + exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); + exchange.close(); + return; + } + + final String complete = params.get("comp"); + if ("blocklist".equals(complete) && (countDownComplete.countDown())) { + final String blockList = Streams.copyToString(new InputStreamReader(exchange.getRequestBody(), UTF_8)); + final List<String> blockUids = Arrays.stream(blockList.split("<Latest>")) + .filter(line -> line.contains("</Latest>")) + .map(line -> line.substring(0, line.indexOf("</Latest>"))) + .collect(Collectors.toList()); + + final ByteArrayOutputStream blob = new ByteArrayOutputStream(); + for (String blockUid : blockUids) { + BytesReference block = blocks.remove(blockUid); + assert block != null; + block.writeTo(blob); + } + assertArrayEquals(data, blob.toByteArray()); + exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); + exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); + exchange.close(); + return; + } + } + + if (randomBoolean()) { + Streams.readFully(exchange.getRequestBody()); + AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE)); + } + exchange.close(); + }); + + final BlobContainer blobContainer = createBlobContainer(maxRetries); + blobContainer.writeBlob("write_large_blob_streaming", false, randomBoolean(), out -> { + int outstanding = data.length; + while (outstanding > 0) { + if (randomBoolean()) { + int toWrite = Math.toIntExact(Math.min(randomIntBetween(64, data.length), outstanding)); + out.write(data, data.length - outstanding, toWrite); + outstanding -= toWrite; + } else { + out.write(data[data.length - outstanding]); + outstanding--; + } + } + }); + assertEquals(blobSize, bytesReceived.get()); + } + public void testRetryUntilFail() throws Exception { final int maxRetries = randomIntBetween(2, 5); final AtomicInteger requestsReceived = new AtomicInteger(0); diff --git a/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 5ee9446e8501a..ffe3c4988f051 100644 --- a/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/plugins/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -9,9 +9,9 @@ setup: body: type: azure settings: - container: ${container} + container: @container@ client: "integration_test" - base_path: ${base_path} + base_path: @base_path@ # Remove the snapshots, if a previous test failed to delete them. This is # useful for third party tests that runs the test against a real external service.
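The fixture handler in testWriteLargeBlobStreaming above tells Put Block requests (a blockid query parameter) apart from the final Put Block List request (comp=blocklist). Per the Azure Blob Storage REST API, the commit body is XML listing each staged id inside <Latest> elements, e.g. <BlockList><Latest>id1</Latest><Latest>id2</Latest></BlockList>, and the test splits on those elements to reassemble the blob in commit order. A standalone sketch of that reassembly step (parseBlockList is an illustrative name, not fixture API):

import java.util.ArrayList;
import java.util.List;

class BlockListParseSketch {

    // Extracts the Base64 block ids, in order, from a Put Block List XML body.
    static List<String> parseBlockList(String xmlBody) {
        final List<String> ids = new ArrayList<>();
        for (String part : xmlBody.split("<Latest>")) {
            final int end = part.indexOf("</Latest>");
            if (end >= 0) {
                ids.add(part.substring(0, end));
            }
        }
        return ids;
    }
}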
@@ -28,18 +28,15 @@ setup: --- "Snapshot/Restore with repository-azure": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Get repository - do: snapshot.get_repository: repository: repository - - match: { repository.settings.container: ${container} } + - match: { repository.settings.container: @container@ } - match: { repository.settings.client : "integration_test" } - - match: { repository.settings.base_path : "${base_path}" } + - match: { repository.settings.base_path : @base_path@ } # Index documents - do: @@ -130,9 +127,9 @@ setup: repository: repository snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state: SUCCESS } - - match: { responses.0.snapshots.1.state: SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -212,18 +209,13 @@ setup: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non existing snapshot": diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index b7b9d1b819124..b64d911d0663a 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,7 +1,4 @@ -import java.nio.file.Files -import java.security.KeyPair -import java.security.KeyPairGenerator -import org.elasticsearch.gradle.internal.MavenFilteringHack +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin @@ -29,33 +26,35 @@ esplugin { } dependencies { - api 'com.google.cloud:google-cloud-storage:1.113.1' - api 'com.google.cloud:google-cloud-core:1.93.3' - runtimeOnly 'com.google.guava:guava:26.0-jre' - api 'com.google.http-client:google-http-client:1.35.0' + api 'com.google.cloud:google-cloud-storage:1.117.1' + api 'com.google.cloud:google-cloud-core:1.95.4' + api 'com.google.cloud:google-cloud-core-http:1.95.4' + runtimeOnly 'com.google.guava:guava:30.1.1-jre' + api 'com.google.guava:failureaccess:1.0.1' api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'com.google.api:api-common:1.8.1' - api 'com.google.api:gax:1.54.0' - api 'org.threeten:threetenbp:1.4.4' - api 'com.google.protobuf:protobuf-java-util:3.11.3' - api 'com.google.protobuf:protobuf-java:3.11.3' - api 'com.google.code.gson:gson:2.7' - api 'com.google.api.grpc:proto-google-common-protos:1.16.0' - api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' - api 'com.google.cloud:google-cloud-core-http:1.93.3' - api 'com.google.auth:google-auth-library-credentials:0.20.0' - api 'com.google.auth:google-auth-library-oauth2-http:0.20.0' - api 'com.google.oauth-client:google-oauth-client:1.28.0' - api 'com.google.api-client:google-api-client:1.30.10' - api 'com.google.http-client:google-http-client-appengine:1.35.0' - api 'com.google.http-client:google-http-client-jackson2:1.35.0' - api 'com.google.api:gax-httpjson:0.62.0' - api 'io.grpc:grpc-context:1.29.0' - api 
'io.opencensus:opencensus-api:0.18.0' - api 'io.opencensus:opencensus-contrib-http-util:0.18.0' - api 'com.google.apis:google-api-services-storage:v1-rev20200814-1.30.10' + api 'com.google.api:api-common:1.10.4' + api 'com.google.api:gax:1.66.0' + api 'org.threeten:threetenbp:1.5.1' + api 'com.google.protobuf:protobuf-java-util:3.17.3' + api 'com.google.protobuf:protobuf-java:3.17.3' + api 'com.google.code.gson:gson:2.8.7' + api 'com.google.api.grpc:proto-google-common-protos:2.3.2' + api 'com.google.api.grpc:proto-google-iam-v1:1.0.14' + api 'com.google.auth:google-auth-library-credentials:0.26.0' + api 'com.google.auth:google-auth-library-oauth2-http:0.26.0' + api 'com.google.oauth-client:google-oauth-client:1.31.5' + api 'com.google.api-client:google-api-client:1.32.1' + api 'com.google.http-client:google-http-client:1.39.2' + api 'com.google.http-client:google-http-client-gson:1.39.2' + api 'com.google.http-client:google-http-client-appengine:1.39.2' + api 'com.google.http-client:google-http-client-jackson2:1.39.2' + api 'com.google.api:gax-httpjson:0.83.0' + api 'io.grpc:grpc-context:1.39.0' + api 'io.opencensus:opencensus-api:0.28.0' + api 'io.opencensus:opencensus-contrib-http-util:0.28.0' + api 'com.google.apis:google-api-services-storage:v1-rev20210127-1.31.5' testImplementation project(':test:fixtures:gcs-fixture') } @@ -112,6 +111,7 @@ tasks.named("thirdPartyAudit").configure { ) ignoreMissingClasses( + 'com.google.api.client.http.apache.v2.ApacheHttpTransport', 'com.google.appengine.api.datastore.Blob', 'com.google.appengine.api.datastore.DatastoreService', 'com.google.appengine.api.datastore.DatastoreServiceFactory', @@ -161,6 +161,8 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.http.client.methods.HttpPut', 'org.apache.http.client.methods.HttpRequestBase', 'org.apache.http.client.methods.HttpTrace', + 'org.apache.http.config.Registry', + 'org.apache.http.config.RegistryBuilder', 'org.apache.http.config.SocketConfig', 'org.apache.http.config.SocketConfig$Builder', 'org.apache.http.conn.ClientConnectionManager', @@ -172,6 +174,7 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.http.conn.scheme.PlainSocketFactory', 'org.apache.http.conn.scheme.Scheme', 'org.apache.http.conn.scheme.SchemeRegistry', + 'org.apache.http.conn.socket.PlainConnectionSocketFactory', 'org.apache.http.conn.ssl.SSLConnectionSocketFactory', 'org.apache.http.conn.ssl.SSLSocketFactory', 'org.apache.http.conn.ssl.X509HostnameVerifier', @@ -259,7 +262,7 @@ Map expansions = [ tasks.named("processYamlRestTestResources").configure { inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) + filter("tokens" : expansions, ReplaceTokens.class) } tasks.named("internalClusterTest").configure { diff --git a/plugins/repository-gcs/licenses/api-common-1.10.4.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.10.4.jar.sha1 new file mode 100644 index 0000000000000..add76d71107e5 --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-1.10.4.jar.sha1 @@ -0,0 +1 @@ +650904f378415673cfc581fe5720ea9026dfd62d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 deleted file mode 100644 index 7a1c114c6c0fc..0000000000000 --- a/plugins/repository-gcs/licenses/api-common-1.8.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e89befb19b08ad84b262b2f226ab79aefcaa9d7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 
b/plugins/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..4798b37e20691 --- /dev/null +++ b/plugins/repository-gcs/licenses/failureaccess-1.0.1.jar.sha1 @@ -0,0 +1 @@ +1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-LICENSE.txt b/plugins/repository-gcs/licenses/failureaccess-LICENSE.txt similarity index 99% rename from modules/ingest-geoip/licenses/geoip2-LICENSE.txt rename to plugins/repository-gcs/licenses/failureaccess-LICENSE.txt index 7a4a3ea2424c0..d645695673349 100644 --- a/modules/ingest-geoip/licenses/geoip2-LICENSE.txt +++ b/plugins/repository-gcs/licenses/failureaccess-LICENSE.txt @@ -199,4 +199,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/plugins/repository-gcs/licenses/failureaccess-NOTICE.txt b/plugins/repository-gcs/licenses/failureaccess-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/gax-1.54.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.54.0.jar.sha1 deleted file mode 100644 index ed63c084f4edc..0000000000000 --- a/plugins/repository-gcs/licenses/gax-1.54.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f1668868b8b3fd5fc248d80c16dd9f09afc9180 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.66.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.66.0.jar.sha1 new file mode 100644 index 0000000000000..54ea55cb2d743 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.66.0.jar.sha1 @@ -0,0 +1 @@ +8320f0ee0dd4200ffd8abdf9f16a21f2b64ce9b2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 deleted file mode 100644 index 161ca85ccfc0c..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.62.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05a1a4736acd1c4f30304be953532be6aecdc2c9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.83.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.83.0.jar.sha1 new file mode 100644 index 0000000000000..96bbc4cd0c73f --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.83.0.jar.sha1 @@ -0,0 +1 @@ +b9d8cf27c3ffd409402bee4d8458972ad8638ebc \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.30.10.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.30.10.jar.sha1 deleted file mode 100644 index 62c51887ee1ea..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-client-1.30.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2de98417199785982e1f037fb8b52613f57175ae \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-client-1.32.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.32.1.jar.sha1 new file mode 100644 index 0000000000000..2c7872e3226f7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-1.32.1.jar.sha1 @@ -0,0 +1 @@ +3e216f54e59e3c6f01bc52beeeed9010724e0edf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 
b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 deleted file mode 100644 index e399aa5865413..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe3b480958961fc7144da10ce3653065d5eb5490 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.31.5.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.31.5.jar.sha1 new file mode 100644 index 0000000000000..f0931d9855355 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20210127-1.31.5.jar.sha1 @@ -0,0 +1 @@ +721b04e4e41c7db323a9ed3fb240f58ee5adf3fe \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 deleted file mode 100644 index 14cc742737eed..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87a91a373e64ba5c3cdf8cc5cf54b189dd1492f8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.26.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.26.0.jar.sha1 new file mode 100644 index 0000000000000..edcd514c2530c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.26.0.jar.sha1 @@ -0,0 +1 @@ +da0919aae28fd57fb98057e3125040e31b582f5f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 deleted file mode 100644 index 7911c34780cbe..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f33d4d6c91a68826816606a2208990eea93fcb2a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.26.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.26.0.jar.sha1 new file mode 100644 index 0000000000000..314d60821b1af --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.26.0.jar.sha1 @@ -0,0 +1 @@ +ea3e6869046df1e99abbf89b3417aac12f596743 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.93.3.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.93.3.jar.sha1 deleted file mode 100644 index fbbcf804bc541..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-1.93.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6fc017c0908456c0867d21c85d1b906662d4b5f1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.95.4.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.95.4.jar.sha1 new file mode 100644 index 0000000000000..cea76be0bc514 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.95.4.jar.sha1 @@ -0,0 +1 @@ +9120a006cd1fa2be027de951270d541bef8407f5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.93.3.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.93.3.jar.sha1 deleted file mode 100644 index 0518072447569..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-1.93.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-2589aa6a4b6c49811c08ec2803c8e9c79c410bc5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.95.4.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.95.4.jar.sha1 new file mode 100644 index 0000000000000..6042eba8bec3c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.95.4.jar.sha1 @@ -0,0 +1 @@ +0d705e4f7bd51093c4084507054303c2bd348bc9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.113.1.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.113.1.jar.sha1 deleted file mode 100644 index 22fc078b36aa1..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-storage-1.113.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd291ed57c1223bbb31363c4aa88c55faf0000c7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.117.1.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.117.1.jar.sha1 new file mode 100644 index 0000000000000..7fc4ce2d8c2d7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.117.1.jar.sha1 @@ -0,0 +1 @@ +59b9eceeda71799fe53d1391288b09c542110e02 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 deleted file mode 100644 index 802a6ab3a8d04..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2348dd57d5417c29388bd430f5055dca863c600 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 new file mode 100644 index 0000000000000..4870e9606ee20 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.39.2.jar.sha1 @@ -0,0 +1 @@ +5aafc3ff51693febf4214bb2a21baf577ce2fb25 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.35.0.jar.sha1 deleted file mode 100644 index 8bf444887d30f..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -394d1e1376538931ec3d4eeed654f9da911b95eb \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 new file mode 100644 index 0000000000000..924db225f1ffa --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.39.2.jar.sha1 @@ -0,0 +1 @@ +22ba6d92fd2e5c0c9db01848941e2e8bd42943ca \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 new file mode 100644 index 0000000000000..aec0283e3edd1 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-gson-1.39.2.jar.sha1 @@ -0,0 +1 @@ +43c1d0500c31ee31ff5918ac4bbe95711cd744a9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 deleted file mode 100644 index 0342f57779315..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-c1c2a08792b935f3345590783ada872f4a0997f1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 new file mode 100644 index 0000000000000..170ec10eaf5d2 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.39.2.jar.sha1 @@ -0,0 +1 @@ +4037ca41fe43989a5609158d4ed7a3973de5df36 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.28.0.jar.sha1 deleted file mode 100644 index 474df6e026570..0000000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.28.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9e5d0c33b663d6475c96ce79b2949545a113af \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.31.5.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.31.5.jar.sha1 new file mode 100644 index 0000000000000..51cec0db3b2e6 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.31.5.jar.sha1 @@ -0,0 +1 @@ +a468e6b6d3d7de8310c3c9f4eb7cbb4eabde4404 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 deleted file mode 100644 index a549827edd283..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-context-1.29.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d8a441110f86f8927543dc3007639080441ea3c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 new file mode 100644 index 0000000000000..8734ad2f10b57 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.39.0.jar.sha1 @@ -0,0 +1 @@ +637f453f3654aa29bf085ae7ddc86f9f80c937dd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 deleted file mode 100644 index b3433f306eb3f..0000000000000 --- a/plugins/repository-gcs/licenses/gson-2.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -751f548c85fa49f330cecbb1875893f971b33c4e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.8.7.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.8.7.jar.sha1 new file mode 100644 index 0000000000000..68af718a50e3c --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.8.7.jar.sha1 @@ -0,0 +1 @@ +69d9503ea0a40ee16f0bcdac7e3eaf83d0fa914a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/guava-26.0-jre.jar.sha1 b/plugins/repository-gcs/licenses/guava-26.0-jre.jar.sha1 deleted file mode 100644 index 63d05007650ce..0000000000000 --- a/plugins/repository-gcs/licenses/guava-26.0-jre.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a806eff209f36f635f943e16d97491f00f6bfab \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 b/plugins/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 new file mode 100644 index 0000000000000..39e641fc7834f --- /dev/null +++ b/plugins/repository-gcs/licenses/guava-30.1.1-jre.jar.sha1 @@ -0,0 +1 @@ +87e0fd1df874ea3cbe577702fe6f17068b790fd8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.18.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.18.0.jar.sha1 deleted file mode 100644 index 8b95ab4e4c49c..0000000000000 --- 
a/plugins/repository-gcs/licenses/opencensus-api-0.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b89a8f8dfd1e1e0d68d83c82a855624814b19a6e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 new file mode 100644 index 0000000000000..e7e2d46fd074c --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.28.0.jar.sha1 @@ -0,0 +1 @@ +0fc0d06a9d975a38c581dff59b99cf31db78bd99 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 deleted file mode 100644 index 1757e00591110..0000000000000 --- a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76a37e4a931d5801a9e25b0c0353e5f37c4d1e8e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 new file mode 100644 index 0000000000000..164fa23ede758 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.28.0.jar.sha1 @@ -0,0 +1 @@ +f6cb276330197d51dd65327fc305a3df7e622705 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 deleted file mode 100644 index 7762b7a3ebdc3..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-1.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c5f022ea3b8e8df6a619c4cd8faf9af86022daa \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 new file mode 100644 index 0000000000000..789e467a3f74d --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.3.2.jar.sha1 @@ -0,0 +1 @@ +a35fd6ed973f752604fce97a21eb1e09d6afc467 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 deleted file mode 100644 index 2bfae3456d499..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-iam-v1-0.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea312c0250a5d0a7cdd1b20bc2c3259938b79855 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 new file mode 100644 index 0000000000000..c74b581d09d1b --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-iam-v1-1.0.14.jar.sha1 @@ -0,0 +1 @@ +6bc86a81d4bd99bfb54e9591b8de3ccd515fde78 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.11.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.11.3.jar.sha1 deleted file mode 100644 index 371f423c3751e..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.11.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df12be70b968e32442821a2cfdc3cede5a42dec5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.17.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.17.3.jar.sha1 new file mode 100644 index 0000000000000..e8afe3160abf6 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.17.3.jar.sha1 @@ 
-0,0 +1 @@ +313b1861fa9312dd71e1033a77c2e64fb1a94dd3 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.11.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.11.3.jar.sha1 deleted file mode 100644 index 8f8d3cf3c9e49..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-util-3.11.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd4ba2dfeb1b010eb20ca27e65fbfb74fbbdcdb9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 new file mode 100644 index 0000000000000..b130d7fb53c84 --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.17.3.jar.sha1 @@ -0,0 +1 @@ +4340f06a346f46eab1b38feb066e4a2d30aed3b7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.4.4.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.4.4.jar.sha1 deleted file mode 100644 index 0f7ee08a6d2fc..0000000000000 --- a/plugins/repository-gcs/licenses/threetenbp-1.4.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bbe3cc15e8ea16863435009af8ca40dd97770240 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 new file mode 100644 index 0000000000000..5640b4c080ff3 --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-1.5.1.jar.sha1 @@ -0,0 +1 @@ +4307ad2fdd4ba8b5ecd3fdb88b932aa49fa25920 \ No newline at end of file diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index ad24865c214a7..58e9d12b48c75 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -226,7 +226,7 @@ public Map getRepositories(Environment env, NamedXCo @Override protected GoogleCloudStorageBlobStore createBlobStore() { return new GoogleCloudStorageBlobStore( - metadata.settings().get("bucket"), "test", metadata.name(), storageService, + metadata.settings().get("bucket"), "test", metadata.name(), storageService, bigArrays, randomIntBetween(1, 8) * 1024) { @Override long getLargeBlobThresholdInBytes() { diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 74c697a167189..ce2eae99b59f3 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -15,9 +15,11 @@ import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.CheckedConsumer; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.util.Iterator; import java.util.Map; @@ -76,6 +78,14 @@ public void writeBlob(String blobName, BytesReference bytes, boolean 
failIfAlrea blobStore.writeBlob(buildKey(blobName), bytes, failIfAlreadyExists); } + @Override + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + boolean atomic, + CheckedConsumer writer) throws IOException { + blobStore.writeBlob(buildKey(blobName), failIfAlreadyExists, writer); + } + @Override public void writeBlobAtomic(String blobName, BytesReference bytes, boolean failIfAlreadyExists) throws IOException { writeBlob(blobName, bytes, failIfAlreadyExists); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index a49a17699ffe4..ea867a0ff1f9f 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -22,6 +22,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetadata; @@ -37,8 +40,10 @@ import org.elasticsearch.common.unit.ByteSizeValue; import java.io.ByteArrayInputStream; +import java.io.FilterOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.Channels; import java.nio.channels.WritableByteChannel; @@ -91,16 +96,19 @@ class GoogleCloudStorageBlobStore implements BlobStore { private final GoogleCloudStorageService storageService; private final GoogleCloudStorageOperationsStats stats; private final int bufferSize; + private final BigArrays bigArrays; GoogleCloudStorageBlobStore(String bucketName, String clientName, String repositoryName, GoogleCloudStorageService storageService, + BigArrays bigArrays, int bufferSize) { this.bucketName = bucketName; this.clientName = clientName; this.repositoryName = repositoryName; this.storageService = storageService; + this.bigArrays = bigArrays; this.stats = new GoogleCloudStorageOperationsStats(bucketName); this.bufferSize = bufferSize; } @@ -231,7 +239,12 @@ void writeBlob(String blobName, BytesReference bytes, boolean failIfAlreadyExist writeBlobResumable(BlobInfo.newBuilder(bucketName, blobName).setMd5(md5).build(), bytes.streamInput(), bytes.length(), failIfAlreadyExists); } else { - writeBlob(bytes.streamInput(), bytes.length(), failIfAlreadyExists, BlobInfo.newBuilder(bucketName, blobName).build()); + final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); + if (bytes.hasArray()) { + writeBlobMultipart(blobInfo, bytes.array(), bytes.arrayOffset(), bytes.length(), failIfAlreadyExists); + } else { + writeBlob(bytes.streamInput(), bytes.length(), failIfAlreadyExists, blobInfo); + } } } @@ -249,7 +262,9 @@ private void writeBlob(InputStream inputStream, long blobSize, boolean failIfAlr if (blobSize > getLargeBlobThresholdInBytes()) { writeBlobResumable(blobInfo, inputStream, blobSize, failIfAlreadyExists); } else { - writeBlobMultipart(blobInfo, inputStream, blobSize, failIfAlreadyExists); + final byte[] buffer = new byte[Math.toIntExact(blobSize)]; + 
Streams.readFully(inputStream, buffer); + writeBlobMultipart(blobInfo, buffer, 0, Math.toIntExact(blobSize), failIfAlreadyExists); } } @@ -265,6 +280,98 @@ long getLargeBlobThresholdInBytes() { {Storage.BlobWriteOption.doesNotExist(), Storage.BlobWriteOption.md5Match()}; private static final Storage.BlobWriteOption[] OVERWRITE_CHECK_MD5 = {Storage.BlobWriteOption.md5Match()}; + void writeBlob(String blobName, boolean failIfAlreadyExists, CheckedConsumer writer) throws IOException { + final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); + final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists ? NO_OVERWRITE_NO_MD5 : OVERWRITE_NO_MD5; + + StorageException storageException = null; + + for (int retry = 0; retry < 3; ++retry) { + // we start out by buffering the write to a buffer, if it exceeds the large blob threshold we start a resumable upload, flush + // the buffer to it and keep writing to the resumable upload. If we never exceed the large blob threshold we just write the + // buffer via a standard blob write + try (ReleasableBytesStreamOutput buffer = new ReleasableBytesStreamOutput(bigArrays)) { + final AtomicReference channelRef = new AtomicReference<>(); + writer.accept(new OutputStream() { + + private OutputStream resumableStream; + + @Override + public void write(int b) throws IOException { + if (resumableStream != null) { + resumableStream.write(b); + } else { + if (buffer.size() + 1 > getLargeBlobThresholdInBytes()) { + initResumableStream(); + resumableStream.write(b); + } else { + buffer.write(b); + } + } + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + if (resumableStream != null) { + resumableStream.write(b, off, len); + } else { + if (buffer.size() + len > getLargeBlobThresholdInBytes()) { + initResumableStream(); + resumableStream.write(b, off, len); + } else { + buffer.write(b, off, len); + } + } + } + + private void initResumableStream() throws IOException { + final WriteChannel writeChannel = + SocketAccess.doPrivilegedIOException(() -> client().writer(blobInfo, writeOptions)); + channelRef.set(writeChannel); + resumableStream = new FilterOutputStream(Channels.newOutputStream(new WritableBlobChannel(writeChannel))) { + @Override + public void write(byte[] b, int off, int len) throws IOException { + int written = 0; + while (written < len) { + // at most write the default chunk size in one go to prevent allocating huge buffers in the SDK + // see com.google.cloud.BaseWriteChannel#DEFAULT_CHUNK_SIZE + final int toWrite = Math.min(len - written, 60 * 256 * 1024); + out.write(b, off + written, toWrite); + written += toWrite; + } + } + }; + buffer.bytes().writeTo(resumableStream); + buffer.close(); + } + }); + final WritableByteChannel writeChannel = channelRef.get(); + if (writeChannel != null) { + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); + stats.trackPutOperation(); + } else { + writeBlob(blobName, buffer.bytes(), failIfAlreadyExists); + } + return; + } catch (final StorageException se) { + final int errorCode = se.getCode(); + if (errorCode == HTTP_GONE) { + logger.warn(() -> new ParameterizedMessage("Retrying broken resumable upload session for blob {}", blobInfo), se); + storageException = ExceptionsHelper.useOrSuppress(storageException, se); + continue; + } else if (failIfAlreadyExists && errorCode == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); + } + if (storageException != null) { + 
se.addSuppressed(storageException); + } + throw se; + } + } + assert storageException != null; + throw storageException; + } + /** * Uploads a blob using the "resumable upload" method (multiple requests, which * can be independently retried in case of failure, see @@ -297,24 +404,9 @@ private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream, long * It is not enough to wrap the call to Streams#copy, we have to wrap the privileged calls too; this is because Streams#copy * is in the stacktrace and is not granted the permissions needed to close and write the channel. */ - org.elasticsearch.core.internal.io.Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { - - @SuppressForbidden(reason = "channel is based on a socket") - @Override - public int write(final ByteBuffer src) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); - } - - @Override - public boolean isOpen() { - return writeChannel.isOpen(); - } - - @Override - public void close() throws IOException { - SocketAccess.doPrivilegedVoidIOException(writeChannel::close); - } - }), buffer); + org.elasticsearch.core.internal.io.Streams.copy( + inputStream, Channels.newOutputStream(new WritableBlobChannel(writeChannel)), buffer); + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); // We don't track this operation on the http layer as // we do with the GET/LIST operations since this operations // can trigger multiple underlying http requests but only one @@ -346,22 +438,21 @@ public void close() throws IOException { * 'multipart/related' request containing both data and metadata. The request is * gziped), see: * https://cloud.google.com/storage/docs/json_api/v1/how-tos/multipart-upload - * @param blobInfo the info for the blob to be uploaded - * @param inputStream the stream containing the blob data + * @param blobInfo the info for the blob to be uploaded + * @param buffer the byte array containing the data + * @param offset offset at which the blob contents start in the buffer * @param blobSize the size * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists */ - private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + private void writeBlobMultipart(BlobInfo blobInfo, byte[] buffer, int offset, int blobSize, boolean failIfAlreadyExists) throws IOException { assert blobSize <= getLargeBlobThresholdInBytes() : "large blob uploads should use the resumable upload method"; - final byte[] buffer = new byte[Math.toIntExact(blobSize)]; - Streams.readFully(inputStream, buffer); try { final Storage.BlobTargetOption[] targetOptions = failIfAlreadyExists ? 
new Storage.BlobTargetOption[] { Storage.BlobTargetOption.doesNotExist() } : new Storage.BlobTargetOption[0]; SocketAccess.doPrivilegedVoidIOException( - () -> client().create(blobInfo, buffer, targetOptions)); + () -> client().create(blobInfo, buffer, offset, blobSize, targetOptions)); // We don't track this operation on the http layer as // we do with the GET/LIST operations since this operations // can trigger multiple underlying http requests but only one @@ -478,4 +569,29 @@ private static String buildKey(String keyPath, String s) { public Map stats() { return stats.toMap(); } + + private static final class WritableBlobChannel implements WritableByteChannel { + + private final WriteChannel channel; + + WritableBlobChannel(WriteChannel writeChannel) { + this.channel = writeChannel; + } + + @SuppressForbidden(reason = "channel is based on a socket") + @Override + public int write(final ByteBuffer src) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> channel.write(src)); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() { + // we manually close the channel later to have control over whether or not we want to finalize a blob + } + } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index eeab1a0361cad..117cf20762aa5 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -95,7 +95,7 @@ private static Map buildLocation(RepositoryMetadata metadata) { @Override protected GoogleCloudStorageBlobStore createBlobStore() { - return new GoogleCloudStorageBlobStore(bucket, clientName, metadata.name(), storageService, bufferSize); + return new GoogleCloudStorageBlobStore(bucket, clientName, metadata.name(), storageService, bigArrays, bufferSize); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 1ebe387b0f7df..43ddb04bc016b 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -12,6 +12,7 @@ import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.api.client.util.SecurityUtils; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.ServiceOptions; @@ -34,6 +35,7 @@ import java.net.HttpURLConnection; import java.net.URI; import java.net.URL; +import java.security.KeyStore; import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; @@ -126,7 +128,13 @@ private Storage createClient(GoogleCloudStorageClientSettings clientSettings, final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); // requires java.lang.RuntimePermission "setFactory" // Pin the TLS trust certificates. 
- builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); + // We manually load the key store from jks instead of using GoogleUtils.getCertificateTrustStore() because that uses a .p12 + // store format not compatible with FIPS mode. + final KeyStore certTrustStore = SecurityUtils.getJavaKeyStore(); + try (InputStream keyStoreStream = GoogleUtils.class.getResourceAsStream("google.jks")) { + SecurityUtils.loadKeyStore(certTrustStore, keyStoreStream, "notasecret"); + } + builder.trustCertificates(certTrustStore); return builder.build(); }); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/SocketAccess.java index f6327e1ba44fd..287b70615840c 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/SocketAccess.java @@ -32,7 +32,7 @@ public static T doPrivilegedIOException(PrivilegedExceptionAction operati try { return AccessController.doPrivileged(operation); } catch (PrivilegedActionException e) { - throw (IOException) e.getCause(); + throw causeAsIOException(e); } } @@ -44,7 +44,18 @@ public static void doPrivilegedVoidIOException(CheckedRunnable acti return null; }); } catch (PrivilegedActionException e) { - throw (IOException) e.getCause(); + throw causeAsIOException(e); } } + + private static IOException causeAsIOException(PrivilegedActionException e) { + final Throwable cause = e.getCause(); + if (cause instanceof IOException) { + return (IOException) cause; + } + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } + throw new RuntimeException(cause); + } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 1848e9abb1189..5f3a5e7dcda80 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -14,6 +14,7 @@ import com.sun.net.httpserver.HttpHandler; import fixture.gcs.FakeOAuth2HttpHandler; import org.apache.http.HttpStatus; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.core.SuppressForbidden; @@ -81,8 +82,8 @@ private String httpServerUrl() { } @Override - protected String downloadStorageEndpoint(String blob) { - return "/download/storage/v1/b/bucket/o/" + blob; + protected String downloadStorageEndpoint(BlobContainer container, String blob) { + return "/download/storage/v1/b/bucket/o/" + container.path().buildAsString() + blob; } @Override @@ -140,18 +141,20 @@ StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clien httpServer.createContext("/token", new FakeOAuth2HttpHandler()); final GoogleCloudStorageBlobStore blobStore = new GoogleCloudStorageBlobStore("bucket", client, "repo", service, - randomIntBetween(1, 8) * 1024); + BigArrays.NON_RECYCLING_INSTANCE, randomIntBetween(1, 8) * 1024); - return new GoogleCloudStorageBlobContainer(BlobPath.EMPTY, blobStore); + return new GoogleCloudStorageBlobContainer(randomBoolean() ? 
BlobPath.EMPTY : BlobPath.EMPTY.add("foo"), blobStore); } public void testReadLargeBlobWithRetries() throws Exception { final int maxRetries = randomIntBetween(2, 10); final AtomicInteger countDown = new AtomicInteger(maxRetries); + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); + // SDK reads in 2 MB chunks so we use twice that to simulate 2 chunks final byte[] bytes = randomBytes(1 << 22); - httpServer.createContext("/download/storage/v1/b/bucket/o/large_blob_retries", exchange -> { + httpServer.createContext(downloadStorageEndpoint(blobContainer, "large_blob_retries"), exchange -> { Streams.readFully(exchange.getRequestBody()); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); final Tuple<Long, Long> range = getRange(exchange); @@ -167,7 +170,6 @@ public void testReadLargeBlobWithRetries() throws Exception { exchange.close(); }); - final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); try (InputStream inputStream = blobContainer.readBlob("large_blob_retries")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); } @@ -177,13 +179,14 @@ public void testWriteBlobWithRetries() throws Exception { final int maxRetries = randomIntBetween(2, 10); final CountDown countDown = new CountDown(maxRetries); + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); final byte[] bytes = randomBlobContent(); httpServer.createContext("/upload/storage/v1/b/bucket/o", safeHandler(exchange -> { assertThat(exchange.getRequestURI().getQuery(), containsString("uploadType=multipart")); if (countDown.countDown()) { Optional<Tuple<String, BytesReference>> content = parseMultipartRequestBody(exchange.getRequestBody()); assertThat(content.isPresent(), is(true)); - assertThat(content.get().v1(), equalTo("write_blob_max_retries")); + assertThat(content.get().v1(), equalTo(blobContainer.path().buildAsString() + "write_blob_max_retries")); if (Objects.deepEquals(bytes, BytesReference.toBytes(content.get().v2()))) { byte[] response = ("{\"bucket\":\"bucket\",\"name\":\"" + content.get().v1() + "\"}").getBytes(UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); @@ -204,7 +207,6 @@ public void testWriteBlobWithRetries() throws Exception { } })); - final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false); } @@ -255,11 +257,15 @@ public void testWriteLargeBlob() throws IOException { final AtomicInteger countUploads = new AtomicInteger(nbErrors * totalChunks); final AtomicBoolean allow410Gone = new AtomicBoolean(randomBoolean()); final AtomicBoolean allowReadTimeout = new AtomicBoolean(rarely()); + final AtomicInteger bytesReceived = new AtomicInteger(); final int wrongChunk = randomIntBetween(1, totalChunks); final AtomicReference<String> sessionUploadId = new AtomicReference<>(UUIDs.randomBase64UUID()); logger.debug("starting with resumable upload id [{}]", sessionUploadId.get()); + final TimeValue readTimeout = allowReadTimeout.get() ?
TimeValue.timeValueSeconds(3) : null; + final BlobContainer blobContainer = createBlobContainer(nbErrors + 1, readTimeout, null, null); + httpServer.createContext("/upload/storage/v1/b/bucket/o", safeHandler(exchange -> { final BytesReference requestBody = Streams.readFully(exchange.getRequestBody()); @@ -268,7 +274,7 @@ public void testWriteLargeBlob() throws IOException { assertThat(params.get("uploadType"), equalTo("resumable")); if ("POST".equals(exchange.getRequestMethod())) { - assertThat(params.get("name"), equalTo("write_large_blob")); + assertThat(params.get("name"), equalTo(blobContainer.path().buildAsString() + "write_large_blob")); if (countInits.decrementAndGet() <= 0) { byte[] response = requestBody.utf8ToString().getBytes(UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); @@ -305,6 +311,7 @@ public void testWriteLargeBlob() throws IOException { // we must reset the counters because the whole object upload will be retried countInits.set(nbErrors); countUploads.set(nbErrors * totalChunks); + bytesReceived.set(0); exchange.sendResponseHeaders(HttpStatus.SC_GONE, -1); return; @@ -314,13 +321,25 @@ public void testWriteLargeBlob() throws IOException { final String range = exchange.getRequestHeaders().getFirst("Content-Range"); assertTrue(Strings.hasLength(range)); + if (range.equals("bytes */*")) { + final int receivedSoFar = bytesReceived.get(); + if (receivedSoFar > 0) { + exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=0-%d", receivedSoFar)); + } + exchange.getResponseHeaders().add("Content-Length", "0"); + exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1); + return; + } + if (countUploads.decrementAndGet() % 2 == 0) { + assertThat(Math.toIntExact(requestBody.length()), anyOf(equalTo(defaultChunkSize), equalTo(lastChunkSize))); final int rangeStart = getContentRangeStart(range); final int rangeEnd = getContentRangeEnd(range); assertThat(rangeEnd + 1 - rangeStart, equalTo(Math.toIntExact(requestBody.length()))); assertThat(new BytesArray(data, rangeStart, rangeEnd - rangeStart + 1), is(requestBody)); + bytesReceived.updateAndGet(existing -> Math.max(existing, rangeEnd)); final Integer limit = getContentRangeLimit(range); if (limit != null) { @@ -340,11 +359,12 @@ public void testWriteLargeBlob() throws IOException { } })); - final TimeValue readTimeout = allowReadTimeout.get() ? 
TimeValue.timeValueSeconds(3) : null; - - final BlobContainer blobContainer = createBlobContainer(nbErrors + 1, readTimeout, null, null); - try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) { - blobContainer.writeBlob("write_large_blob", stream, data.length, false); + if (randomBoolean()) { + try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) { + blobContainer.writeBlob("write_large_blob", stream, data.length, false); + } + } else { + blobContainer.writeBlob("write_large_blob", false, randomBoolean(), out -> out.write(data)); } assertThat(countInits.get(), equalTo(0)); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index a1d99cd266302..a4bb5973ff8b9 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -77,7 +78,7 @@ public void testDeleteBlobsIgnoringIfNotExistsThrowsIOException() throws Excepti when(storageService.client(any(String.class), any(String.class), any(GoogleCloudStorageOperationsStats.class))).thenReturn(storage); try (BlobStore store = new GoogleCloudStorageBlobStore("bucket", "test", "repo", storageService, - randomIntBetween(1, 8) * 1024)) { + BigArrays.NON_RECYCLING_INSTANCE, randomIntBetween(1, 8) * 1024)) { final BlobContainer container = store.blobContainer(BlobPath.EMPTY); IOException e = expectThrows(IOException.class, () -> container.deleteBlobsIgnoringIfNotExists(blobs.iterator())); diff --git a/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index 81e50a77d651a..7ce4e44dada35 100644 --- a/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/plugins/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -9,9 +9,9 @@ setup: body: type: gcs settings: - bucket: ${bucket} + bucket: @bucket@ client: "integration_test" - base_path: "${base_path}" + base_path: "@base_path@" # Remove the snapshots, if a previous test failed to delete them. This is # useful for third party tests that runs the test against a real external service. 
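For context on the GoogleCloudStorageService hunk above: the client now builds its trust store from the google.jks resource instead of calling GoogleUtils.getCertificateTrustStore(), because the latter loads a PKCS#12 store that FIPS-approved security providers reject. A minimal standalone sketch of that pattern, assuming only the google-api-client/google-http-client classes the hunk itself uses (NetHttpTransport.Builder, SecurityUtils, GoogleUtils); the class name FipsFriendlyTransport is illustrative, not part of the plugin:

import java.io.IOException;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.security.KeyStore;

import com.google.api.client.googleapis.GoogleUtils;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.util.SecurityUtils;

/**
 * Builds an HTTP transport that trusts Google's CA bundle loaded from the JKS
 * resource shipped with google-api-client, avoiding the PKCS#12 store that
 * GoogleUtils.getCertificateTrustStore() would load (unusable in FIPS mode).
 */
public final class FipsFriendlyTransport {

    private FipsFriendlyTransport() {}

    public static NetHttpTransport build() throws IOException, GeneralSecurityException {
        // A plain JKS store works with FIPS-certified providers, unlike .p12.
        final KeyStore trustStore = SecurityUtils.getJavaKeyStore();
        try (InputStream in = GoogleUtils.class.getResourceAsStream("google.jks")) {
            // "notasecret" is the password the bundled trust store ships with.
            SecurityUtils.loadKeyStore(trustStore, in, "notasecret");
        }
        return new NetHttpTransport.Builder()
            .trustCertificates(trustStore)
            .build();
    }
}

The plugin's actual wiring additionally configures proxies, timeouts and credentials on the same builder; the sketch only isolates the trust-store step.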
@@ -28,18 +28,15 @@ setup: --- "Snapshot/Restore with repository-gcs": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Get repository - do: snapshot.get_repository: repository: repository - - match: { repository.settings.bucket : ${bucket} } + - match: { repository.settings.bucket : @bucket@ } - match: { repository.settings.client : "integration_test" } - - match: { repository.settings.base_path : "${base_path}" } + - match: { repository.settings.base_path : "@base_path@" } # Index documents - do: @@ -130,9 +127,9 @@ setup: repository: repository snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -209,18 +206,13 @@ setup: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non existing snapshot": diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index 52e951e258c86..51e356548413b 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -15,6 +15,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Path; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetadata; @@ -31,6 +32,7 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.util.Collections; @@ -156,6 +158,38 @@ public void writeBlob(String blobName, BytesReference bytes, boolean failIfAlrea }); } + @Override + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + boolean atomic, + CheckedConsumer writer) throws IOException { + Path blob = new Path(path, blobName); + if (atomic) { + final Path tempBlobPath = new Path(path, FsBlobContainer.tempBlobName(blobName)); + store.execute((Operation) fileContext -> { + try (FSDataOutputStream stream = fileContext.create(tempBlobPath, EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK))) { + writer.accept(stream); + fileContext.rename(tempBlobPath, blob, failIfAlreadyExists ? Options.Rename.NONE : Options.Rename.OVERWRITE); + } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) { + throw new FileAlreadyExistsException(blob.toString(), null, faee.getMessage()); + } + return null; + }); + } else { + // we pass CREATE, which means it fails if a blob already exists. + final EnumSet flags = failIfAlreadyExists ? 
EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK) + : EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK); + store.execute((Operation) fileContext -> { + try (FSDataOutputStream stream = fileContext.create(blob, flags)) { + writer.accept(stream); + } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) { + throw new FileAlreadyExistsException(blob.toString(), null, faee.getMessage()); + } + return null; + }); + } + } + @Override public void writeBlobAtomic(String blobName, BytesReference bytes, boolean failIfAlreadyExists) throws IOException { final String tempBlob = FsBlobContainer.tempBlobName(blobName); @@ -192,8 +226,13 @@ private void writeToPath(InputStream inputStream, long blobSize, FileContext fil @Override public Map listBlobsByPrefix(@Nullable final String prefix) throws IOException { - FileStatus[] files = store.execute(fileContext -> fileContext.util().listStatus(path, - path -> prefix == null || path.getName().startsWith(prefix))); + FileStatus[] files; + try { + files = store.execute(fileContext -> fileContext.util().listStatus(path, + path -> prefix == null || path.getName().startsWith(prefix))); + } catch (FileNotFoundException e) { + files = new FileStatus[0]; + } Map map = new LinkedHashMap<>(); for (FileStatus file : files) { if (file.isFile()) { diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 70c7c496fcdd7..46b538168b9d0 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -159,6 +159,41 @@ public void testReadRange() throws Exception { assertTrue(container.blobExists("foo")); } + public void testListBlobsByPrefix() throws Exception { + FileContext fileContext = createTestContext(); + HdfsBlobStore hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, false); + FileContext.Util util = fileContext.util(); + Path root = fileContext.makeQualified(new Path("dir")); + assertTrue(util.exists(root)); + BlobPath blobPath = BlobPath.EMPTY.add("path"); + + hdfsBlobStore.blobContainer(blobPath); + Path hdfsPath = root; + for (String p : blobPath.parts()) { + hdfsPath = new Path(hdfsPath, p); + } + assertTrue(util.exists(hdfsPath)); + + BlobContainer container = hdfsBlobStore.blobContainer(blobPath); + + byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); + writeBlob(container, "foo", new BytesArray(data), randomBoolean()); + assertArrayEquals(readBlobFully(container, "foo", data.length), data); + assertTrue(container.blobExists("foo")); + writeBlob(container, "bar", new BytesArray(data), randomBoolean()); + assertArrayEquals(readBlobFully(container, "bar", data.length), data); + assertTrue(container.blobExists("bar")); + + assertEquals(2, container.listBlobsByPrefix(null).size()); + assertEquals(1, container.listBlobsByPrefix("fo").size()); + assertEquals(0, container.listBlobsByPrefix("noSuchFile").size()); + + container.delete(); + assertEquals(0, container.listBlobsByPrefix(null).size()); + assertEquals(0, container.listBlobsByPrefix("fo").size()); + assertEquals(0, container.listBlobsByPrefix("noSuchFile").size()); + } + public static byte[] readBlobPartially(BlobContainer container, String name, int pos, int length) throws IOException { 
byte[] data = new byte[length]; try (InputStream inputStream = container.readBlob(name, pos, length)) { diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 01f2f524294ed..60851c270e3fa 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -83,7 +83,7 @@ public void testSimpleWorkflow() { .prepareGetSnapshots("test-repo") .setSnapshots("test-snap") .get() - .getSnapshots("test-repo") + .getSnapshots() .get(0) .state(), equalTo(SnapshotState.SUCCESS)); diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml index 6ed93e07160b2..f38f4783b195b 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_get.yml @@ -46,8 +46,8 @@ repository: test_snapshot_get_repository snapshot: test_snapshot_get - - length: { responses.0.snapshots: 1 } - - match: { responses.0.snapshots.0.snapshot : test_snapshot_get } + - length: { snapshots: 1 } + - match: { snapshots.0.snapshot : test_snapshot_get } # List snapshot info - do: @@ -55,8 +55,8 @@ repository: test_snapshot_get_repository snapshot: "*" - - length: { responses.0.snapshots: 1 } - - match: { responses.0.snapshots.0.snapshot : test_snapshot_get } + - length: { snapshots: 1 } + - match: { snapshots.0.snapshot : test_snapshot_get } # Remove our snapshot - do: diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml index dda910ae36c26..c2a37964e70a7 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml @@ -21,7 +21,7 @@ repository: test_snapshot_repository_ro snapshot: "_all" - - length: { responses.0.snapshots: 1 } + - length: { snapshots: 1 } # Remove our repository - do: diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml index 2c4fcc338ab07..20d988884113f 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_get.yml @@ -48,8 +48,8 @@ repository: test_snapshot_get_repository snapshot: test_snapshot_get - - length: { responses.0.snapshots: 1 } - - match: { responses.0.snapshots.0.snapshot : test_snapshot_get } + - length: { snapshots: 1 } + - match: { snapshots.0.snapshot : test_snapshot_get } # List snapshot info - do: @@ -57,8 +57,8 @@ repository: test_snapshot_get_repository snapshot: "*" - - length: { responses.0.snapshots: 1 } - - match: { responses.0.snapshots.0.snapshot : test_snapshot_get } + - length: { snapshots: 1 } + - match: { snapshots.0.snapshot : test_snapshot_get } # Remove our snapshot 
- do: diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml index c31749072a17b..8c4c0347a156a 100644 --- a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml @@ -23,7 +23,7 @@ repository: test_snapshot_repository_ro snapshot: "_all" - - length: { responses.0.snapshots: 1 } + - length: { snapshots: 1 } # Remove our repository - do: diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index ee7552c77e048..0a83463d21dff 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,4 +1,4 @@ -import org.elasticsearch.gradle.internal.MavenFilteringHack +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin @@ -153,10 +153,10 @@ tasks.named("processYamlRestTestResources").configure { 'ec2_base_path' : s3EC2BasePath, 'ecs_bucket' : s3ECSBucket, 'ecs_base_path' : s3ECSBasePath, - 'disable_chunked_encoding': s3DisableChunkedEncoding, + 'disable_chunked_encoding': s3DisableChunkedEncoding ] inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) + filter("tokens" : expansions.collectEntries {k, v -> [k, v.toString()]} /* must be a map of strings */, ReplaceTokens.class) } tasks.named("internalClusterTest").configure { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 6c096c835d132..7c75ef7ffb4e4 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -27,6 +27,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -41,10 +42,12 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -129,6 +132,106 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b }); } + @Override + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + boolean atomic, + CheckedConsumer writer) throws IOException { + final String absoluteBlobKey = buildKey(blobName); + try (AmazonS3Reference clientReference = blobStore.clientReference(); + ChunkedBlobOutputStream out = new ChunkedBlobOutputStream<>(blobStore.bigArrays(), blobStore.bufferSizeInBytes()) { + + private final SetOnce uploadId = new SetOnce<>(); + + @Override + 
protected void flushBuffer() throws IOException { + flushBuffer(false); + } + + private void flushBuffer(boolean lastPart) throws IOException { + if (buffer.size() == 0) { + return; + } + if (flushedBytes == 0L) { + assert lastPart == false : "use single part upload if there's only a single part"; + uploadId.set(SocketAccess.doPrivileged(() -> + clientReference.client().initiateMultipartUpload(initiateMultiPartUpload(absoluteBlobKey)).getUploadId())); + if (Strings.isEmpty(uploadId.get())) { + throw new IOException("Failed to initialize multipart upload " + absoluteBlobKey); + } + } + assert lastPart == false || successful : "must only write last part if successful"; + final UploadPartRequest uploadRequest = createPartUploadRequest( + buffer.bytes().streamInput(), uploadId.get(), parts.size() + 1, absoluteBlobKey, buffer.size(), lastPart); + final UploadPartResult uploadResponse = + SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest)); + finishPart(uploadResponse.getPartETag()); + } + + @Override + protected void onCompletion() throws IOException { + if (flushedBytes == 0L) { + writeBlob(blobName, buffer.bytes(), failIfAlreadyExists); + } else { + flushBuffer(true); + final CompleteMultipartUploadRequest complRequest = + new CompleteMultipartUploadRequest(blobStore.bucket(), absoluteBlobKey, uploadId.get(), parts); + complRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); + SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); + } + } + + @Override + protected void onFailure() { + if (Strings.hasText(uploadId.get())) { + abortMultiPartUpload(uploadId.get(), absoluteBlobKey); + } + } + }) { + writer.accept(out); + out.markSuccess(); + } + } + + private UploadPartRequest createPartUploadRequest(InputStream stream, + String uploadId, + int number, + String blobName, + long size, + boolean lastPart) { + final UploadPartRequest uploadRequest = new UploadPartRequest(); + uploadRequest.setBucketName(blobStore.bucket()); + uploadRequest.setKey(blobName); + uploadRequest.setUploadId(uploadId); + uploadRequest.setPartNumber(number); + uploadRequest.setInputStream(stream); + uploadRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); + uploadRequest.setPartSize(size); + uploadRequest.setLastPart(lastPart); + return uploadRequest; + } + + private void abortMultiPartUpload(String uploadId, String blobName) { + final AbortMultipartUploadRequest abortRequest = + new AbortMultipartUploadRequest(blobStore.bucket(), blobName, uploadId); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest)); + } + } + + private InitiateMultipartUploadRequest initiateMultiPartUpload(String blobName) { + final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(blobStore.bucket(), blobName); + initRequest.setStorageClass(blobStore.getStorageClass()); + initRequest.setCannedACL(blobStore.getCannedACL()); + initRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); + if (blobStore.serverSideEncryption()) { + final ObjectMetadata md = new ObjectMetadata(); + md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); + initRequest.setObjectMetadata(md); + } + return initRequest; + } + // package private for testing long getLargeBlobThresholdInBytes() { return blobStore.bufferSizeInBytes(); @@ -389,19 +492,10 @@ void 
executeMultipartUpload(final S3BlobStore blobStore, final SetOnce uploadId = new SetOnce<>(); final String bucketName = blobStore.bucket(); boolean success = false; - - final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName); - initRequest.setStorageClass(blobStore.getStorageClass()); - initRequest.setCannedACL(blobStore.getCannedACL()); - initRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); - if (blobStore.serverSideEncryption()) { - final ObjectMetadata md = new ObjectMetadata(); - md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - initRequest.setObjectMetadata(md); - } try (AmazonS3Reference clientReference = blobStore.clientReference()) { - uploadId.set(SocketAccess.doPrivileged(() -> clientReference.client().initiateMultipartUpload(initRequest).getUploadId())); + uploadId.set(SocketAccess.doPrivileged(() -> + clientReference.client().initiateMultipartUpload(initiateMultiPartUpload(blobName)).getUploadId())); if (Strings.isEmpty(uploadId.get())) { throw new IOException("Failed to initialize multipart upload " + blobName); } @@ -410,21 +504,9 @@ void executeMultipartUpload(final S3BlobStore blobStore, long bytesCount = 0; for (int i = 1; i <= nbParts; i++) { - final UploadPartRequest uploadRequest = new UploadPartRequest(); - uploadRequest.setBucketName(bucketName); - uploadRequest.setKey(blobName); - uploadRequest.setUploadId(uploadId.get()); - uploadRequest.setPartNumber(i); - uploadRequest.setInputStream(input); - uploadRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); - - if (i < nbParts) { - uploadRequest.setPartSize(partSize); - uploadRequest.setLastPart(false); - } else { - uploadRequest.setPartSize(lastPartSize); - uploadRequest.setLastPart(true); - } + final boolean lastPart = i == nbParts; + final UploadPartRequest uploadRequest = + createPartUploadRequest(input, uploadId.get(), i, blobName, lastPart ? 
lastPartSize : partSize, lastPart); bytesCount += uploadRequest.getPartSize(); final UploadPartResult uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest)); @@ -446,10 +528,7 @@ void executeMultipartUpload(final S3BlobStore blobStore, throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e); } finally { if ((success == false) && Strings.hasLength(uploadId.get())) { - final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get()); - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest)); - } + abortMultiPartUpload(uploadId.get(), blobName); } } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index a3279c1ef4976..90a86c4910ba5 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; import java.io.IOException; import java.util.HashMap; @@ -35,6 +36,8 @@ class S3BlobStore implements BlobStore { private final S3Service service; + private final BigArrays bigArrays; + private final String bucket; private final ByteSizeValue bufferSize; @@ -56,8 +59,9 @@ class S3BlobStore implements BlobStore { S3BlobStore(S3Service service, String bucket, boolean serverSideEncryption, ByteSizeValue bufferSize, String cannedACL, String storageClass, - RepositoryMetadata repositoryMetadata) { + RepositoryMetadata repositoryMetadata, BigArrays bigArrays) { this.service = service; + this.bigArrays = bigArrays; this.bucket = bucket; this.serverSideEncryption = serverSideEncryption; this.bufferSize = bufferSize; @@ -136,6 +140,10 @@ public String bucket() { return bucket; } + public BigArrays bigArrays() { + return bigArrays; + } + public boolean serverSideEncryption() { return serverSideEncryption; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index c5324f8572648..5aac348d1794a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -13,8 +13,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -28,12 +26,11 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.repositories.FinalizeSnapshotContext; import org.elasticsearch.repositories.RepositoryData; import 
org.elasticsearch.repositories.RepositoryException; -import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotId; -import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -239,15 +236,18 @@ private static Map buildLocation(RepositoryMetadata metadata) { private final AtomicReference finalizationFuture = new AtomicReference<>(); @Override - public void finalizeSnapshot(ShardGenerations shardGenerations, long repositoryStateId, Metadata clusterMetadata, - SnapshotInfo snapshotInfo, Version repositoryMetaVersion, - Function stateTransformer, - ActionListener listener) { - if (SnapshotsService.useShardGenerations(repositoryMetaVersion) == false) { - listener = delayedListener(listener); + public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { + if (SnapshotsService.useShardGenerations(finalizeSnapshotContext.repositoryMetaVersion()) == false) { + finalizeSnapshotContext = new FinalizeSnapshotContext( + finalizeSnapshotContext.updatedShardGenerations(), + finalizeSnapshotContext.repositoryStateId(), + finalizeSnapshotContext.clusterMetadata(), + finalizeSnapshotContext.snapshotInfo(), + finalizeSnapshotContext.repositoryMetaVersion(), + delayedListener(finalizeSnapshotContext) + ); } - super.finalizeSnapshot(shardGenerations, repositoryStateId, clusterMetadata, snapshotInfo, repositoryMetaVersion, - stateTransformer, listener); + super.finalizeSnapshot(finalizeSnapshotContext); } @Override @@ -308,7 +308,7 @@ private static BlobPath buildBasePath(RepositoryMetadata metadata) { @Override protected S3BlobStore createBlobStore() { - return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, metadata); + return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, metadata, bigArrays); } // only use for testing diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 5505cd592a8e6..789a67ca2e847 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -12,6 +12,7 @@ import com.amazonaws.util.Base16; import org.apache.http.HttpStatus; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; @@ -42,6 +43,7 @@ import java.util.Locale; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING; import static org.elasticsearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING; @@ -74,8 +76,8 @@ public void tearDown() throws Exception { } @Override - protected String downloadStorageEndpoint(String blob) { - return "/bucket/" + blob; + protected String downloadStorageEndpoint(BlobContainer container, String blob) { + return "/bucket/" + 
container.path().buildAsString() + blob; } @Override @@ -90,9 +92,9 @@ protected Class unresponsiveExceptionType() { @Override protected BlobContainer createBlobContainer(final @Nullable Integer maxRetries, - final @Nullable TimeValue readTimeout, - final @Nullable Boolean disableChunkedEncoding, - final @Nullable ByteSizeValue bufferSize) { + final @Nullable TimeValue readTimeout, + final @Nullable Boolean disableChunkedEncoding, + final @Nullable ByteSizeValue bufferSize) { final Settings.Builder clientSettings = Settings.builder(); final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); @@ -121,12 +123,12 @@ protected BlobContainer createBlobContainer(final @Nullable Integer maxRetries, final RepositoryMetadata repositoryMetadata = new RepositoryMetadata("repository", S3Repository.TYPE, Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName).build()); - return new S3BlobContainer(BlobPath.EMPTY, new S3BlobStore(service, "bucket", + return new S3BlobContainer(randomBoolean() ? BlobPath.EMPTY : BlobPath.EMPTY.add("foo"), new S3BlobStore(service, "bucket", S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY), bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), - repositoryMetadata)) { + repositoryMetadata, BigArrays.NON_RECYCLING_INSTANCE)) { @Override public InputStream readBlob(String blobName) throws IOException { return new AssertingInputStream(super.readBlob(blobName), blobName); @@ -143,8 +145,10 @@ public void testWriteBlobWithRetries() throws Exception { final int maxRetries = randomInt(5); final CountDown countDown = new CountDown(maxRetries + 1); + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); + final byte[] bytes = randomBlobContent(); - httpServer.createContext("/bucket/write_blob_max_retries", exchange -> { + httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_blob_max_retries"), exchange -> { if ("PUT".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery() == null) { if (countDown.countDown()) { final BytesReference body = Streams.readFully(exchange.getRequestBody()); @@ -169,8 +173,6 @@ public void testWriteBlobWithRetries() throws Exception { exchange.close(); } }); - - final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false); } @@ -183,7 +185,7 @@ public void testWriteBlobWithReadTimeouts() { final BlobContainer blobContainer = createBlobContainer(1, readTimeout, true, null); // HTTP server does not send a response - httpServer.createContext("/bucket/write_blob_timeout", exchange -> { + httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_blob_timeout"), exchange -> { if (randomBoolean()) { if (randomBoolean()) { Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]); @@ -199,7 +201,8 @@ public void testWriteBlobWithReadTimeouts() { } }); assertThat(exception.getMessage().toLowerCase(Locale.ROOT), - containsString("unable to upload object [write_blob_timeout] using a single upload")); + containsString( + "unable to upload object [" + blobContainer.path().buildAsString() + "write_blob_timeout] using a single 
upload")); assertThat(exception.getCause(), instanceOf(SdkClientException.class)); assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); @@ -223,7 +226,7 @@ public void testWriteLargeBlob() throws Exception { final AtomicInteger countDownUploads = new AtomicInteger(nbErrors * (parts + 1)); final CountDown countDownComplete = new CountDown(nbErrors); - httpServer.createContext("/bucket/write_large_blob", exchange -> { + httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_large_blob"), exchange -> { final long contentLength = Long.parseLong(exchange.getRequestHeaders().getFirst("Content-Length")); if ("POST".equals(exchange.getRequestMethod()) @@ -296,6 +299,105 @@ public void testWriteLargeBlob() throws Exception { assertThat(countDownComplete.isCountedDown(), is(true)); } + public void testWriteLargeBlobStreaming() throws Exception { + final boolean useTimeout = rarely(); + final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null; + final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB); + final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize); + + final int parts = randomIntBetween(1, 5); + final long lastPartSize = randomLongBetween(10, 512); + final long blobSize = (parts * bufferSize.getBytes()) + lastPartSize; + + final int nbErrors = 2; // we want all requests to fail at least once + final CountDown countDownInitiate = new CountDown(nbErrors); + final AtomicInteger counterUploads = new AtomicInteger(0); + final AtomicLong bytesReceived = new AtomicLong(0L); + final CountDown countDownComplete = new CountDown(nbErrors); + + httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_large_blob_streaming"), exchange -> { + final long contentLength = Long.parseLong(exchange.getRequestHeaders().getFirst("Content-Length")); + + if ("POST".equals(exchange.getRequestMethod()) + && exchange.getRequestURI().getQuery().equals("uploads")) { + // initiate multipart upload request + if (countDownInitiate.countDown()) { + byte[] response = ("\n" + + "\n" + + " bucket\n" + + " write_large_blob_streaming\n" + + " TEST\n" + + "").getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } + } else if ("PUT".equals(exchange.getRequestMethod()) + && exchange.getRequestURI().getQuery().contains("uploadId=TEST") + && exchange.getRequestURI().getQuery().contains("partNumber=")) { + // upload part request + MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); + BytesReference bytes = Streams.readFully(md5); + + if (counterUploads.incrementAndGet() % 2 == 0) { + bytesReceived.addAndGet(bytes.length()); + exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); + exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); + exchange.close(); + return; + } + + } else if ("POST".equals(exchange.getRequestMethod()) + && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) { + // complete multipart upload request + if (countDownComplete.countDown()) { + Streams.readFully(exchange.getRequestBody()); + byte[] response = ("\n" + + "\n" + + " bucket\n" + + " write_large_blob_streaming\n" + + "").getBytes(StandardCharsets.UTF_8); + 
exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } + } + + // sends an error back or let the request time out + if (useTimeout == false) { + if (randomBoolean() && contentLength > 0) { + Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))]); + } else { + Streams.readFully(exchange.getRequestBody()); + exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + } + exchange.close(); + } + }); + + blobContainer.writeBlob("write_large_blob_streaming", false, randomBoolean(), out -> { + final byte[] buffer = new byte[16 * 1024]; + long outstanding = blobSize; + while (outstanding > 0) { + if (randomBoolean()) { + int toWrite = Math.toIntExact(Math.min(randomIntBetween(64, buffer.length), outstanding)); + out.write(buffer, 0, toWrite); + outstanding -= toWrite; + } else { + out.write(0); + outstanding--; + } + } + }); + + assertEquals(blobSize, bytesReceived.get()); + } + /** * Asserts that an InputStream is fully consumed, or aborted, when it is closed */ diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index f651cad52e8f2..60a4133aa58c1 100644 --- a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -10,12 +10,12 @@ setup: body: type: s3 settings: - bucket: ${permanent_bucket} + bucket: @permanent_bucket@ client: integration_test_permanent - base_path: "${permanent_base_path}" + base_path: "@permanent_base_path@" canned_acl: private storage_class: standard - disable_chunked_encoding: ${disable_chunked_encoding} + disable_chunked_encoding: @disable_chunked_encoding@ # Remove the snapshots, if a previous test failed to delete them. This is # useful for third party tests that runs the test against a real external service. 
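The new streaming writeBlob path in S3BlobContainer above drives the same multipart-upload primitives that executeMultipartUpload already used, just fed from a ChunkedBlobOutputStream buffer: initiate the upload, send numbered parts, complete, and abort on any failure so no orphaned parts are left behind. A condensed, self-contained sketch of that flow against the AWS SDK v1 client; bucket, key and the in-memory parts are placeholders, and in a real upload every part except the last must be at least 5 MB:

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

/** Minimal multipart upload: initiate, upload parts, complete, abort on failure. */
final class MultipartUploadSketch {

    static void upload(AmazonS3 client, String bucket, String key, List<byte[]> parts) {
        final String uploadId = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
        boolean success = false;
        try {
            final List<PartETag> etags = new ArrayList<>();
            for (int i = 0; i < parts.size(); i++) {
                final byte[] part = parts.get(i);
                final UploadPartRequest request = new UploadPartRequest()
                    .withBucketName(bucket)
                    .withKey(key)
                    .withUploadId(uploadId)
                    .withPartNumber(i + 1)            // part numbers are 1-based
                    .withInputStream(new ByteArrayInputStream(part))
                    .withPartSize(part.length)
                    .withLastPart(i == parts.size() - 1);
                etags.add(client.uploadPart(request).getPartETag());
            }
            client.completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, key, uploadId, etags));
            success = true;
        } finally {
            if (success == false) {
                // Abort so S3 does not keep storing (and billing for) orphaned parts.
                client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            }
        }
    }
}

In the plugin itself every SDK call additionally runs through SocketAccess.doPrivileged and attaches the request metric collector, storage class and canned ACL; those details are omitted here.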
@@ -40,9 +40,9 @@ setup: body: type: s3 settings: - bucket: ${permanent_bucket} + bucket: @permanent_bucket@ client: integration_test_permanent - base_path: "${permanent_base_path}" + base_path: "@permanent_base_path@" endpoint: 127.0.0.1:5 canned_acl: private storage_class: standard @@ -55,9 +55,9 @@ setup: body: type: s3 settings: - bucket: ${permanent_bucket} + bucket: @permanent_bucket@ client: integration_test_permanent - base_path: "${permanent_base_path}" + base_path: "@permanent_base_path@" endpoint: 127.0.0.1:5 canned_acl: private storage_class: standard @@ -108,18 +108,15 @@ setup: --- "Snapshot and Restore with repository-s3 using permanent credentials": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Get repository - do: snapshot.get_repository: repository: repository_permanent - - match: { repository_permanent.settings.bucket : ${permanent_bucket} } + - match: { repository_permanent.settings.bucket : @permanent_bucket@ } - match: { repository_permanent.settings.client : "integration_test_permanent" } - - match: { repository_permanent.settings.base_path : "${permanent_base_path}" } + - match: { repository_permanent.settings.base_path : "@permanent_base_path@" } - match: { repository_permanent.settings.canned_acl : "private" } - match: { repository_permanent.settings.storage_class : "standard" } - is_false: repository_permanent.settings.access_key @@ -215,9 +212,9 @@ setup: repository: repository_permanent snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -322,18 +319,13 @@ setup: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository_permanent snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non existing snapshot": diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index caca1b959511e..148ac94b709fb 100644 --- a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -10,27 +10,24 @@ setup: body: type: s3 settings: - bucket: ${temporary_bucket} + bucket: @temporary_bucket@ client: integration_test_temporary - base_path: "${temporary_base_path}" + base_path: "@temporary_base_path@" canned_acl: private storage_class: standard - disable_chunked_encoding: ${disable_chunked_encoding} + disable_chunked_encoding: @disable_chunked_encoding@ --- "Snapshot and Restore with repository-s3 using temporary credentials": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Get repository - do: snapshot.get_repository: repository: repository_temporary - - match: { repository_temporary.settings.bucket : ${temporary_bucket} } + - match: { repository_temporary.settings.bucket : @temporary_bucket@ } - match: { 
repository_temporary.settings.client : "integration_test_temporary" } - - match: { repository_temporary.settings.base_path : "${temporary_base_path}" } + - match: { repository_temporary.settings.base_path : "@temporary_base_path@" } - match: { repository_temporary.settings.canned_acl : "private" } - match: { repository_temporary.settings.storage_class : "standard" } - is_false: repository_temporary.settings.access_key @@ -126,9 +123,9 @@ setup: repository: repository_temporary snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -233,18 +230,13 @@ setup: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository_temporary snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non existing snapshot": diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index 3573af618febe..21112bc99defc 100644 --- a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -10,27 +10,24 @@ setup: body: type: s3 settings: - bucket: ${ec2_bucket} + bucket: @ec2_bucket@ client: integration_test_ec2 - base_path: "${ec2_base_path}" + base_path: "@ec2_base_path@" canned_acl: private storage_class: standard - disable_chunked_encoding: ${disable_chunked_encoding} + disable_chunked_encoding: @disable_chunked_encoding@ --- "Snapshot and Restore with repository-s3 using ec2 credentials": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Get repository - do: snapshot.get_repository: repository: repository_ec2 - - match: { repository_ec2.settings.bucket : ${ec2_bucket} } + - match: { repository_ec2.settings.bucket : @ec2_bucket@ } - match: { repository_ec2.settings.client : "integration_test_ec2" } - - match: { repository_ec2.settings.base_path : "${ec2_base_path}" } + - match: { repository_ec2.settings.base_path : "@ec2_base_path@" } - match: { repository_ec2.settings.canned_acl : "private" } - match: { repository_ec2.settings.storage_class : "standard" } - is_false: repository_ec2.settings.access_key @@ -126,9 +123,9 @@ setup: repository: repository_ec2 snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -233,18 +230,13 @@ setup: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository_ec2 snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non 
existing snapshot": diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 88c315af4b536..daf5739f6720d 100644 --- a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -10,27 +10,24 @@ setup: body: type: s3 settings: - bucket: ${ecs_bucket} + bucket: @ecs_bucket@ client: integration_test_ecs - base_path: "${ecs_base_path}" + base_path: "@ecs_base_path@" canned_acl: private storage_class: standard - disable_chunked_encoding: ${disable_chunked_encoding} + disable_chunked_encoding: @disable_chunked_encoding@ --- "Snapshot and Restore with repository-s3 using ecs credentials": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" # Get repository - do: snapshot.get_repository: repository: repository_ecs - - match: { repository_ecs.settings.bucket : ${ecs_bucket} } + - match: { repository_ecs.settings.bucket : @ecs_bucket@ } - match: { repository_ecs.settings.client : "integration_test_ecs" } - - match: { repository_ecs.settings.base_path : "${ecs_base_path}" } + - match: { repository_ecs.settings.base_path : "@ecs_base_path@" } - match: { repository_ecs.settings.canned_acl : "private" } - match: { repository_ecs.settings.storage_class : "standard" } - is_false: repository_ecs.settings.access_key @@ -126,9 +123,9 @@ setup: repository: repository_ecs snapshot: snapshot-one,snapshot-two - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.state : SUCCESS } - - match: { responses.0.snapshots.1.state : SUCCESS } + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } # Delete the index - do: @@ -233,18 +230,13 @@ setup: --- "Get a non existing snapshot": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception/ snapshot.get: repository: repository_ecs snapshot: missing - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Delete a non existing snapshot": diff --git a/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbSimpleFsTests.java b/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbNIOFSTests.java similarity index 80% rename from plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbSimpleFsTests.java rename to plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbNIOFSTests.java index be78804d9535e..bcfc913b64efd 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbSimpleFsTests.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbNIOFSTests.java @@ -11,12 +11,12 @@ import org.elasticsearch.common.settings.Settings; -public class SmbSimpleFsTests extends AbstractAzureFsTestCase { +public class SmbNIOFSTests extends AbstractAzureFsTestCase { @Override public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) - .put("index.store.type", "smb_simple_fs") + .put("index.store.type", randomFrom("smb_simple_fs", "smb_nio_fs")) .build(); } } diff --git 
a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smb/SmbSimpleFsDirectoryFactory.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smb/SmbNIOFSDirectoryFactory.java similarity index 78% rename from plugins/store-smb/src/main/java/org/elasticsearch/index/store/smb/SmbSimpleFsDirectoryFactory.java rename to plugins/store-smb/src/main/java/org/elasticsearch/index/store/smb/SmbNIOFSDirectoryFactory.java index 1ed76e575575e..22dc32a47770a 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smb/SmbSimpleFsDirectoryFactory.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smb/SmbNIOFSDirectoryFactory.java @@ -10,17 +10,17 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.NIOFSDirectory; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.store.FsDirectoryFactory; import java.io.IOException; import java.nio.file.Path; -public final class SmbSimpleFsDirectoryFactory extends FsDirectoryFactory { +public final class SmbNIOFSDirectoryFactory extends FsDirectoryFactory { @Override protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { - return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory)); + return new SmbDirectoryWrapper(new NIOFSDirectory(location, lockFactory)); } } diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java b/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java index f94259d7badf3..c524926ba1548 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.plugin.store.smb; import org.elasticsearch.index.store.smb.SmbMmapFsDirectoryFactory; -import org.elasticsearch.index.store.smb.SmbSimpleFsDirectoryFactory; +import org.elasticsearch.index.store.smb.SmbNIOFSDirectoryFactory; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.Plugin; @@ -21,7 +21,9 @@ public class SMBStorePlugin extends Plugin implements IndexStorePlugin { public Map getDirectoryFactories() { return Map.of( "smb_mmap_fs", new SmbMmapFsDirectoryFactory(), - "smb_simple_fs", new SmbSimpleFsDirectoryFactory()); + "smb_simple_fs", new SmbNIOFSDirectoryFactory(), + "smb_nio_fs", new SmbNIOFSDirectoryFactory() + ); } } diff --git a/plugins/store-smb/src/test/java/org/elasticsearch/index/store/smb/SmbSimpleFSDirectoryTests.java b/plugins/store-smb/src/test/java/org/elasticsearch/index/store/smb/SmbNIOFSDirectoryTests.java similarity index 82% rename from plugins/store-smb/src/test/java/org/elasticsearch/index/store/smb/SmbSimpleFSDirectoryTests.java rename to plugins/store-smb/src/test/java/org/elasticsearch/index/store/smb/SmbNIOFSDirectoryTests.java index 3eb8a20f49e82..13e90747faea3 100644 --- a/plugins/store-smb/src/test/java/org/elasticsearch/index/store/smb/SmbSimpleFSDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/elasticsearch/index/store/smb/SmbNIOFSDirectoryTests.java @@ -11,14 +11,14 @@ import java.io.IOException; import java.nio.file.Path; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.NIOFSDirectory; import 
org.elasticsearch.index.store.EsBaseDirectoryTestCase; -public class SmbSimpleFSDirectoryTests extends EsBaseDirectoryTestCase { +public class SmbNIOFSDirectoryTests extends EsBaseDirectoryTestCase { @Override protected Directory getDirectory(Path file) throws IOException { - return new SmbDirectoryWrapper(new SimpleFSDirectory(file)); + return new SmbDirectoryWrapper(new NIOFSDirectory(file)); } @Override diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index b65e9dffb4fae..08481ca6860e6 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -34,6 +34,13 @@ tasks.named("dependencyLicenses").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.Encoder', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // from io.netty.handler.codec.protobuf.ProtobufDecoder (netty) 'com.google.protobuf.ExtensionRegistry', 'com.google.protobuf.MessageLite$Builder', @@ -86,6 +93,7 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.MessageLiteOrBuilder', 'com.google.protobuf.nano.CodedOutputByteBufferNano', 'com.google.protobuf.nano.MessageNano', + 'com.github.luben.zstd.Zstd', 'com.jcraft.jzlib.Deflater', 'com.jcraft.jzlib.Inflater', 'com.jcraft.jzlib.JZlib$WrapperType', @@ -98,11 +106,6 @@ tasks.named("thirdPartyAudit").configure { 'com.ning.compress.lzf.util.ChunkDecoderFactory', 'com.ning.compress.lzf.util.ChunkEncoderFactory', 'lzma.sdk.lzma.Encoder', - 'net.jpountz.lz4.LZ4Compressor', - 'net.jpountz.lz4.LZ4Factory', - 'net.jpountz.lz4.LZ4FastDecompressor', - 'net.jpountz.xxhash.XXHash32', - 'net.jpountz.xxhash.XXHashFactory', 'org.eclipse.jetty.alpn.ALPN$ClientProvider', 'org.eclipse.jetty.alpn.ALPN$ServerProvider', 'org.eclipse.jetty.alpn.ALPN', @@ -113,6 +116,8 @@ tasks.named("thirdPartyAudit").configure { 'org.conscrypt.HandshakeListener', // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', 'io.netty.internal.tcnative.Buffer', 'io.netty.internal.tcnative.Library', 'io.netty.internal.tcnative.SSL', @@ -120,6 +125,7 @@ tasks.named("thirdPartyAudit").configure { 'io.netty.internal.tcnative.SSLPrivateKeyMethod', 'io.netty.internal.tcnative.CertificateCallback', 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', 'io.netty.internal.tcnative.SessionTicketKey', 'io.netty.internal.tcnative.SniHostNameMatcher', 'io.netty.internal.tcnative.SSLSession', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.63.Final.jar.sha1 deleted file mode 100644 index d472369d69bc0..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40028ce5ac7c43f1c9a1439f74637cad04013e23 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..973ba015d2079 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +8d4be9506ea5f54af58bcd596ba3fe2fc5036413 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-codec-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.63.Final.jar.sha1 deleted file mode 100644 index 8bfbe331c55c9..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4d2fccea88c80e56d59ce1053c53df0f9f4f5db \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..ae8837c2664a8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +e7cfff848e6c1294645638d74fce6ad89cc6f3f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.63.Final.jar.sha1 deleted file mode 100644 index 0279e286e318d..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8c9b159dcb76452dc98a370a5511ff993670419 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..74435145e041c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +15fff6bae9e4b09ba5d48a70bb88841c9fc22a32 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.63.Final.jar.sha1 deleted file mode 100644 index 54e103f1d8b5f..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1206b46384d4dcbecee2901f18ce65ecf02e8a4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..164add2d48e57 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +d1c4eda38f525a02fb1ea8d94a8d98dc2935fd02 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.63.Final.jar.sha1 deleted file mode 100644 index ae180d9ae4016..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -879a43c2325b08e92e8967218b6ddb0ed4b7a0d3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..657b3ad736c1e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +1e6ec9b58725a96b2bd0f173709b59c79175225c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.63.Final.jar.sha1 deleted file mode 100644 index eb6858e75cc21..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d07cd47c101dfa655d6d5cc304d523742fd78ca8 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.66.Final.jar.sha1 new 
file mode 100644 index 0000000000000..4a085c20c9ec0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +2f018d8df6f533c3d75dc5fdb11071bc2e7b591b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.63.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.63.Final.jar.sha1 deleted file mode 100644 index c41cdc86c51c8..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.63.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09a8bbe1ba082c9434e6f524d3864a53f340f2df \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.66.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.66.Final.jar.sha1 new file mode 100644 index 0000000000000..c21ce614d86e9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.66.Final.jar.sha1 @@ -0,0 +1 @@ +3511bc4e13198de644eefe4c8c758245145da128 \ No newline at end of file diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index 804a74a39f5d1..a7769272582ab 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -34,6 +33,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.AbstractHttpServerTransportTestCase; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.CorsHandler; import org.elasticsearch.http.HttpServerTransport; @@ -44,7 +44,6 @@ import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -72,7 +71,7 @@ /** * Tests for the {@link NioHttpServerTransport} class. 
*/ -public class NioHttpServerTransportTests extends ESTestCase { +public class NioHttpServerTransportTests extends AbstractHttpServerTransportTestCase { private NetworkService networkService; private ThreadPool threadPool; @@ -150,8 +149,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, } }; try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, - xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), randomClusterSettings())) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (NioHttpClient client = new NioHttpClient()) { @@ -186,7 +184,7 @@ public void testBindUnavailableAddress() { final Settings initialSettings = createSettings(); try (NioHttpServerTransport transport = new NioHttpServerTransport(initialSettings, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + randomClusterSettings())) { transport.start(); TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); Settings settings = Settings.builder() @@ -195,7 +193,7 @@ threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settin .build(); try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + randomClusterSettings())) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); assertEquals( "Failed to bind to " + NetworkAddress.format(remoteAddress.address()), @@ -232,7 +230,7 @@ public void dispatchBadRequest(final RestChannel channel, try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + randomClusterSettings())) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -294,7 +292,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th try (NioHttpServerTransport transport = new NioHttpServerTransport( Settings.EMPTY, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), dispatcher, - new NioGroupFactory(Settings.EMPTY, logger), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + new NioGroupFactory(Settings.EMPTY, logger), randomClusterSettings())) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -350,7 +348,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new 
ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + randomClusterSettings())) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -400,7 +398,7 @@ public void dispatchBadRequest(final RestChannel channel, try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + randomClusterSettings())) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index 501ce6190b8a4..4a880eb402d0a 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 559aad2d0e2ed..0b51bd0a9a1bf 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -20,31 +20,6 @@ dependencies { } // TODO: give each evil test its own fresh JVM for more isolation. - tasks.named("test").configure { systemProperty 'tests.security.manager', 'false' } - -tasks.named("thirdPartyAudit").configure { - ignoreMissingClasses( - 'com.ibm.icu.lang.UCharacter' - ) - - ignoreViolations( - // uses internal java api: sun.misc.Unsafe - 'com.google.common.cache.Striped64', - 'com.google.common.cache.Striped64$1', - 'com.google.common.cache.Striped64$Cell', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', - 'com.google.common.hash.Striped64', - 'com.google.common.hash.Striped64$1', - 'com.google.common.hash.Striped64$Cell', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', - 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1' - ) -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java index 5c146ad6702c9..95bf6c8e9cc38 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/PolicyUtilTests.java @@ -238,6 +238,21 @@ void assertIllegalPermissions(List illegalPermissions, PolicyParser pars "java.util.PropertyPermission someProperty read", "java.util.PropertyPermission * write", "java.util.PropertyPermission foo.bar write", + "javax.management.MBeanPermission * addNotificationListener", + "javax.management.MBeanPermission * getAttribute", + "javax.management.MBeanPermission * 
getDomains", + "javax.management.MBeanPermission * getMBeanInfo", + "javax.management.MBeanPermission * getObjectInstance", + "javax.management.MBeanPermission * instantiate", + "javax.management.MBeanPermission * invoke", + "javax.management.MBeanPermission * isInstanceOf", + "javax.management.MBeanPermission * queryMBeans", + "javax.management.MBeanPermission * queryNames", + "javax.management.MBeanPermission * registerMBean", + "javax.management.MBeanPermission * removeNotificationListener", + "javax.management.MBeanPermission * setAttribute", + "javax.management.MBeanPermission * unregisterMBean", + "javax.management.MBeanServerPermission *", "javax.security.auth.AuthPermission doAs", "javax.security.auth.AuthPermission doAsPrivileged", "javax.security.auth.AuthPermission getSubject", @@ -349,8 +364,9 @@ public void testModulePolicyAllowedPermissions() throws Exception { "java.sql.SQLPermission setSyncFactory", "java.sql.SQLPermission deregisterDriver", "java.util.logging.LoggingPermission control", - "javax.management.MBeanPermission * *", - "javax.management.MBeanServerPermission *", + "javax.management.MBeanPermission * getClassLoader", + "javax.management.MBeanPermission * getClassLoaderFor", + "javax.management.MBeanPermission * getClassLoaderRepository", "javax.management.MBeanTrustPermission *", "javax.management.remote.SubjectDelegationPermission *", "javax.net.ssl.SSLPermission setHostnameVerifier", diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/index/store/LuceneFilesExtensionsTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/index/store/LuceneFilesExtensionsTests.java new file mode 100644 index 0000000000000..9248a2ca8e9fb --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/index/store/LuceneFilesExtensionsTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.Assertions; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class LuceneFilesExtensionsTests extends ESTestCase { + + public void testUnknownFileExtension() { + if (Assertions.ENABLED) { + AssertionError e = expectThrows(AssertionError.class, () -> LuceneFilesExtensions.fromExtension("abc")); + assertThat(e.getMessage(), containsString("unknown Lucene file extension [abc]")); + + setEsAllowUnknownLuceneFileExtensions("true"); + try { + assertNull(LuceneFilesExtensions.fromExtension("abc")); + } finally { + setEsAllowUnknownLuceneFileExtensions(null); + } + } else { + assertNull(LuceneFilesExtensions.fromExtension("abc")); + } + } + + @SuppressForbidden(reason = "set or clear system property es.allow_unknown_lucene_file_extensions") + public void setEsAllowUnknownLuceneFileExtensions(final String value) { + if (value == null) { + System.clearProperty("es.allow_unknown_lucene_file_extensions"); + } else { + System.setProperty("es.allow_unknown_lucene_file_extensions", value); + } + } +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java index 109f999c2b7e5..9ff3dc5231f9a 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java @@ -80,8 +80,12 @@ public void testExecutionErrorOnScalingESThreadPoolExecutor() throws Interrupted } public void testExecutionErrorOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException { - final PrioritizedEsThreadPoolExecutor prioritizedExecutor = EsExecutors.newSinglePrioritizing("test", - EsExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler()); + final PrioritizedEsThreadPoolExecutor prioritizedExecutor = EsExecutors.newSinglePrioritizing( + "test", + EsExecutors.daemonThreadFactory("test"), + threadPool.getThreadContext(), + threadPool.scheduler(), + PrioritizedEsThreadPoolExecutor.StarvationWatcher.NOOP_STARVATION_WATCHER); try { checkExecutionError(getExecuteRunner(prioritizedExecutor)); checkExecutionError(getSubmitRunner(prioritizedExecutor)); @@ -177,8 +181,12 @@ public void testExecutionExceptionOnScalingESThreadPoolExecutor() throws Interru } public void testExecutionExceptionOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException { - final PrioritizedEsThreadPoolExecutor prioritizedExecutor = EsExecutors.newSinglePrioritizing("test", - EsExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler()); + final PrioritizedEsThreadPoolExecutor prioritizedExecutor = EsExecutors.newSinglePrioritizing( + "test", + EsExecutors.daemonThreadFactory("test"), + threadPool.getThreadContext(), + threadPool.scheduler(), + PrioritizedEsThreadPoolExecutor.StarvationWatcher.NOOP_STARVATION_WATCHER); try { checkExecutionException(getExecuteRunner(prioritizedExecutor), true); checkExecutionException(getSubmitRunner(prioritizedExecutor), false); diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 334161795526a..8456ebf5cd481 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -12,7 +12,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import 
org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 9d36621267e5e..d6dcd01a3e053 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -10,31 +10,35 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; -import org.elasticsearch.core.Booleans; -import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.transport.Compression; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -51,6 +55,7 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -677,9 +682,6 @@ public void testRecovery() throws Exception { flushRequest.addParameter("force", "true"); flushRequest.addParameter("wait_if_ongoing", "true"); assertOK(client().performRequest(flushRequest)); - if (randomBoolean()) { - syncedFlush(index); - } if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog @@ -1047,13 +1049,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { // Check the snapshot metadata, especially the version Request 
listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); - Map responseMap = entityAsMap(client().performRequest(listSnapshotRequest)); - Map snapResponse; - if (responseMap.get("responses") != null) { - snapResponse = (Map) ((List) responseMap.get("responses")).get(0); - } else { - snapResponse = responseMap; - } + Map snapResponse = entityAsMap(client().performRequest(listSnapshotRequest)); assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); @@ -1588,6 +1584,44 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { } } + /** + * In 7.14 the cluster.remote.*.transport.compress setting was change from a boolean to an enum setting + * with true/false as options. This test ensures that the old boolean setting in cluster state is + * translated properly. This test can be removed in 9.0. + */ + public void testTransportCompressionSetting() throws IOException { + assumeTrue("the old transport.compress setting existed before 7.14", getOldClusterVersion().before(Version.V_7_14_0)); + assumeTrue("Early versions of 6.x do not have cluster.remote* prefixed settings", + getOldClusterVersion().onOrAfter(Version.V_7_14_0.minimumCompatibilityVersion())); + if (isRunningAgainstOldCluster()) { + final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("persistent"); + { + builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); + builder.field("cluster.remote.foo.transport.compress", "true"); + } + builder.endObject(); + } + builder.endObject(); + putSettingsRequest.setJsonEntity(Strings.toString(builder)); + } + client().performRequest(putSettingsRequest); + } else { + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response getSettingsResponse = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { + final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); + final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); + assertThat( + REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Compression.Enabled.TRUE)); + } + } + } + public static void assertNumHits(String index, int numHits, int totalShards) throws IOException { Map resp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); assertNoFailures(resp); diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 7e13ff0306983..3d8c5ff2301ec 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -115,6 +115,7 @@ public void testDeprecatedMessageWithoutXOpaqueId() throws IOException { public void testCompatibleLog() throws Exception { withThreadContext(threadContext -> { threadContext.putHeader(Task.X_OPAQUE_ID, "someId"); + threadContext.putHeader(Task.TRACE_ID, "someTraceId"); final DeprecationLogger testLogger = 
DeprecationLogger.getLogger("org.elasticsearch.test"); testLogger.deprecate(DeprecationCategory.OTHER,"someKey", "deprecated message1") .compatibleApiWarning("compatibleKey","compatible API message"); @@ -143,6 +144,7 @@ public void testCompatibleLog() throws Exception { hasEntry("message", "deprecated message1"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "someKey"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), + hasEntry(Task.TRACE_ID, "someTraceId"), hasEntry("elasticsearch.event.category", "other") ), allOf( @@ -159,6 +161,7 @@ public void testCompatibleLog() throws Exception { hasEntry("message", "compatible API message"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "compatibleKey"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), + hasEntry(Task.TRACE_ID, "someTraceId"), hasEntry("elasticsearch.event.category", "compatible_api") ) ) @@ -172,6 +175,7 @@ public void testCompatibleLog() throws Exception { public void testParseFieldEmittingDeprecatedLogs() throws Exception { withThreadContext(threadContext -> { threadContext.putHeader(Task.X_OPAQUE_ID, "someId"); + threadContext.putHeader(Task.TRACE_ID, "someTraceId"); ParseField deprecatedField = new ParseField("new_name", "deprecated_name"); assertTrue(deprecatedField.match("deprecated_name", LoggingDeprecationHandler.INSTANCE)); @@ -208,6 +212,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { hasEntry("message", "Deprecated field [deprecated_name] used, expected [new_name] instead"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "deprecated_field_deprecated_name"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), + hasEntry(Task.TRACE_ID, "someTraceId"), hasEntry("elasticsearch.event.category", "api") ), // deprecation log for field deprecated_name2 (note it is not being throttled) @@ -224,6 +229,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { hasEntry("message", "Deprecated field [deprecated_name2] used, expected [new_name] instead"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "deprecated_field_deprecated_name2"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), + hasEntry(Task.TRACE_ID, "someTraceId"), hasEntry("elasticsearch.event.category", "api") ), // compatible log line @@ -240,6 +246,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { hasEntry("message", "Deprecated field [compatible_deprecated_name] used, expected [new_name] instead"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "deprecated_field_compatible_deprecated_name"), hasEntry(DeprecatedMessage.X_OPAQUE_ID_FIELD_NAME, "someId"), + hasEntry(Task.TRACE_ID, "someTraceId"), hasEntry("elasticsearch.event.category", "compatible_api") ) ) @@ -255,6 +262,7 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { public void testDeprecatedMessage() throws Exception { withThreadContext(threadContext -> { threadContext.putHeader(Task.X_OPAQUE_ID, "someId"); + threadContext.putHeader(Task.TRACE_ID, "someTraceId"); final DeprecationLogger testLogger = DeprecationLogger.getLogger("org.elasticsearch.test"); testLogger.deprecate(DeprecationCategory.OTHER, "someKey", "deprecated message1"); diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 7a1191603fc24..562847cd9faa3 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -12,7 +12,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 
'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 0893f7138783c..e60c8a4f45038 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -17,8 +17,11 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.MediaType; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; @@ -309,9 +312,58 @@ public void testSyncedFlushTransition() throws Exception { try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { Request request = new Request("POST", index + "/_flush/synced"); - List warningMsg = List.of("Synced flush was removed and a normal flush was performed instead. " + - "This transition will be removed in a future version."); - request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(warningMsg) == false)); + final String v7MediaType = XContentType.VND_JSON.toParsedMediaType() + .responseContentTypeHeader(Map.of(MediaType.COMPATIBLE_WITH_PARAMETER_NAME, + String.valueOf(RestApiVersion.minimumSupported().major))); + List warningMsg = List.of("Synced flush is deprecated and will be removed in 8.0." + + " Use flush at /_flush or /{index}/_flush instead."); + request.setOptions(RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler(warnings -> warnings.equals(warningMsg) == false) + .addHeader("Accept", v7MediaType)); + + assertBusy(() -> { + Map result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(totalShards)); + assertThat(result.get("failed"), equalTo(0)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." 
+ index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + } + + public void testFlushTransition() throws Exception { + Nodes nodes = buildNodeAndVersions(); + assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); + assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); + // Allocate shards to new nodes then verify flush requests processed by old nodes/new nodes + String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + int numShards = randomIntBetween(1, 10); + int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); + int totalShards = numShards * (numOfReplicas + 1); + final String index = "test_flush"; + createIndex(index, Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put("index.routing.allocation.include._name", newNodes).build()); + ensureGreen(index); + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient oldNodeClient = buildClient(restClientSettings(), + nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush"); + assertBusy(() -> { + Map result = ObjectPath.createFromResponse(oldNodeClient.performRequest(request)).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(totalShards)); + assertThat(result.get("failed"), equalTo(0)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient newNodeClient = buildClient(restClientSettings(), + nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush"); assertBusy(() -> { Map result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards"); assertThat(result.get("total"), equalTo(totalShards)); diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java new file mode 100644 index 0000000000000..705caa526a7d9 --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.backwards; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.List; + +/** + * Test that index enough data to trigger the creation of Cuckoo filters. 
+ */ +public class RareTermsIT extends ESRestTestCase { + + private static final String index = "idx"; + + private int indexDocs(int numDocs, int id) throws Exception { + final Request request = new Request("POST", "/_bulk"); + final StringBuilder builder = new StringBuilder(); + for (int i = 0; i < numDocs; ++i) { + builder.append("{ \"index\" : { \"_index\" : \"" + index + "\", \"_id\": \"" + id++ + "\" } }\n"); + builder.append("{\"str_value\" : \"s" + i + "\"}\n"); + } + request.setJsonEntity(builder.toString()); + assertOK(client().performRequest(request)); + return id; + } + + public void testSingleValuedString() throws Exception { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + createIndex(index, settings.build()); + // We want to trigger the usage oif cuckoo filters that happen only when there are + // more than 10k distinct values in one shard. + final int numDocs = randomIntBetween(12000, 17000); + int id = 1; + // Index every value 5 times + for (int i = 0; i < 5; i++) { + id = indexDocs(numDocs, id); + refreshAllIndices(); + } + // There are no rare terms that only appear in one document + assertNumRareTerms(1, 0); + // All terms have a cardinality lower than 10 + assertNumRareTerms(10, numDocs); + } + + private void assertNumRareTerms(int maxDocs, int rareTerms) throws IOException { + final Request request = new Request("POST", index + "/_search"); + request.setJsonEntity( + "{\"aggs\" : {\"rareTerms\" : {\"rare_terms\" : {\"field\" : \"str_value.keyword\", \"max_doc_count\" : " + maxDocs + "}}}}" + ); + final Response response = client().performRequest(request); + assertOK(response); + final Object o = XContentMapValues.extractValue("aggregations.rareTerms.buckets", responseAsMap(response)); + assertThat(o, Matchers.instanceOf(List.class)); + assertThat(((List) o).size(), Matchers.equalTo(rareTerms)); + } +} diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 009e83daffeab..817a9a41e42ec 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -9,7 +9,7 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-resources' dependencies { @@ -46,6 +46,4 @@ tasks.register("integTest") { dependsOn "mixedClusterTest" } -tasks.named("test").configure { enabled = false }// no unit tests for multi-cluster-search, only integration tests - tasks.named("check").configure { dependsOn("integTest") } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index b4aafc797e5e8..fe0a206d787ac 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.TimeUnits; import org.elasticsearch.action.ActionListener; @@ -74,6 +75,7 @@ import org.elasticsearch.search.suggest.term.TermSuggestion; import 
org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; import org.junit.Before; @@ -206,8 +208,7 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) assertTrue(bulkProcessor.awaitClose(30, TimeUnit.SECONDS)); RefreshResponse refreshResponse = restHighLevelClient.indices().refresh(new RefreshRequest(INDEX_NAME), RequestOptions.DEFAULT); - assertEquals(0, refreshResponse.getFailedShards()); - assertEquals(numShards, refreshResponse.getSuccessfulShards()); + ElasticsearchAssertions.assertNoFailures(refreshResponse); } private static IndexRequest buildIndexRequest(String id, String type, String questionId) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java index c83c492106467..b8d4f19ba3ca5 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java @@ -20,6 +20,8 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeFalse; import static java.nio.file.StandardOpenOption.APPEND; @@ -46,6 +48,8 @@ public static void cleanupFiles() { public void test10Install() throws Exception { install(); + // Enable security for this test only where it is necessary, until we can enable it for all + ServerUtils.enableSecurityFeatures(installation); } public void test20Help() { @@ -107,8 +111,24 @@ public void test40RunWithCert() throws Exception { Files.write(installation.config("elasticsearch.yml"), yaml, CREATE, APPEND); - assertWhileRunning( - () -> ServerUtils.makeRequest(Request.Get("https://127.0.0.1:9200"), null, null, installation.config("certs/ca/ca.crt")) - ); + assertWhileRunning(() -> { + final String password = setElasticPassword(); + assertNotNull(password); + ServerUtils.makeRequest(Request.Get("https://127.0.0.1:9200"), "elastic", password, installation.config("certs/ca/ca.crt")); + }); + } + + private String setElasticPassword() { + final Pattern userpassRegex = Pattern.compile("PASSWORD (\\w+) = ([^\\s]+)"); + Shell.Result result = installation.executables().setupPasswordsTool.run("auto --batch", null); + Matcher matcher = userpassRegex.matcher(result.stdout); + assertNotNull(matcher); + while (matcher.find()) { + if (matcher.group(1).equals("elastic")) { + return matcher.group(2); + } + } + return null; } + } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index d6ea55f7b1102..295ccd970dd7a 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -80,6 +80,8 @@ */ public class DockerTests extends PackagingTestCase { private Path tempDir; + private static final String USERNAME = "elastic"; + private static final String PASSWORD = "nothunter2"; @BeforeClass public static void filterDistros() { @@ -88,7 +90,10 @@ public static void filterDistros() { @Before public void setupTest() throws IOException { - installation = runContainer(distribution(), 
builder().envVars(Map.of("ingest.geoip.downloader.enabled", "false"))); + installation = runContainer( + distribution(), + builder().envVars(Map.of("ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + ); tempDir = createTempDir(DockerTests.class.getSimpleName()); } @@ -106,12 +111,18 @@ public void test010Install() { } /** - * Check that the /_xpack API endpoint's presence is correct for the type of distribution being tested. + * Check that security is enabled */ - public void test011PresenceOfXpack() throws Exception { + public void test011SecurityEnabledStatus() throws Exception { + waitForElasticsearch(installation, USERNAME, PASSWORD); + final int statusCode = ServerUtils.makeRequestAndGetStatus(Request.Get("http://localhost:9200"), USERNAME, "wrong_password", null); + assertThat(statusCode, equalTo(401)); + + // restart container with security disabled + runContainer(distribution(), builder().envVars(Map.of("xpack.security.enabled", "false"))); waitForElasticsearch(installation); - final int statusCode = Request.Get("http://localhost:9200/_xpack").execute().returnResponse().getStatusLine().getStatusCode(); - assertThat(statusCode, equalTo(200)); + final int unauthStatusCode = ServerUtils.makeRequestAndGetStatus(Request.Get("http://localhost:9200"), null, null, null); + assertThat(unauthStatusCode, equalTo(200)); } /** @@ -147,9 +158,9 @@ public void test041AmazonCaCertsAreInTheKeystore() { * Check that when the keystore is created on startup, it is created with the correct permissions. */ public void test042KeystorePermissionsAreCorrect() throws Exception { - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); - assertPermissionsAndOwnership(installation.config("elasticsearch.keystore"), p660); + assertPermissionsAndOwnership(installation.config("elasticsearch.keystore"), "elasticsearch", "root", p660); } /** @@ -157,11 +168,11 @@ public void test042KeystorePermissionsAreCorrect() throws Exception { * is minimally functional. 
*/ public void test050BasicApiTests() throws Exception { - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); assertTrue(existsInContainer(installation.logs.resolve("gc.log"))); - ServerUtils.runElasticsearchTests(); + ServerUtils.runElasticsearchTests(USERNAME, PASSWORD); } /** @@ -185,11 +196,24 @@ public void test070BindMountCustomPathConfAndJvmOptions() throws Exception { // Restart the container final Map volumes = Map.of(tempDir, Path.of("/usr/share/elasticsearch/config")); - runContainer(distribution(), builder().volumes(volumes).envVars(Map.of("ES_JAVA_OPTS", "-XX:-UseCompressedOops"))); + runContainer( + distribution(), + builder().volumes(volumes) + .envVars( + Map.of( + "ES_JAVA_OPTS", + "-XX:-UseCompressedOops", + "ingest.geoip.downloader.enabled", + "false", + "ELASTIC_PASSWORD", + PASSWORD + ) + ) + ); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); - final JsonNode nodes = getJson("/_nodes").get("nodes"); + final JsonNode nodes = getJson("/_nodes", USERNAME, PASSWORD).get("nodes"); final String nodeId = nodes.fieldNames().next(); final int heapSize = nodes.at("/" + nodeId + "/jvm/mem/heap_init_in_bytes").intValue(); @@ -213,11 +237,14 @@ public void test071BindMountCustomPathWithDifferentUID() throws Exception { // Restart the container final Map volumes = Map.of(tempEsDataDir.toAbsolutePath(), installation.data); - runContainer(distribution(), builder().volumes(volumes)); + runContainer( + distribution(), + builder().volumes(volumes).envVars(Map.of("ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + ); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); - final JsonNode nodes = getJson("/_nodes"); + final JsonNode nodes = getJson("/_nodes", USERNAME, PASSWORD); assertThat(nodes.at("/_nodes/total").intValue(), equalTo(1)); assertThat(nodes.at("/_nodes/successful").intValue(), equalTo(1)); @@ -264,9 +291,14 @@ public void test072RunEsAsDifferentUserAndGroup() throws Exception { volumes.put(tempEsLogsDir.toAbsolutePath(), installation.logs); // Restart the container - runContainer(distribution(), builder().volumes(volumes).uid(501, 501)); + runContainer( + distribution(), + builder().volumes(volumes) + .envVars(Map.of("ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + .uid(501, 501) + ); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); } /** @@ -275,9 +307,14 @@ public void test072RunEsAsDifferentUserAndGroup() throws Exception { */ public void test073RunEsAsDifferentUserAndGroupWithoutBindMounting() throws Exception { // Restart the container - runContainer(distribution(), builder().uid(501, 501).extraArgs("--group-add 0")); + runContainer( + distribution(), + builder().envVars(Map.of("ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + .uid(501, 501) + .extraArgs("--group-add 0") + ); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); } /** @@ -290,13 +327,7 @@ public void test080ConfigurePasswordThroughEnvironmentVariableFile() throws Exce // ELASTIC_PASSWORD_FILE Files.writeString(tempDir.resolve(passwordFilename), xpackPassword + "\n"); - Map envVars = Map.of( - "ELASTIC_PASSWORD_FILE", - "/run/secrets/" + passwordFilename, - // Enable security so that we can test that the password has been used - "xpack.security.enabled", - "true" - ); + Map envVars = 
Map.of("ELASTIC_PASSWORD_FILE", "/run/secrets/" + passwordFilename); // File permissions need to be secured in order for the ES wrapper to accept // them for populating env var values @@ -344,13 +375,7 @@ public void test081SymlinksAreFollowedWithEnvironmentVariableFiles() throws Exce // it won't resolve inside the container. Files.createSymbolicLink(tempDir.resolve(symlinkFilename), Path.of(passwordFilename)); - Map envVars = Map.of( - "ELASTIC_PASSWORD_FILE", - "/run/secrets/" + symlinkFilename, - // Enable security so that we can test that the password has been used - "xpack.security.enabled", - "true" - ); + Map envVars = Map.of("ELASTIC_PASSWORD_FILE", "/run/secrets/" + symlinkFilename); // File permissions need to be secured in order for the ES wrapper to accept // them for populating env var values. The wrapper will resolve the symlink @@ -436,13 +461,7 @@ public void test084SymlinkToFileWithInvalidPermissionsIsRejected() throws Except // it won't resolve inside the container. Files.createSymbolicLink(tempDir.resolve(symlinkFilename), Path.of(passwordFilename)); - Map envVars = Map.of( - "ELASTIC_PASSWORD_FILE", - "/run/secrets/" + symlinkFilename, - // Enable security so that we can test that the password has been used - "xpack.security.enabled", - "true" - ); + Map envVars = Map.of("ELASTIC_PASSWORD_FILE", "/run/secrets/" + symlinkFilename); // Set invalid permissions on the file that the symlink targets Files.setPosixFilePermissions(tempDir.resolve(passwordFilename), p775); @@ -469,10 +488,7 @@ public void test084SymlinkToFileWithInvalidPermissionsIsRejected() throws Except * `docker exec`, where the Docker image's entrypoint is not executed. */ public void test085EnvironmentVariablesAreRespectedUnderDockerExec() throws Exception { - installation = runContainer( - distribution(), - builder().envVars(Map.of("xpack.security.enabled", "true", "ELASTIC_PASSWORD", "hunter2")) - ); + installation = runContainer(distribution(), builder().envVars(Map.of("ELASTIC_PASSWORD", "hunter2"))); // The tool below requires a keystore, so ensure that ES is fully initialised before proceeding. waitForElasticsearch("green", null, installation, "elastic", "hunter2"); @@ -486,6 +502,52 @@ public void test085EnvironmentVariablesAreRespectedUnderDockerExec() throws Exce assertThat(result.stdout, containsString("java.net.UnknownHostException: this.is.not.valid")); } + /** + * Check that settings are applied when they are supplied as environment variables with names that are: + *
+ * <ul>
+ *   <li>Prefixed with {@code ES_SETTING_}</li>
+ *   <li>All uppercase</li>
+ *   <li>Dots (periods) are converted to underscores</li>
+ *   <li>Underscores in setting names are escaped by doubling them</li>
+ * </ul>
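The four rules above amount to a mechanical string transformation. As a rough illustration only (a standalone sketch, not the Docker image's actual entrypoint logic; the class and method names here are invented), the mapping that test086 and test087 below exercise can be expressed as:

import java.util.Locale;
import java.util.Optional;

final class EsSettingEnvVarSketch {

    /** Maps an environment variable name to a setting name, or empty if the variable is ignored. */
    static Optional<String> toSettingName(String name) {
        // Only all-uppercase names with the ES_SETTING_ prefix are translated (see test087).
        if (name.startsWith("ES_SETTING_") == false || name.equals(name.toUpperCase(Locale.ROOT)) == false) {
            return Optional.empty();
        }
        String remainder = name.substring("ES_SETTING_".length()).toLowerCase(Locale.ROOT);
        String setting = remainder
            .replace("__", "\u0000")  // a doubled underscore stands for a literal underscore
            .replace('_', '.')        // every remaining underscore was a dot in the setting name
            .replace('\u0000', '_');  // restore the escaped underscores
        return Optional.of(setting);
    }

    public static void main(String[] args) {
        // Matches the expectation in test086: -Expack.security.fips_mode.enabled=false
        System.out.println(toSettingName("ES_SETTING_XPACK_SECURITY_FIPS__MODE_ENABLED"));
        // Ignored, as in test087: missing prefix or not uppercase
        System.out.println(toSettingName("XPACK_SECURITY_FIPS__MODE_ENABLED"));
        System.out.println(toSettingName("es_setting_xpack_security_fips__mode_enabled"));
    }
}

Run as-is, this prints Optional[xpack.security.fips_mode.enabled] followed by two empty Optionals, which is the same accept/ignore split the two tests assert against the Elasticsearch command line.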
+ */ + public void test086EnvironmentVariablesInSnakeCaseAreTranslated() { + // Note the double-underscore in the var name here, which retains the underscore in translation + installation = runContainer(distribution(), builder().envVars(Map.of("ES_SETTING_XPACK_SECURITY_FIPS__MODE_ENABLED", "false"))); + + final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) + .findFirst(); + + assertThat(commandLine.isPresent(), equalTo(true)); + + assertThat(commandLine.get(), containsString("-Expack.security.fips_mode.enabled=false")); + } + + /** + * Check that environment variables that do not match the criteria for translation to settings are ignored. + */ + public void test087EnvironmentVariablesInIncorrectFormatAreIgnored() { + final Map envVars = new HashMap<>(); + // No ES_SETTING_ prefix + envVars.put("XPACK_SECURITY_FIPS__MODE_ENABLED", "false"); + // Incomplete prefix + envVars.put("ES_XPACK_SECURITY_FIPS__MODE_ENABLED", "false"); + // Not underscore-separated + envVars.put("ES.XPACK.SECURITY.FIPS_MODE.ENABLED", "false"); + // Not uppercase + envVars.put("es_xpack_security_fips__mode_enabled", "false"); + installation = runContainer(distribution(), builder().envVars(envVars)); + + final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) + .findFirst(); + + assertThat(commandLine.isPresent(), equalTo(true)); + + assertThat(commandLine.get(), not(containsString("-Expack.security.fips_mode.enabled=false"))); + } + /** * Check whether the elasticsearch-certutil tool has been shipped correctly, * and if present then it can execute. @@ -628,7 +690,7 @@ public void test110OrgOpencontainersLabels() throws Exception { * Check that the container logs contain the expected content for Elasticsearch itself. */ public void test120DockerLogsIncludeElasticsearchLogs() throws Exception { - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); final Result containerLogs = getContainerLogs(); assertThat("Container logs should contain full class names", containerLogs.stdout, containsString("org.elasticsearch.node.Node")); @@ -639,9 +701,12 @@ public void test120DockerLogsIncludeElasticsearchLogs() throws Exception { * Check that it is possible to write logs to disk */ public void test121CanUseStackLoggingConfig() throws Exception { - runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "file"))); + runContainer( + distribution(), + builder().envVars(Map.of("ES_LOG_STYLE", "file", "ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + ); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); final Result containerLogs = getContainerLogs(); final List stdout = containerLogs.stdout.lines().collect(Collectors.toList()); @@ -658,9 +723,12 @@ public void test121CanUseStackLoggingConfig() throws Exception { * Check that the default logging config can be explicitly selected. 
*/ public void test122CanUseDockerLoggingConfig() throws Exception { - runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "console"))); + runContainer( + distribution(), + builder().envVars(Map.of("ES_LOG_STYLE", "console", "ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + ); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); final Result containerLogs = getContainerLogs(); final List stdout = containerLogs.stdout.lines().collect(Collectors.toList()); @@ -682,14 +750,14 @@ public void test123CannotUseUnknownLoggingConfig() { * Check that it when configuring logging to write to disk, the container can be restarted. */ public void test124CanRestartContainerWithStackLoggingConfig() throws Exception { - runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "file"))); + runContainer(distribution(), builder().envVars(Map.of("ES_LOG_STYLE", "file", "ELASTIC_PASSWORD", PASSWORD))); - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); restartContainer(); // If something went wrong running Elasticsearch the second time, this will fail. - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); } /** @@ -725,9 +793,9 @@ public void test131InitProcessHasCorrectPID() { * Check that Elasticsearch reports per-node cgroup information. */ public void test140CgroupOsStatsAreAvailable() throws Exception { - waitForElasticsearch(installation); + waitForElasticsearch(installation, USERNAME, PASSWORD); - final JsonNode nodes = getJson("/_nodes/stats/os").get("nodes"); + final JsonNode nodes = getJson("/_nodes/stats/os", USERNAME, PASSWORD).get("nodes"); final String nodeId = nodes.fieldNames().next(); @@ -756,8 +824,13 @@ public void test150MachineDependentHeap() throws Exception { Files.writeString(jvmOptionsPath, String.join("\n", jvmOptions)); // Now run the container, being explicit about the available memory - runContainer(distribution(), builder().memory("942m").volumes(Map.of(jvmOptionsPath, containerJvmOptionsPath))); - waitForElasticsearch(installation); + runContainer( + distribution(), + builder().memory("942m") + .volumes(Map.of(jvmOptionsPath, containerJvmOptionsPath)) + .envVars(Map.of("ingest.geoip.downloader.enabled", "false", "ELASTIC_PASSWORD", PASSWORD)) + ); + waitForElasticsearch(installation, USERNAME, PASSWORD); // Grab the container output and find the line where it print the JVM arguments. This will // let us see what the automatic heap sizing calculated. 
diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java index e982c7609bd2c..4d22d6cacf118 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java @@ -56,6 +56,8 @@ public class KeystoreManagementTests extends PackagingTestCase { public static final String ERROR_CORRUPTED_KEYSTORE = "Keystore has been corrupted or tampered with"; public static final String ERROR_KEYSTORE_NOT_PASSWORD_PROTECTED = "ERROR: Keystore is not password-protected"; public static final String ERROR_KEYSTORE_NOT_FOUND = "ERROR: Elasticsearch keystore not found"; + private static final String USERNAME = "elastic"; + private static final String PASSWORD = "nothunter2"; /** Test initial archive state */ public void test10InstallArchiveDistribution() throws Exception { @@ -260,54 +262,62 @@ public void test51WrongKeystorePasswordFromFile() throws Exception { * Check that we can mount a password-protected keystore to a docker image * and provide a password via an environment variable. */ + @AwaitsFix(bugUrl = "Keystore fails to save with resource busy") public void test60DockerEnvironmentVariablePassword() throws Exception { assumeTrue(distribution().isDocker()); String password = "keystore-password"; - Path dockerKeystore = installation.config("elasticsearch.keystore"); - Path localKeystoreFile = getKeystoreFileFromDockerContainer(password, dockerKeystore); + Path localConfigDir = getMountedLocalConfDirWithKeystore(password, installation.config); - // restart ES with password and mounted keystore - Map volumes = Map.of(localKeystoreFile, dockerKeystore); - Map envVars = Map.of("KEYSTORE_PASSWORD", password, "ingest.geoip.downloader.enabled", "false"); + // restart ES with password and mounted config dir containing password protected keystore + Map volumes = Map.of(localConfigDir.resolve("config"), installation.config); + Map envVars = Map.of( + "KEYSTORE_PASSWORD", + password, + "ingest.geoip.downloader.enabled", + "false", + "ELASTIC_PASSWORD", + PASSWORD + ); runContainer(distribution(), builder().volumes(volumes).envVars(envVars)); - waitForElasticsearch(installation); - ServerUtils.runElasticsearchTests(); + waitForElasticsearch(installation, USERNAME, PASSWORD); + ServerUtils.runElasticsearchTests(USERNAME, PASSWORD); } /** * Check that we can mount a password-protected keystore to a docker image * and provide a password via a file, pointed at from an environment variable. 
*/ + @AwaitsFix(bugUrl = "Keystore fails to save with resource busy") public void test61DockerEnvironmentVariablePasswordFromFile() throws Exception { assumeTrue(distribution().isDocker()); Path tempDir = null; try { - tempDir = createTempDir(DockerTests.class.getSimpleName()); + tempDir = createTempDir(KeystoreManagementTests.class.getSimpleName()); String password = "keystore-password"; String passwordFilename = "password.txt"; Files.writeString(tempDir.resolve(passwordFilename), password + "\n"); Files.setPosixFilePermissions(tempDir.resolve(passwordFilename), p600); - Path dockerKeystore = installation.config("elasticsearch.keystore"); + Path localConfigDir = getMountedLocalConfDirWithKeystore(password, installation.config); - Path localKeystoreFile = getKeystoreFileFromDockerContainer(password, dockerKeystore); - - // restart ES with password and mounted keystore - Map volumes = Map.of(localKeystoreFile, dockerKeystore, tempDir, Path.of("/run/secrets")); + // restart ES with password and mounted config dir containing password protected keystore + Map volumes = Map.of(localConfigDir.resolve("config"), installation.config, tempDir, Path.of("/run/secrets")); Map envVars = Map.of( "KEYSTORE_PASSWORD_FILE", "/run/secrets/" + passwordFilename, "ingest.geoip.downloader.enabled", - "false" + "false", + "ELASTIC_PASSWORD", + PASSWORD ); runContainer(distribution(), builder().volumes(volumes).envVars(envVars)); - waitForElasticsearch(installation); - ServerUtils.runElasticsearchTests(); + waitForElasticsearch(installation, USERNAME, PASSWORD); + ServerUtils.runElasticsearchTests(USERNAME, PASSWORD); } finally { if (tempDir != null) { rm(tempDir); @@ -319,15 +329,15 @@ public void test61DockerEnvironmentVariablePasswordFromFile() throws Exception { * Check that if we provide the wrong password for a mounted and password-protected * keystore, Elasticsearch doesn't start. */ + @AwaitsFix(bugUrl = "Keystore fails to save with resource busy") public void test62DockerEnvironmentVariableBadPassword() throws Exception { assumeTrue(distribution().isDocker()); String password = "keystore-password"; - Path dockerKeystore = installation.config("elasticsearch.keystore"); - Path localKeystoreFile = getKeystoreFileFromDockerContainer(password, dockerKeystore); + Path localConfigPath = getMountedLocalConfDirWithKeystore(password, installation.config); - // restart ES with password and mounted keystore - Map volumes = Map.of(localKeystoreFile, dockerKeystore); + // restart ES with password and mounted config dir containing password protected keystore + Map volumes = Map.of(localConfigPath.resolve("config"), installation.config); Map envVars = Map.of("KEYSTORE_PASSWORD", "wrong"); Shell.Result r = runContainerExpectingFailure(distribution(), builder().volumes(volumes).envVars(envVars)); assertThat(r.stderr, containsString(ERROR_INCORRECT_PASSWORD)); @@ -340,7 +350,7 @@ public void test62DockerEnvironmentVariableBadPassword() throws Exception { * the keystore, and then returns the path of the file that appears in the * mounted directory (now accessible from the local filesystem). 
*/ - private Path getKeystoreFileFromDockerContainer(String password, Path dockerKeystore) throws IOException { + private Path getMountedLocalConfDirWithKeystore(String password, Path dockerKeystore) throws IOException { // Mount a temporary directory for copying the keystore Path dockerTemp = Path.of("/usr/tmp/keystore-tmp"); Path tempDirectory = createTempDir(KeystoreManagementTests.class.getSimpleName()); @@ -373,8 +383,8 @@ private Path getKeystoreFileFromDockerContainer(String password, Path dockerKeys sh.run("bash " + dockerTemp.resolve("set-pass.sh")); // copy keystore to temp file to make it available to docker host - sh.run("cp " + dockerKeystore + " " + dockerTemp); - return tempDirectory.resolve("elasticsearch.keystore"); + sh.run("cp -arf " + dockerKeystore + " " + dockerTemp); + return tempDirectory; } /** Create a keystore. Provide a password to password-protect it, otherwise use null */ @@ -383,12 +393,6 @@ private void createKeystore(String password) throws Exception { final Installation.Executables bin = installation.executables(); bin.keystoreTool.run("create"); - // this is a hack around the fact that we can't run a command in the same session as the same user but not as administrator. - // the keystore ends up being owned by the Administrators group, so we manually set it to be owned by the vagrant user here. - // from the server's perspective the permissions aren't really different, this is just to reflect what we'd expect in the tests. - // when we run these commands as a role user we won't have to do this - Platforms.onWindows(() -> sh.chown(keystore)); - if (distribution().isDocker()) { try { waitForPathToExist(keystore); @@ -400,6 +404,12 @@ private void createKeystore(String password) throws Exception { if (password != null) { setKeystorePassword(password); } + + // this is a hack around the fact that we can't run a command in the same session as the same user but not as administrator. - // the keystore ends up being owned by the Administrators group, so we manually set it to be owned by the vagrant user here. + // the keystore ends up being owned by the Administrators group, so we manually set it to be owned by the vagrant user here. + // from the server's perspective the permissions aren't really different, this is just to reflect what we'd expect in the tests.
+ // when we run these commands as a role user we won't have to do this + Platforms.onWindows(() -> sh.chown(keystore)); } private void rmKeystoreIfExists() { @@ -450,7 +460,7 @@ private void verifyKeystorePermissions() { case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - assertPermissionsAndOwnership(keystore, p660); + assertPermissionsAndOwnership(keystore, "elasticsearch", "root", p660); break; default: throw new IllegalStateException("Unknown Elasticsearch packaging type."); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java index 7ec651d19d37a..94f86aedcea79 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageUpgradeTests.java @@ -83,16 +83,19 @@ public void test20InstallUpgradedVersion() throws Exception { verifyPackageInstallation(installation, distribution, sh); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/76283") public void test21CheckUpgradedVersion() throws Exception { assertWhileRunning(() -> { assertDocsExist(); }); } private void assertDocsExist() throws Exception { - String response1 = makeRequest(Request.Get("http://localhost:9200/library/_doc/1?pretty")); + // We can properly handle this as part of https://github.com/elastic/elasticsearch/issues/75940 + // For now we can use elastic with "keystore.seed" as we set it explicitly in PackageUpgradeTests#test11ModifyKeystore + String response1 = makeRequest(Request.Get("http://localhost:9200/library/_doc/1?pretty"), "elastic", "keystore_seed", null); assertThat(response1, containsString("Elasticsearch")); - String response2 = makeRequest(Request.Get("http://localhost:9200/library/_doc/2?pretty")); + String response2 = makeRequest(Request.Get("http://localhost:9200/library/_doc/2?pretty"), "elastic", "keystore_seed", null); assertThat(response2, containsString("World")); - String response3 = makeRequest(Request.Get("http://localhost:9200/library2/_doc/1?pretty")); + String response3 = makeRequest(Request.Get("http://localhost:9200/library2/_doc/1?pretty"), "elastic", "keystore_seed", null); assertThat(response3, containsString("Darkness")); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java index c78af7882bbb5..e5cb19ddb6196 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PasswordToolsTests.java @@ -17,9 +17,7 @@ import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardOpenOption; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -41,15 +39,13 @@ public void filterDistros() { public void test010Install() throws Exception { install(); - Files.write( - installation.config("elasticsearch.yml"), - List.of("xpack.license.self_generated.type: trial", "xpack.security.enabled: true"), - StandardOpenOption.APPEND - ); + // Enable security for this test only where it is necessary, until we can enable it for all + ServerUtils.enableSecurityFeatures(installation); } public void test20GeneratePasswords() throws Exception { assertWhileRunning(() -> { + ServerUtils.waitForElasticsearch(installation); Shell.Result result = 
installation.executables().setupPasswordsTool.run("auto --batch", null); Map userpasses = parseUsersAndPasswords(result.stdout); for (Map.Entry userpass : userpasses.entrySet()) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/QuotaAwareFsTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/QuotaAwareFsTests.java deleted file mode 100644 index a23853407ded0..0000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/QuotaAwareFsTests.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.packaging.test; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; - -import org.apache.http.client.fluent.Request; -import org.elasticsearch.core.CheckedRunnable; -import org.elasticsearch.packaging.util.ServerUtils; -import org.elasticsearch.packaging.util.Shell; -import org.junit.After; -import org.junit.BeforeClass; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; -import java.util.Locale; -import java.util.stream.Collectors; - -import static org.elasticsearch.packaging.util.Distribution.Platform.WINDOWS; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.emptyString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.not; -import static org.junit.Assume.assumeFalse; -import static org.junit.Assume.assumeTrue; - -/** - * Check that the quota-aware filesystem plugin can be installed, and that it operates as expected. - */ -public class QuotaAwareFsTests extends PackagingTestCase { - - // private static final String QUOTA_AWARE_FS_PLUGIN_NAME = "quota-aware-fs"; - private static final Path QUOTA_AWARE_FS_PLUGIN; - static { - // Re-read before each test so the plugin path can be manipulated within tests. - // Corresponds to DistroTestPlugin#QUOTA_AWARE_FS_PLUGIN_SYSPROP - QUOTA_AWARE_FS_PLUGIN = Paths.get(System.getProperty("tests.quota-aware-fs-plugin")); - } - - @BeforeClass - public static void filterDistros() { - assumeTrue("only archives", distribution.isArchive()); - assumeFalse("not on windows", distribution.platform == WINDOWS); - } - - @After - public void teardown() throws Exception { - super.teardown(); - cleanup(); - } - - /** - * Check that when the plugin is installed but the system property for passing the location of the related - * properties file is omitted, then Elasticsearch exits with the expected error message. - */ - public void test10ElasticsearchRequiresSystemPropertyToBeSet() throws Exception { - install(); - - installation.executables().pluginTool.run("install --batch \"" + QUOTA_AWARE_FS_PLUGIN.toUri() + "\""); - - // Without setting the `es.fs.quota.file` property, ES should exit with a failure code. 
- final Shell.Result result = runElasticsearchStartCommand(null, false, false); - - assertThat("Elasticsearch should have terminated unsuccessfully", result.isSuccess(), equalTo(false)); - assertThat( - result.stderr, - containsString("Property es.fs.quota.file must be set to a URI in order to use the quota filesystem provider") - ); - } - - /** - * Check that when the plugin is installed but the system property for passing the location of the related - * properties file contains a non-existent URI, then Elasticsearch exits with the expected error message. - */ - public void test20ElasticsearchRejectsNonExistentPropertiesLocation() throws Exception { - install(); - - installation.executables().pluginTool.run("install --batch \"" + QUOTA_AWARE_FS_PLUGIN.toUri() + "\""); - - sh.getEnv().put("ES_JAVA_OPTS", "-Des.fs.quota.file=file:///this/does/not/exist.properties"); - - final Shell.Result result = runElasticsearchStartCommand(null, false, false); - - // Generate a Path for this location so that the platform-specific line-endings will be used. - final String platformPath = Path.of("/this/does/not/exist.properties").toString(); - - assertThat("Elasticsearch should have terminated unsuccessfully", result.isSuccess(), equalTo(false)); - assertThat(result.stderr, containsString("NoSuchFileException: " + platformPath)); - } - - /** - * Check that Elasticsearch can load the plugin and apply the quota limits in the properties file. Also check that - * Elasticsearch polls the file for changes. - */ - public void test30ElasticsearchStartsWhenSystemPropertySet() throws Exception { - install(); - - int total = 20 * 1024 * 1024; - int available = 10 * 1024 * 1024; - - installation.executables().pluginTool.run("install --batch \"" + QUOTA_AWARE_FS_PLUGIN.toUri() + "\""); - - final Path quotaPath = getRootTempDir().resolve("quota.properties"); - Files.writeString(quotaPath, String.format(Locale.ROOT, "total=%d\nremaining=%d\n", total, available)); - - sh.getEnv().put("ES_JAVA_OPTS", "-Des.fs.quota.file=" + quotaPath.toUri()); - - startElasticsearchAndThen(() -> { - final Totals actualTotals = fetchFilesystemTotals(); - - assertThat(actualTotals.totalInBytes, equalTo(total)); - assertThat(actualTotals.availableInBytes, equalTo(available)); - - int updatedTotal = total * 3; - int updatedAvailable = available * 3; - - // Check that ES is polling the properties file for changes by modifying the properties file - // and waiting for ES to pick up the changes. - Files.writeString(quotaPath, String.format(Locale.ROOT, "total=%d\nremaining=%d\n", updatedTotal, updatedAvailable)); - - // The check interval is 1000ms, but give ourselves some leeway. - Thread.sleep(2000); - - final Totals updatedActualTotals = fetchFilesystemTotals(); - - assertThat(updatedActualTotals.totalInBytes, equalTo(updatedTotal)); - assertThat(updatedActualTotals.availableInBytes, equalTo(updatedAvailable)); - }); - } - - /** - * Check that the _cat API can list the plugin correctly. 
- */ - public void test40CatApiFiltersPlugin() throws Exception { - install(); - - int total = 20 * 1024 * 1024; - int available = 10 * 1024 * 1024; - - installation.executables().pluginTool.run("install --batch \"" + QUOTA_AWARE_FS_PLUGIN.toUri() + "\""); - - final Path quotaPath = getRootTempDir().resolve("quota.properties"); - Files.writeString(quotaPath, String.format(Locale.ROOT, "total=%d\nremaining=%d\n", total, available)); - - sh.getEnv().put("ES_JAVA_OPTS", "-Des.fs.quota.file=" + quotaPath.toUri()); - - startElasticsearchAndThen(() -> { - final String uri = "http://localhost:9200/_cat/plugins?include_bootstrap=true&h=component,type"; - String response = ServerUtils.makeRequest(Request.Get(uri)).trim(); - assertThat(response, not(emptyString())); - - List lines = response.lines().collect(Collectors.toList()); - assertThat(lines, hasSize(1)); - - final String[] fields = lines.get(0).split(" "); - assertThat(fields, arrayContaining("quota-aware-fs", "bootstrap")); - }); - } - - private void startElasticsearchAndThen(CheckedRunnable runnable) throws Exception { - boolean started = false; - try { - startElasticsearch(); - started = true; - - runnable.run(); - } finally { - if (started) { - stopElasticsearch(); - } - } - } - - private static class Totals { - int totalInBytes; - int availableInBytes; - - Totals(int totalInBytes, int availableInBytes) { - this.totalInBytes = totalInBytes; - this.availableInBytes = availableInBytes; - } - } - - private Totals fetchFilesystemTotals() { - try { - final String response = ServerUtils.makeRequest(Request.Get("http://localhost:9200/_nodes/stats")); - - final ObjectMapper mapper = new ObjectMapper(); - final JsonNode rootNode = mapper.readTree(response); - - assertThat("Some nodes failed", rootNode.at("/_nodes/failed").intValue(), equalTo(0)); - - final String nodeId = rootNode.get("nodes").fieldNames().next(); - - final JsonNode fsNode = rootNode.at("/nodes/" + nodeId + "/fs/total"); - - return new Totals(fsNode.get("total_in_bytes").intValue(), fsNode.get("available_in_bytes").intValue()); - } catch (Exception e) { - throw new RuntimeException("Failed to fetch filesystem totals: " + e.getMessage(), e); - } - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java index 516bdbaacfab1..99252690509bf 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java @@ -107,6 +107,9 @@ public static Installation installArchive(Shell sh, Distribution distribution, P Installation installation = Installation.ofArchive(sh, distribution, fullInstallPath); ServerUtils.disableGeoIpDownloader(installation); + // TODO: Adjust all tests so that they can run with security on, which is the default behavior + // https://github.com/elastic/elasticsearch/issues/75940 + ServerUtils.possiblyDisableSecurityFeatures(installation); return installation; } @@ -197,6 +200,7 @@ private static void verifyDefaultInstallation(Installation es, Distribution dist "elasticsearch-certutil", "elasticsearch-croneval", "elasticsearch-saml-metadata", + "elasticsearch-security-config", "elasticsearch-setup-passwords", "elasticsearch-sql-cli", "elasticsearch-syskeygen", diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java index 5b9333cabb461..ba700a95303e0 100644 --- 
a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java @@ -30,12 +30,11 @@ import static java.nio.file.attribute.PosixFilePermissions.fromString; import static org.elasticsearch.packaging.util.DockerRun.getImageName; +import static org.elasticsearch.packaging.util.FileMatcher.p444; import static org.elasticsearch.packaging.util.FileMatcher.p555; -import static org.elasticsearch.packaging.util.FileMatcher.p644; import static org.elasticsearch.packaging.util.FileMatcher.p664; import static org.elasticsearch.packaging.util.FileMatcher.p770; import static org.elasticsearch.packaging.util.FileMatcher.p775; -import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion; import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; @@ -189,7 +188,7 @@ private static void waitForElasticsearchToExit() { } catch (Exception e) { logger.warn("Caught exception while waiting for ES to exit", e); } - } while (attempt++ < 5); + } while (attempt++ < 8); if (isElasticsearchRunning) { final Shell.Result dockerLogs = getContainerLogs(); @@ -214,7 +213,7 @@ public static void removeContainer() { // I'm not sure why we're already removing this container, but that's OK. if (isErrorAcceptable == false) { - throw new RuntimeException("Command was not successful: [" + command + "] result: " + result.toString()); + throw new RuntimeException("Command was not successful: [" + command + "] result: " + result); } } } finally { @@ -248,8 +247,6 @@ protected String[] getScriptCommand(String script) { List cmd = new ArrayList<>(); cmd.add("docker"); cmd.add("exec"); - cmd.add("--user"); - cmd.add("elasticsearch:root"); cmd.add("--tty"); env.forEach((key, value) -> cmd.add("--env " + key + "=\"" + value + "\"")); @@ -402,25 +399,45 @@ public static void chownWithPrivilegeEscalation(Path localPath, String ownership /** * Checks that the specified path's permissions and ownership match those specified. - * @param path the path to check + *

+ * The implementation supports multiple files being matched by the path, via bash expansion, although + * it is expected that only the final part of the path will contain expansions. + * + * @param path the path to check, possibly with e.g. a wildcard (*) + * @param expectedUser the file's expected user + * @param expectedGroup the file's expected group * @param expectedPermissions the unix permissions that the path ought to have */ - public static void assertPermissionsAndOwnership(Path path, Set expectedPermissions) { + public static void assertPermissionsAndOwnership( + Path path, + String expectedUser, + String expectedGroup, + Set expectedPermissions + ) { logger.debug("Checking permissions and ownership of [" + path + "]"); - final String[] components = dockerShell.run("stat -c \"%U %G %A\" " + path).stdout.split("\\s+"); + final Shell.Result result = dockerShell.run("bash -c 'stat -c \"%n %U %G %A\" " + path + "'"); + + final Path parent = path.getParent(); + + result.stdout.lines().forEach(line -> { + final String[] components = line.split("\\s+"); + + final String filename = components[0]; + final String username = components[1]; + final String group = components[2]; + final String permissions = components[3]; - final String username = components[0]; - final String group = components[1]; - final String permissions = components[2]; + // The final substring() is because we don't check the directory bit, and we + // also don't want any SELinux security context indicator. + Set actualPermissions = fromString(permissions.substring(1, 10)); - // The final substring() is because we don't check the directory bit, and we - // also don't want any SELinux security context indicator. - Set actualPermissions = fromString(permissions.substring(1, 10)); + String fullPath = filename.startsWith("/") ? filename : parent + "/" + filename; - assertEquals("Permissions of " + path + " are wrong", expectedPermissions, actualPermissions); - assertThat("File owner of " + path + " is wrong", username, equalTo("elasticsearch")); - assertThat("File group of " + path + " is wrong", group, equalTo("root")); + assertEquals("Permissions of " + fullPath + " are wrong", expectedPermissions, actualPermissions); + assertThat("File owner of " + fullPath + " is wrong", username, equalTo(expectedUser)); + assertThat("File group of " + fullPath + " is wrong", group, equalTo(expectedGroup)); + }); } /** @@ -442,42 +459,39 @@ public static void waitForPathToExist(Path path) throws InterruptedException { } /** - * Perform a variety of checks on an installation. If the current distribution is not OSS, additional checks are carried out. - * @param installation the installation to verify + * Perform a variety of checks on an installation. + * @param es the installation to verify */ - public static void verifyContainerInstallation(Installation installation) { - verifyOssInstallation(installation); - verifyDefaultInstallation(installation); - } - - private static void verifyOssInstallation(Installation es) { + public static void verifyContainerInstallation(Installation es) { + // Ensure the `elasticsearch` user and group exist. 
+ // These lines will both throw an exception if the command fails dockerShell.run("id elasticsearch"); dockerShell.run("getent group elasticsearch"); final Shell.Result passwdResult = dockerShell.run("getent passwd elasticsearch"); final String homeDir = passwdResult.stdout.trim().split(":")[5]; - assertThat(homeDir, equalTo("/usr/share/elasticsearch")); + assertThat("elasticsearch user's home directory is incorrect", homeDir, equalTo("/usr/share/elasticsearch")); - Stream.of(es.home, es.data, es.logs, es.config, es.plugins).forEach(dir -> assertPermissionsAndOwnership(dir, p775)); + assertPermissionsAndOwnership(es.home, "root", "root", p775); - Stream.of(es.bin, es.bundledJdk, es.lib, es.modules).forEach(dir -> assertPermissionsAndOwnership(dir, p555)); + Stream.of(es.bundledJdk, es.lib, es.modules).forEach(dir -> assertPermissionsAndOwnership(dir, "root", "root", p555)); - Stream.of("elasticsearch.yml", "jvm.options", "log4j2.properties") - .forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p664)); + // You can't install plugins that include configuration when running as `elasticsearch` and the `config` + // dir is owned by `root`, because the installer tries to manipulate the permissions on the plugin's + // config directory. + Stream.of(es.bin, es.config, es.logs, es.config.resolve("jvm.options.d"), es.data, es.plugins) + .forEach(dir -> assertPermissionsAndOwnership(dir, "elasticsearch", "root", p775)); - assertThat(dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout, containsString("keystore.seed")); + Stream.of(es.bin, es.bundledJdk.resolve("bin"), es.modules.resolve("x-pack-ml/platform/linux-*/bin")) + .forEach(binariesPath -> assertPermissionsAndOwnership(binariesPath.resolve("*"), "root", "root", p555)); + + Stream.of("elasticsearch.yml", "jvm.options", "log4j2.properties", "role_mapping.yml", "roles.yml", "users", "users_roles") + .forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), "root", "root", p664)); - Stream.of( - "elasticsearch", - "elasticsearch-cli", - "elasticsearch-env", - "elasticsearch-keystore", - "elasticsearch-node", - "elasticsearch-plugin", - "elasticsearch-shard" - ).forEach(executable -> assertPermissionsAndOwnership(es.bin(executable), p555)); + Stream.of("LICENSE.txt", "NOTICE.txt", "README.asciidoc") + .forEach(doc -> assertPermissionsAndOwnership(es.home.resolve(doc), "root", "root", p444)); - Stream.of("LICENSE.txt", "NOTICE.txt", "README.asciidoc").forEach(doc -> assertPermissionsAndOwnership(es.home.resolve(doc), p644)); + assertThat(dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout, containsString("keystore.seed")); // nc is useful for checking network issues // zip/unzip are installed to help users who are working with certificates.
@@ -490,37 +504,6 @@ private static void verifyOssInstallation(Installation es) { ); } - private static void verifyDefaultInstallation(Installation es) { - Stream.of( - "elasticsearch-certgen", - "elasticsearch-certutil", - "elasticsearch-croneval", - "elasticsearch-saml-metadata", - "elasticsearch-setup-passwords", - "elasticsearch-sql-cli", - "elasticsearch-syskeygen", - "elasticsearch-users", - "elasticsearch-service-tokens", - "x-pack-env", - "x-pack-security-env", - "x-pack-watcher-env" - ).forEach(executable -> assertPermissionsAndOwnership(es.bin(executable), p555)); - - // at this time we only install the current version of archive distributions, but if that changes we'll need to pass - // the version through here - assertPermissionsAndOwnership(es.bin("elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), p555); - - final String architecture = getArchitecture(); - Stream.of("autodetect", "categorize", "controller", "data_frame_analyzer", "normalize", "pytorch_inference") - .forEach(executableName -> { - final Path executablePath = es.modules.resolve("x-pack-ml/platform/linux-" + architecture + "/bin/" + executableName); - assertPermissionsAndOwnership(executablePath, p555); - }); - - Stream.of("role_mapping.yml", "roles.yml", "users", "users_roles") - .forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p664)); - } - public static void waitForElasticsearch(Installation installation) throws Exception { withLogging(() -> ServerUtils.waitForElasticsearch(installation)); } @@ -530,6 +513,18 @@ public static void waitForElasticsearch(String status, String index, Installatio withLogging(() -> ServerUtils.waitForElasticsearch(status, index, installation, username, password)); } + public static void waitForElasticsearch(Installation installation, String username, String password) { + try { + waitForElasticsearch("green", null, installation, username, password); + } catch (Exception e) { + throw new AssertionError( + "Failed to check whether Elasticsearch had started. This could be because " + + "authentication isn't working properly. Check the container logs", + e + ); + } + } + /** * Runs the provided closure, and captures logging information if an exception is thrown. 
* @param r the closure to run @@ -558,7 +553,7 @@ public static String getContainerId() { * @return the parsed response */ public static JsonNode getJson(String path) throws Exception { - path = Objects.requireNonNull(path).trim(); + path = Objects.requireNonNull(path, "path can not be null").trim(); if (path.isEmpty()) { throw new IllegalArgumentException("path must be supplied"); } @@ -572,6 +567,21 @@ public static JsonNode getJson(String path) throws Exception { return mapper.readTree(pluginsResponse); } + public static JsonNode getJson(String path, String user, String password) throws Exception { + path = Objects.requireNonNull(path, "path can not be null").trim(); + if (path.isEmpty()) { + throw new IllegalArgumentException("path must be supplied"); + } + if (path.startsWith("/") == false) { + throw new IllegalArgumentException("path must start with /"); + } + final String pluginsResponse = makeRequest(Request.Get("http://localhost:9200" + path), user, password, null); + + ObjectMapper mapper = new ObjectMapper(); + + return mapper.readTree(pluginsResponse); + } + /** * Fetches all the labels for a Docker image * @param distribution required to derive the image name @@ -624,12 +634,4 @@ public static Shell.Result getContainerLogs() { public static void restartContainer() { sh.run("docker restart " + containerId); } - - private static String getArchitecture() { - String architecture = System.getProperty("os.arch", "x86_64"); - if (architecture.equals("amd64")) { - architecture = "x86_64"; - } - return architecture; - } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java index 6861c9d22724e..8e30258696a5c 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java @@ -37,15 +37,16 @@ public enum Fileness { Directory } + public static final Set p444 = fromString("r--r--r--"); public static final Set p555 = fromString("r-xr-xr-x"); - public static final Set p775 = fromString("rwxrwxr-x"); - public static final Set p770 = fromString("rwxrwx---"); - public static final Set p755 = fromString("rwxr-xr-x"); - public static final Set p750 = fromString("rwxr-x---"); - public static final Set p660 = fromString("rw-rw----"); + public static final Set p600 = fromString("rw-------"); public static final Set p644 = fromString("rw-r--r--"); + public static final Set p660 = fromString("rw-rw----"); public static final Set p664 = fromString("rw-rw-r--"); - public static final Set p600 = fromString("rw-------"); + public static final Set p750 = fromString("rwxr-x---"); + public static final Set p755 = fromString("rwxr-xr-x"); + public static final Set p770 = fromString("rwxrwx---"); + public static final Set p775 = fromString("rwxrwxr-x"); private final Fileness fileness; private final String owner; diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java index e8e6f1061ac0f..06c95c3aab887 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java @@ -185,6 +185,7 @@ public class Executables { public final Executable cronevalTool = new Executable("elasticsearch-croneval"); public final Executable shardTool = new Executable("elasticsearch-shard"); public final Executable nodeTool = new 
Executable("elasticsearch-node"); + public final Executable securityConfigTool = new Executable("elasticsearch-security-config"); public final Executable setupPasswordsTool = new Executable("elasticsearch-setup-passwords"); public final Executable sqlCli = new Executable("elasticsearch-sql-cli"); public final Executable syskeygenTool = new Executable("elasticsearch-syskeygen"); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index 733d46378607b..02f96a99684bf 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -94,6 +94,9 @@ public static Installation installPackage(Shell sh, Distribution distribution) t if (Version.fromString(distribution.baseVersion).onOrAfter(Version.V_7_13_0)) { ServerUtils.disableGeoIpDownloader(installation); } + // https://github.com/elastic/elasticsearch/issues/75940 + // TODO Figure out how to run all packaging tests with security enabled which is now the default behavior + ServerUtils.possiblyDisableSecurityFeatures(installation); return installation; } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java index 43b8c3f961f9d..453ace7ea39d7 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -49,6 +50,7 @@ import static java.nio.file.StandardOpenOption.APPEND; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; +import static org.elasticsearch.packaging.util.Docker.sh; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; @@ -56,8 +58,7 @@ public class ServerUtils { private static final Logger logger = LogManager.getLogger(ServerUtils.class); - private static String SECURITY_ENABLED = "xpack.security.enabled: true"; - private static String SSL_ENABLED = "xpack.security.http.ssl.enabled: true"; + private static String SECURITY_DISABLED = "xpack.security.enabled: false"; // generous timeout as nested virtualization can be quite slow ... 
private static final long waitTime = TimeUnit.MINUTES.toMillis(3); @@ -65,17 +66,25 @@ public class ServerUtils { private static final long requestInterval = TimeUnit.SECONDS.toMillis(5); public static void waitForElasticsearch(Installation installation) throws Exception { - boolean xpackEnabled = false; + boolean securityEnabled; - // TODO: need a way to check if docker has security enabled, the yml config is not bind mounted so can't look from here if (installation.distribution.isDocker() == false) { Path configFilePath = installation.config("elasticsearch.yml"); // this is fragile, but currently doesn't deviate from a single line enablement and not worth the parsing effort String configFile = Files.readString(configFilePath, StandardCharsets.UTF_8); - xpackEnabled = configFile.contains(SECURITY_ENABLED) || configFile.contains(SSL_ENABLED); + securityEnabled = configFile.contains(SECURITY_DISABLED) == false; + } else { + final Optional commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines() + .filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch")) + .findFirst(); + if (commandLine.isPresent() == false) { + throw new RuntimeException("Installation distribution is docker but a docker container is not running"); + } + // security is enabled by default, the only way for it to be disabled is to be explicitly disabled + securityEnabled = commandLine.get().contains("-Expack.security.enabled=false") == false; } - if (xpackEnabled) { + if (securityEnabled) { // with security enabled, we may or may not have setup a user/pass, so we use a more generic port being available check. // this isn't as good as a health check, but long term all this waiting should go away when node startup does not // make the http port available until the system is really ready to serve requests @@ -240,6 +249,29 @@ public static void runElasticsearchTests() throws Exception { makeRequest(Request.Delete("http://localhost:9200/library")); } + public static void runElasticsearchTests(String username, String password) throws Exception { + makeRequest( + Request.Post("http://localhost:9200/library/_doc/1?refresh=true&pretty") + .bodyString("{ \"title\": \"Book #1\", \"pages\": 123 }", ContentType.APPLICATION_JSON), + username, + password, + null + ); + + makeRequest( + Request.Post("http://localhost:9200/library/_doc/2?refresh=true&pretty") + .bodyString("{ \"title\": \"Book #2\", \"pages\": 456 }", ContentType.APPLICATION_JSON), + username, + password, + null + ); + + String count = makeRequest(Request.Get("http://localhost:9200/_count?pretty"), username, password, null); + assertThat(count, containsString("\"count\" : 2")); + + makeRequest(Request.Delete("http://localhost:9200/library"), username, password, null); + } + public static String makeRequest(Request request) throws Exception { return makeRequest(request, null, null, null); } @@ -255,6 +287,11 @@ public static String makeRequest(Request request, String username, String passwo return body; } + public static int makeRequestAndGetStatus(Request request, String username, String password, Path caCert) throws Exception { + final HttpResponse response = execute(request, username, password, caCert); + return response.getStatusLine().getStatusCode(); + } + public static void disableGeoIpDownloader(Installation installation) throws IOException { List yaml = Collections.singletonList("ingest.geoip.downloader.enabled: false"); Path yml = installation.config("elasticsearch.yml"); @@ -273,4 +310,27 @@ public static void 
enableGeoIpDownloader(Installation installation) throws IOExc } Files.write(yml, lines, TRUNCATE_EXISTING); } + + /** + * Explicitly disables security if the existing configuration didn't already have an explicit value for the + * xpack.security.enabled setting + */ + public static void possiblyDisableSecurityFeatures(Installation installation) throws IOException { + List configLines = Collections.singletonList("xpack.security.enabled: false"); + Path yamlFile = installation.config("elasticsearch.yml"); + try (Stream lines = Files.readAllLines(yamlFile).stream()) { + if (lines.noneMatch(s -> s.startsWith("xpack.security.enabled"))) { + Files.write(yamlFile, configLines, CREATE, APPEND); + } + } + } + + public static void enableSecurityFeatures(Installation installation) throws IOException { + Path yml = installation.config("elasticsearch.yml"); + List lines; + try (Stream allLines = Files.readAllLines(yml).stream()) { + lines = allLines.filter(s -> s.startsWith("xpack.security.enabled") == false).collect(Collectors.toList()); + } + Files.write(yml, lines, TRUNCATE_EXISTING); + } } diff --git a/qa/remote-clusters/build.gradle b/qa/remote-clusters/build.gradle index f831108971318..0d6d629940546 100644 --- a/qa/remote-clusters/build.gradle +++ b/qa/remote-clusters/build.gradle @@ -65,7 +65,7 @@ tasks.named("preProcessFixture").configure { dockerCompose { tcpPortsToIgnoreWhenWaiting = [9600, 9601] - if ('default'.equalsIgnoreCase(System.getProperty('tests.distribution', 'default'))) { + if ('default'.equalsIgnoreCase(providers.systemProperty('tests.distribution').orElse('default').forUseAtConfigurationTime().get())) { useComposeFiles = ['docker-compose.yml'] } else { useComposeFiles = ['docker-compose-oss.yml'] diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index 6d0c8bc548903..e947453a1e578 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index da9122552e227..4ef7d702191a7 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -261,13 +261,7 @@ private List> listSnapshots(String repoName) throws IOExcept new Request("GET", "/_snapshot/" + repoName + "/_all")).getEntity().getContent(); XContentParser parser = JsonXContent.jsonXContent.createParser( xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entity)) { - final Map raw = parser.map(); - // Bwc lookup since the format of the snapshots response changed between versions - if (raw.containsKey("snapshots")) { - return (List>) raw.get("snapshots"); - } else { - return (List>) ((List>) raw.get("responses")).get(0).get("snapshots"); - } + return (List>) parser.map().get("snapshots"); } } diff --git a/qa/rolling-upgrade/build.gradle 
b/qa/rolling-upgrade/build.gradle index 1e983b6f7a9ad..4a7f9d904d71a 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' @@ -42,6 +42,8 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } } + String oldVersion = bwcVersion.toString() + tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { dependsOn "processTestResources" useCluster testClusters."${baseName}" @@ -50,6 +52,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { delete("${buildDir}/cluster/shared/repo/${baseName}") } systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") } @@ -61,6 +64,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { testClusters."${baseName}".nextNodeToNextVersion() } systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'true' nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") @@ -73,6 +77,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { testClusters."${baseName}".nextNodeToNextVersion() } systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'false' nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") @@ -85,6 +90,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } useCluster testClusters."${baseName}" systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 9e9081f54028b..26de71b9ba629 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.upgrades; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; @@ -24,14 +25,15 @@ public static ClusterType parse(String value) { return MIXED; case "upgraded_cluster": return UPGRADED; - default: - throw new 
AssertionError("unknown cluster type: " + value); + default: + throw new AssertionError("unknown cluster type: " + value); } } } protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); - protected static final boolean firstMixedRound = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); + protected static final boolean FIRST_MIXED_ROUND = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); + protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); @Override protected final boolean preserveIndicesUponCompletion() { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index a8f04931ea34e..41996f2b9e434 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -271,7 +271,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } if (randomBoolean()) { - syncedFlush(index); + flush(index, randomBoolean()); } } @@ -309,7 +309,7 @@ public void testRecovery() throws Exception { } } if (randomBoolean()) { - syncedFlush(index); + flush(index, randomBoolean()); } ensureGreen(index); } @@ -584,7 +584,7 @@ public void testUpdateDoc() throws Exception { assertThat(XContentMapValues.extractValue("_source.updated_field", doc), equalTo(updates.get(docId))); } if (randomBoolean()) { - syncedFlush(index); + flush(index, randomBoolean()); } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java new file mode 100644 index 0000000000000..820efd4a03287 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; + +public class SnapshotBasedRecoveryIT extends AbstractRollingTestCase { + public void testSnapshotBasedRecovery() throws Exception { + final String indexName = "snapshot_based_recovery"; + final String repositoryName = "snapshot_based_recovery_repo"; + final int numDocs = 200; + switch (CLUSTER_TYPE) { + case OLD: + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster + createIndex(indexName, settings.build()); + ensureGreen(indexName); + indexDocs(indexName, numDocs); + flush(indexName, true); + + registerRepository( + repositoryName, + "fs", + true, + Settings.builder() + .put("location", "./snapshot_based_recovery") + .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), true) + .build() + ); + + createSnapshot(repositoryName, "snap", true); + + updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)); + ensureGreen(indexName); + break; + case MIXED: + case UPGRADED: + // Drop replicas + updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)); + ensureGreen(indexName); + + updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)); + ensureGreen(indexName); + assertMatchAllReturnsAllDocuments(indexName, numDocs); + assertMatchQueryReturnsAllDocuments(indexName, numDocs); + break; + default: + throw new IllegalStateException("unknown type " + CLUSTER_TYPE); + } + } + + private void assertMatchAllReturnsAllDocuments(String indexName, int numDocs) throws IOException { + Map searchResults = search(indexName, QueryBuilders.matchAllQuery()); + assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs)); + List> hits = extractValue(searchResults, "hits.hits"); + for (Map hit : hits) { + String docId = extractValue(hit, "_id"); + assertThat(Integer.parseInt(docId), allOf(greaterThanOrEqualTo(0), lessThan(numDocs))); + assertThat(extractValue(hit, "_source.field"), equalTo(Integer.parseInt(docId))); + assertThat(extractValue(hit, "_source.text"), equalTo("Some text " + docId)); + } + } + + private void assertMatchQueryReturnsAllDocuments(String indexName, int 
numDocs) throws IOException { + Map searchResults = search(indexName, QueryBuilders.matchQuery("text", "some")); + assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs)); + } + + private static Map search(String index, QueryBuilder query) throws IOException { + final Request request = new Request(HttpPost.METHOD_NAME, '/' + index + "/_search"); + request.setJsonEntity(new SearchSourceBuilder().trackTotalHits(true).query(query).toString()); + + final Response response = client().performRequest(request); + assertOK(response); + + final Map responseAsMap = responseAsMap(response); + assertThat( + extractValue(responseAsMap, "_shards.failed"), + equalTo(0) + ); + return responseAsMap; + } + + private void indexDocs(String indexName, int numDocs) throws IOException { + final StringBuilder bulkBody = new StringBuilder(); + for (int i = 0; i < numDocs; i++) { + bulkBody.append("{\"index\":{\"_id\":\"").append(i).append("\"}}\n"); + bulkBody.append("{\"field\":").append(i).append(",\"text\":\"Some text ").append(i).append("\"}\n"); + } + + final Request documents = new Request(HttpPost.METHOD_NAME, '/' + indexName + "/_bulk"); + documents.addParameter("refresh", "true"); + documents.setJsonEntity(bulkBody.toString()); + assertOK(client().performRequest(documents)); + } + + @SuppressWarnings("unchecked") + private static T extractValue(Map map, String path) { + return (T) XContentMapValues.extractValue(path, map); + } +} diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index f08700b9c24d6..b0acfcb0f9a46 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -20,11 +20,3 @@ dependencies { testClusters.all { setting 'xpack.security.enabled', 'false' } - -tasks.named("integTest").configure { - /* - * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each - * other if we allow them to set the number of available processors as it's set-once in Netty. 
- */ - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/AbstractSnapshotRestTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/AbstractSnapshotRestTestCase.java index ef993ca55f3bb..6e391a199a9b4 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/AbstractSnapshotRestTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/AbstractSnapshotRestTestCase.java @@ -8,6 +8,7 @@ package org.elasticsearch.http.snapshots; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.http.HttpSmokeTestCase; import org.elasticsearch.plugins.Plugin; @@ -19,6 +20,13 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) public abstract class AbstractSnapshotRestTestCase extends HttpSmokeTestCase { + /** + * We use single threaded metadata fetching in some tests to make sure that once the snapshot meta thread is stuck on a blocked repo, + * no other snapshot meta thread can concurrently finish a request/task + */ + protected static final Settings SINGLE_THREADED_SNAPSHOT_META_SETTINGS = + Settings.builder().put("thread_pool.snapshot_meta.core", 1).put("thread_pool.snapshot_meta.max", 1).build(); + @Override protected Collection> nodePlugins() { return CollectionUtils.appendToCopy(super.nodePlugins(), MockRepository.Plugin.class); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestCatSnapshotsIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestCatSnapshotsIT.java new file mode 100644 index 0000000000000..681aa3a9e7740 --- /dev/null +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestCatSnapshotsIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.http.snapshots; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.hamcrest.Matchers; + +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.util.List; + +public class RestCatSnapshotsIT extends AbstractSnapshotRestTestCase { + + public void testCatSnapshotsDefaultsToAllRepositories() throws Exception { + final String repoName1 = "test-repo-1"; + final String repoName2 = "test-repo-2"; + AbstractSnapshotIntegTestCase.createRepository(logger, repoName1, "fs"); + AbstractSnapshotIntegTestCase.createRepository(logger, repoName2, "fs"); + final int snapshotsRepo1 = randomIntBetween(1, 20); + AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName1, snapshotsRepo1); + final int snapshotsRepo2 = randomIntBetween(1, 20); + AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName2, snapshotsRepo2); + final Response response = getRestClient().performRequest(new Request(HttpGet.METHOD_NAME, "/_cat/snapshots")); + assertEquals(HttpURLConnection.HTTP_OK, response.getStatusLine().getStatusCode()); + final List allLines; + try (InputStream in = response.getEntity().getContent()) { + allLines = Streams.readAllLines(in); + } + assertThat(allLines, Matchers.hasSize(snapshotsRepo1 + snapshotsRepo2)); + assertEquals(allLines.stream().filter(l -> l.contains(repoName1)).count(), snapshotsRepo1); + assertEquals(allLines.stream().filter(l -> l.contains(repoName2)).count(), snapshotsRepo2); + } +} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsCancellationIT.java index cbd27b8f8037c..0308c02b60f69 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsCancellationIT.java @@ -29,7 +29,7 @@ public class RestGetSnapshotsCancellationIT extends AbstractSnapshotRestTestCase { public void testGetSnapshotsCancellation() throws Exception { - internalCluster().startMasterOnlyNode(); + internalCluster().startMasterOnlyNode(SINGLE_THREADED_SNAPSHOT_META_SETTINGS); internalCluster().startDataOnlyNode(); ensureStableCluster(2); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 72514beed8504..70da8e782b887 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -64,8 +64,7 @@ public void testSortOrder() throws Exception { } private void doTestSortOrder(String repoName, Collection allSnapshotNames, SortOrder order) throws IOException { - final List defaultSorting = - clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(repoName); + final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); assertSnapshotListSorted( allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.NAME, order), @@ -106,24 +105,34 @@ private void 
doTestPagination(String repoName, GetSnapshotsRequest.SortBy sort, SortOrder order) throws IOException { final List allSnapshotsSorted = allSnapshotsSorted(names, repoName, sort, order); - final List batch1 = sortedWithLimit(repoName, sort, 2, order); - assertEquals(batch1, allSnapshotsSorted.subList(0, 2)); - final List batch2 = sortedWithLimit(repoName, sort, batch1.get(1), 2, order); - assertEquals(batch2, allSnapshotsSorted.subList(2, 4)); - final int lastBatch = names.size() - batch1.size() - batch2.size(); - final List batch3 = sortedWithLimit(repoName, sort, batch2.get(1), lastBatch, order); - assertEquals(batch3, allSnapshotsSorted.subList(batch1.size() + batch2.size(), names.size())); - final List batch3NoLimit = - sortedWithLimit(repoName, sort, batch2.get(1), GetSnapshotsRequest.NO_LIMIT, order); - assertEquals(batch3, batch3NoLimit); - final List batch3LargeLimit = sortedWithLimit( + final GetSnapshotsResponse batch1 = sortedWithLimit(repoName, sort, null, 2, order); + assertEquals(allSnapshotsSorted.subList(0, 2), batch1.getSnapshots()); + final GetSnapshotsResponse batch2 = sortedWithLimit(repoName, sort, batch1.next(), 2, order); + assertEquals(allSnapshotsSorted.subList(2, 4), batch2.getSnapshots()); + final int lastBatch = names.size() - batch1.getSnapshots().size() - batch2.getSnapshots().size(); + final GetSnapshotsResponse batch3 = sortedWithLimit(repoName, sort, batch2.next(), lastBatch, order); + assertEquals( + batch3.getSnapshots(), + allSnapshotsSorted.subList(batch1.getSnapshots().size() + batch2.getSnapshots().size(), names.size()) + ); + final GetSnapshotsResponse batch3NoLimit = sortedWithLimit( repoName, sort, - batch2.get(1), + batch2.next(), + GetSnapshotsRequest.NO_LIMIT, + order + ); + assertNull(batch3NoLimit.next()); + assertEquals(batch3.getSnapshots(), batch3NoLimit.getSnapshots()); + final GetSnapshotsResponse batch3LargeLimit = sortedWithLimit( + repoName, + sort, + batch2.next(), lastBatch + randomIntBetween(1, 100), order ); - assertEquals(batch3, batch3LargeLimit); + assertEquals(batch3.getSnapshots(), batch3LargeLimit.getSnapshots()); + assertNull(batch3LargeLimit.next()); } public void testSortAndPaginateWithInProgress() throws Exception { @@ -173,15 +182,23 @@ private static void assertStablePagination(String repoName, final List allSorted = allSnapshotsSorted(allSnapshotNames, repoName, sort, order); for (int i = 1; i <= allSnapshotNames.size(); i++) { - final List subsetSorted = sortedWithLimit(repoName, sort, i, order); + final List subsetSorted = sortedWithLimit(repoName, sort, null, i, order).getSnapshots(); assertEquals(subsetSorted, allSorted.subList(0, i)); } for (int j = 0; j < allSnapshotNames.size(); j++) { final SnapshotInfo after = allSorted.get(j); for (int i = 1; i < allSnapshotNames.size() - j; i++) { - final List subsetSorted = sortedWithLimit(repoName, sort, after, i, order); + final GetSnapshotsResponse getSnapshotsResponse = + sortedWithLimit(repoName, sort, GetSnapshotsRequest.After.from(after, sort).asQueryParam(), i, order); + final GetSnapshotsResponse getSnapshotsResponseNumeric = sortedWithLimit(repoName, sort, j + 1, i, order); + final List subsetSorted = getSnapshotsResponse.getSnapshots(); + assertEquals(subsetSorted, getSnapshotsResponseNumeric.getSnapshots()); assertEquals(subsetSorted, allSorted.subList(j + 1, j + i + 1)); + assertEquals(allSnapshotNames.size(), getSnapshotsResponse.totalCount()); + assertEquals(allSnapshotNames.size() - (j + i + 1), getSnapshotsResponse.remaining()); + 
assertEquals(getSnapshotsResponseNumeric.totalCount(), getSnapshotsResponse.totalCount()); + assertEquals(getSnapshotsResponseNumeric.remaining(), getSnapshotsResponse.remaining()); } } } @@ -195,9 +212,11 @@ private static List allSnapshotsSorted(Collection allSnaps if (order == SortOrder.DESC || randomBoolean()) { request.addParameter("order", order.toString()); } - final Response response = getRestClient().performRequest(request); - final List snapshotInfos = readSnapshotInfos(repoName, response); + final GetSnapshotsResponse getSnapshotsResponse = readSnapshotInfos(getRestClient().performRequest(request)); + final List snapshotInfos = getSnapshotsResponse.getSnapshots(); assertEquals(snapshotInfos.size(), allSnapshotNames.size()); + assertEquals(getSnapshotsResponse.totalCount(), allSnapshotNames.size()); + assertEquals(0, getSnapshotsResponse.remaining()); for (SnapshotInfo snapshotInfo : snapshotInfos) { assertThat(snapshotInfo.snapshotId().getName(), is(in(allSnapshotNames))); } @@ -208,47 +227,49 @@ private static Request baseGetSnapshotsRequest(String repoName) { return new Request(HttpGet.METHOD_NAME, "/_snapshot/" + repoName + "/*"); } - private static List sortedWithLimit(String repoName, - GetSnapshotsRequest.SortBy sortBy, - int size, - SortOrder order) throws IOException { - final Request request = baseGetSnapshotsRequest(repoName); - request.addParameter("sort", sortBy.toString()); - if (order == SortOrder.DESC || randomBoolean()) { - request.addParameter("order", order.toString()); - } - request.addParameter("size", String.valueOf(size)); - final Response response = getRestClient().performRequest(request); - return readSnapshotInfos(repoName, response); - } - - private static List readSnapshotInfos(String repoName, Response response) throws IOException { - final List snapshotInfos; + private static GetSnapshotsResponse readSnapshotInfos(Response response) throws IOException { try (InputStream input = response.getEntity().getContent(); XContentParser parser = JsonXContent.jsonXContent.createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, input)) { - snapshotInfos = GetSnapshotsResponse.fromXContent(parser).getSnapshots(repoName); + return GetSnapshotsResponse.fromXContent(parser); } - return snapshotInfos; } - private static List sortedWithLimit(String repoName, - GetSnapshotsRequest.SortBy sortBy, - SnapshotInfo after, - int size, - SortOrder order) throws IOException { + private static GetSnapshotsResponse sortedWithLimit(String repoName, + GetSnapshotsRequest.SortBy sortBy, + String after, + int size, + SortOrder order) throws IOException { final Request request = baseGetSnapshotsRequest(repoName); request.addParameter("sort", sortBy.toString()); if (size != GetSnapshotsRequest.NO_LIMIT || randomBoolean()) { request.addParameter("size", String.valueOf(size)); } if (after != null) { - request.addParameter("after", GetSnapshotsRequest.After.from(after, sortBy).value() + "," + after.snapshotId().getName()); + request.addParameter("after", after); + } + if (order == SortOrder.DESC || randomBoolean()) { + request.addParameter("order", order.toString()); + } + final Response response = getRestClient().performRequest(request); + return readSnapshotInfos(response); + } + + private static GetSnapshotsResponse sortedWithLimit(String repoName, + GetSnapshotsRequest.SortBy sortBy, + int offset, + int size, + SortOrder order) throws IOException { + final Request request = baseGetSnapshotsRequest(repoName); + request.addParameter("sort", 
sortBy.toString()); + if (size != GetSnapshotsRequest.NO_LIMIT || randomBoolean()) { + request.addParameter("size", String.valueOf(size)); } + request.addParameter("offset", String.valueOf(offset)); if (order == SortOrder.DESC || randomBoolean()) { request.addParameter("order", order.toString()); } final Response response = getRestClient().performRequest(request); - return readSnapshotInfos(repoName, response); + return readSnapshotInfos(response); } } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestSnapshotsStatusCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestSnapshotsStatusCancellationIT.java index 75d3ca545f3d0..b7f147862eca2 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestSnapshotsStatusCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestSnapshotsStatusCancellationIT.java @@ -29,7 +29,7 @@ public class RestSnapshotsStatusCancellationIT extends AbstractSnapshotRestTestCase { public void testSnapshotStatusCancellation() throws Exception { - internalCluster().startMasterOnlyNode(); + internalCluster().startMasterOnlyNode(SINGLE_THREADED_SNAPSHOT_META_SETTINGS); internalCluster().startDataOnlyNode(); ensureStableCluster(2); diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index 8b886db84d13d..afd31f38d52ee 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -import org.elasticsearch.gradle.internal.MavenFilteringHack +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' @@ -37,5 +37,5 @@ ext.expansions = [ tasks.named("processTestResources").configure { assert pluginPaths.size() > 0 inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) + filter("tokens" : expansions.collectEntries {k, v -> [k, v.toString()]} /* must be a map of strings */, ReplaceTokens.class) } diff --git a/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml b/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml index 6a92845a062aa..46fe41663e781 100644 --- a/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml +++ b/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml @@ -10,4 +10,4 @@ - do: nodes.info: {} - - length: { nodes.$master.plugins: ${expected.plugins.count} } + - length: { nodes.$master.plugins: @expected.plugins.count@ } diff --git a/qa/snapshot-based-recoveries/azure/build.gradle b/qa/snapshot-based-recoveries/azure/build.gradle new file mode 100644 index 0000000000000..2b1ea99d45bc1 --- /dev/null +++ b/qa/snapshot-based-recoveries/azure/build.gradle @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import org.elasticsearch.gradle.internal.info.BuildParams +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.rest-resources' + +final Project fixture = project(':test:fixtures:azure-fixture') +final Project repositoryPlugin = project(':plugins:repository-azure') + +dependencies { + testImplementation testArtifact(project(':qa:snapshot-based-recoveries')) + testImplementation repositoryPlugin +} + +restResources { + restApi { + include 'indices', 'search', 'bulk', 'snapshot' + } +} + +boolean useFixture = false +String azureAccount = System.getenv("azure_storage_account") +String azureKey = System.getenv("azure_storage_key") +String azureContainer = System.getenv("azure_storage_container") +String azureBasePath = System.getenv("azure_storage_base_path") +String azureSasToken = System.getenv("azure_storage_sas_token") + +if (!azureAccount && !azureKey && !azureContainer && !azureBasePath && !azureSasToken) { + azureAccount = 'azure_integration_test_account' + azureKey = 'YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk=' // The key is "azure_integration_test_key" encoded using base64 + azureContainer = 'container' + azureBasePath = '' + azureSasToken = '' + useFixture = true + +} + +if (useFixture) { + apply plugin: 'elasticsearch.test.fixtures' + testFixtures.useFixture(fixture.path, 'azure-fixture-snapshot-based-recoveries') +} + +tasks.named("integTest").configure { + systemProperty 'test.azure.container', azureContainer + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_snapshot_based_recoveries_tests_" + BuildParams.testSeed +} + +testClusters.matching { it.name == "integTest" }.configureEach { + testDistribution = 'DEFAULT' + numberOfNodes = 3 + plugin repositoryPlugin.path + + keystore 'azure.client.snapshot_based_recoveries.account', azureAccount + if (azureKey != null && azureKey.isEmpty() == false) { + keystore 'azure.client.snapshot_based_recoveries.key', azureKey + } + if (azureSasToken != null && azureSasToken.isEmpty() == false) { + keystore 'azure.client.snapshot_based_recoveries.sas_token', azureSasToken + } + + setting 'xpack.security.enabled', 'false' + + if (useFixture) { + def fixtureAddress = { fixtureName -> + assert useFixture: 'closure should not be used without a fixture' + int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.8091" + assert ephemeralPort > 0 + '127.0.0.1:' + ephemeralPort + } + setting 'azure.client.snapshot_based_recoveries.endpoint_suffix', + { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${-> fixtureAddress('azure-fixture-snapshot-based-recoveries')}/azure_integration_test_account" }, IGNORE_VALUE + + } else { + println "Using an external service to test " + project.name + } +} + +tasks.register("azureThirdPartyTest") { + dependsOn "integTest" +} + diff --git a/qa/snapshot-based-recoveries/azure/src/test/java/org/elasticsearch/recovery/AzureSnapshotBasedRecoveryIT.java b/qa/snapshot-based-recoveries/azure/src/test/java/org/elasticsearch/recovery/AzureSnapshotBasedRecoveryIT.java new file mode 100644 index 0000000000000..84f15c065e610 --- /dev/null +++ b/qa/snapshot-based-recoveries/azure/src/test/java/org/elasticsearch/recovery/AzureSnapshotBasedRecoveryIT.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.recovery; + +import org.elasticsearch.common.settings.Settings; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; + +public class AzureSnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryRestTestCase { + + @Override + protected String repositoryType() { + return "azure"; + } + + @Override + protected Settings repositorySettings() { + final String container = System.getProperty("test.azure.container"); + assertThat(container, not(blankOrNullString())); + + final String basePath = System.getProperty("test.azure.base_path"); + assertThat(basePath, not(blankOrNullString())); + + return Settings.builder() + .put("client", "snapshot_based_recoveries") + .put("container", container).put("base_path", basePath) + .build(); + } +} diff --git a/qa/snapshot-based-recoveries/build.gradle b/qa/snapshot-based-recoveries/build.gradle new file mode 100644 index 0000000000000..bfceaf642d35e --- /dev/null +++ b/qa/snapshot-based-recoveries/build.gradle @@ -0,0 +1,2 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact' diff --git a/qa/snapshot-based-recoveries/fs/build.gradle b/qa/snapshot-based-recoveries/fs/build.gradle new file mode 100644 index 0000000000000..224f43e18f44b --- /dev/null +++ b/qa/snapshot-based-recoveries/fs/build.gradle @@ -0,0 +1,39 @@ +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.rest-resources' + +dependencies { + testImplementation testArtifact(project(':qa:snapshot-based-recoveries')) +} + +final File repoDir = file("$buildDir/testclusters/snapshot-recoveries-repo") + +restResources { + restApi { + include 'indices', 'search', 'bulk', 'snapshot' + } +} + +tasks.withType(Test).configureEach { + doFirst { + delete(repoDir) + } + systemProperty 'tests.path.repo', repoDir +} + +testClusters.matching { it.name == "integTest" }.configureEach { + testDistribution = 'DEFAULT' + numberOfNodes = 3 + setting 'path.repo', repoDir.absolutePath + setting 'xpack.security.enabled', 'false' +} diff --git a/qa/snapshot-based-recoveries/fs/src/test/java/org/elasticsearch/recovery/FsSnapshotBasedRecoveryIT.java b/qa/snapshot-based-recoveries/fs/src/test/java/org/elasticsearch/recovery/FsSnapshotBasedRecoveryIT.java new file mode 100644 index 0000000000000..a49f462c11466 --- /dev/null +++ b/qa/snapshot-based-recoveries/fs/src/test/java/org/elasticsearch/recovery/FsSnapshotBasedRecoveryIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.recovery; + +import org.elasticsearch.common.settings.Settings; + +public class FsSnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryRestTestCase { + + @Override + protected String repositoryType() { + return "fs"; + } + + @Override + protected Settings repositorySettings() { + return Settings.builder() + .put("location", System.getProperty("tests.path.repo")) + .build(); + } +} diff --git a/qa/snapshot-based-recoveries/gcs/build.gradle b/qa/snapshot-based-recoveries/gcs/build.gradle new file mode 100644 index 0000000000000..9929f757fde5f --- /dev/null +++ b/qa/snapshot-based-recoveries/gcs/build.gradle @@ -0,0 +1,121 @@ +import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.internal.info.BuildParams + +import java.security.KeyPair +import java.security.KeyPairGenerator + +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.rest-resources' + +final Project fixture = project(':test:fixtures:gcs-fixture') +final Project repositoryPlugin = project(':plugins:repository-gcs') + +dependencies { + testImplementation testArtifact(project(':qa:snapshot-based-recoveries')) + testImplementation repositoryPlugin +} + +restResources { + restApi { + include 'indices', 'search', 'bulk', 'snapshot' + } +} + +boolean useFixture = false + +String gcsServiceAccount = System.getenv("google_storage_service_account") +String gcsBucket = System.getenv("google_storage_bucket") +String gcsBasePath = System.getenv("google_storage_base_path") + +File serviceAccountFile = null +if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { + serviceAccountFile = new File(project.buildDir, 'generated-resources/service_account_test.json') + gcsBucket = 'bucket' + gcsBasePath = 'integration_test' + useFixture = true +} else if (!gcsServiceAccount || !gcsBucket || !gcsBasePath) { + throw new IllegalArgumentException("not all options specified to run tests against external GCS service are present") +} else { + serviceAccountFile = new File(gcsServiceAccount) +} + +/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ +tasks.register("createServiceAccountFile") { + doLast { + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") + keyPairGenerator.initialize(2048) + KeyPair keyPair = keyPairGenerator.generateKeyPair() + String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded()) + + serviceAccountFile.parentFile.mkdirs() + serviceAccountFile.setText( + """ + { + "type": "service_account", + "project_id": "integration_test", + "private_key_id": "${UUID.randomUUID().toString()}", + + "private_key": "-----BEGIN PRIVATE KEY-----\\n${encodedKey}\\n-----END PRIVATE KEY-----\\n", + "client_email": "integration_test@appspot.gserviceaccount.com", + "client_id": "123456789101112130594" + } + """ + ) + } +} + +def fixtureAddress = { f -> + assert useFixture: 'closure should not be used without a fixture' + int ephemeralPort = project(':test:fixtures:gcs-fixture').postProcessFixture.ext."test.fixtures.${f}.tcp.80" + assert ephemeralPort > 0 + 'http://127.0.0.1:' + ephemeralPort +} + +Map expansions = [ + 'bucket' 
: gcsBucket, + 'base_path': gcsBasePath + "_integration_tests" +] + +tasks.named("processTestResources").configure { + inputs.properties(expansions) + filter("tokens" : expansions, ReplaceTokens.class) +} + +if (useFixture) { + apply plugin: 'elasticsearch.test.fixtures' + testFixtures.useFixture(fixture.path, 'gcs-fixture-snapshots-based-recoveries') +} + +tasks.named("integTest").configure { + systemProperty 'test.gcs.bucket', gcsBucket + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed + + if (useFixture) { + dependsOn "createServiceAccountFile" + } +} + +testClusters.matching { it.name == "integTest" }.configureEach { + testDistribution = 'DEFAULT' + numberOfNodes = 3 + plugin repositoryPlugin.path + + keystore 'gcs.client.snapshot_based_recoveries.credentials_file', serviceAccountFile, IGNORE_VALUE + if (useFixture) { + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 'gcs.client.snapshot_based_recoveries.endpoint', { "${-> fixtureAddress('gcs-fixture-snapshots-based-recoveries')}" }, IGNORE_VALUE + setting 'gcs.client.snapshot_based_recoveries.token_uri', { "${-> fixtureAddress('gcs-fixture-snapshots-based-recoveries')}/o/oauth2/token" }, + IGNORE_VALUE + } else { + println "Using an external service to test " + project.name + } + + setting 'xpack.security.enabled', 'false' +} + +tasks.register("gcsThirdPartyTest") { + dependsOn "integTest" +} diff --git a/qa/snapshot-based-recoveries/gcs/src/test/java/org/elasticsearch/recovery/GCSSnapshotBasedRecoveryIT.java b/qa/snapshot-based-recoveries/gcs/src/test/java/org/elasticsearch/recovery/GCSSnapshotBasedRecoveryIT.java new file mode 100644 index 0000000000000..ff623f31e5113 --- /dev/null +++ b/qa/snapshot-based-recoveries/gcs/src/test/java/org/elasticsearch/recovery/GCSSnapshotBasedRecoveryIT.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.recovery; + +import org.elasticsearch.common.settings.Settings; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; + +public class GCSSnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryRestTestCase { + + @Override + protected String repositoryType() { + return "gcs"; + } + + @Override + protected Settings repositorySettings() { + final String bucket = System.getProperty("test.gcs.bucket"); + assertThat(bucket, not(blankOrNullString())); + + final String basePath = System.getProperty("test.gcs.base_path"); + assertThat(basePath, not(blankOrNullString())); + + return Settings.builder() + .put("client", "snapshot_based_recoveries") + .put("bucket", bucket).put("base_path", basePath) + .build(); + } +} diff --git a/qa/snapshot-based-recoveries/s3/build.gradle b/qa/snapshot-based-recoveries/s3/build.gradle new file mode 100644 index 0000000000000..642eaa3d1cca1 --- /dev/null +++ b/qa/snapshot-based-recoveries/s3/build.gradle @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.internal.info.BuildParams + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.rest-resources' + +final Project fixture = project(':test:fixtures:s3-fixture') +final Project repositoryPlugin = project(':plugins:repository-s3') + +dependencies { + testImplementation testArtifact(project(':qa:snapshot-based-recoveries')) + testImplementation repositoryPlugin +} + +restResources { + restApi { + include 'indices', 'search', 'bulk', 'snapshot' + } +} + +boolean useFixture = false +String s3AccessKey = System.getenv("amazon_s3_access_key") +String s3SecretKey = System.getenv("amazon_s3_secret_key") +String s3Bucket = System.getenv("amazon_s3_bucket") +String s3BasePath = System.getenv("amazon_s3_base_path") + +if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { + s3AccessKey = 's3_test_access_key' + s3SecretKey = 's3_test_secret_key' + s3Bucket = 'bucket' + s3BasePath = null + useFixture = true + +} else if (!s3AccessKey || !s3SecretKey || !s3Bucket || !s3BasePath) { + throw new IllegalArgumentException("not all options specified to run against external S3 service are present") +} + +if (useFixture) { + apply plugin: 'elasticsearch.test.fixtures' + testFixtures.useFixture(fixture.path, 's3-snapshot-based-recoveries') +} + +tasks.withType(Test).configureEach { + systemProperty 'test.s3.bucket', s3Bucket + nonInputProperties.systemProperty 'test.s3.base_path', + s3BasePath ? s3BasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed : 'base_path' +} + +testClusters.matching { it.name == "integTest" }.configureEach { + testDistribution = 'DEFAULT' + numberOfNodes = 3 + plugin repositoryPlugin.path + + keystore 's3.client.snapshot_based_recoveries.access_key', s3AccessKey + keystore 's3.client.snapshot_based_recoveries.secret_key', s3SecretKey + + if (useFixture) { + def fixtureAddress = { fixtureName -> + assert useFixture: 'closure should not be used without a fixture' + int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" + assert ephemeralPort > 0 + '127.0.0.1:' + ephemeralPort + } + setting 's3.client.snapshot_based_recoveries.protocol', 'http' + setting 's3.client.snapshot_based_recoveries.endpoint', { "${-> fixtureAddress('s3-snapshot-based-recoveries')}" }, IGNORE_VALUE + + } else { + println "Using an external service to test " + project.name + } + + setting 'xpack.security.enabled', 'false' +} + +tasks.register("s3ThirdPartyTest") { + dependsOn "integTest" +} diff --git a/qa/snapshot-based-recoveries/s3/src/test/java/org/elasticsearch/recovery/S3SnapshotBasedRecoveryIT.java b/qa/snapshot-based-recoveries/s3/src/test/java/org/elasticsearch/recovery/S3SnapshotBasedRecoveryIT.java new file mode 100644 index 0000000000000..d5d72cd8489b0 --- /dev/null +++ b/qa/snapshot-based-recoveries/s3/src/test/java/org/elasticsearch/recovery/S3SnapshotBasedRecoveryIT.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.recovery; + +import org.elasticsearch.common.settings.Settings; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; + +public class S3SnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryRestTestCase { + + @Override + protected String repositoryType() { + return "s3"; + } + + @Override + protected Settings repositorySettings() { + final String bucket = System.getProperty("test.s3.bucket"); + assertThat(bucket, not(blankOrNullString())); + + final String basePath = System.getProperty("test.s3.base_path"); + assertThat(basePath, not(blankOrNullString())); + + return Settings.builder() + .put("client", "snapshot_based_recoveries") + .put("bucket", bucket) + .put("base_path", basePath) + .build(); + } +} diff --git a/qa/snapshot-based-recoveries/src/test/java/org/elasticsearch/recovery/AbstractSnapshotBasedRecoveryRestTestCase.java b/qa/snapshot-based-recoveries/src/test/java/org/elasticsearch/recovery/AbstractSnapshotBasedRecoveryRestTestCase.java new file mode 100644 index 0000000000000..88a821b521b00 --- /dev/null +++ b/qa/snapshot-based-recoveries/src/test/java/org/elasticsearch/recovery/AbstractSnapshotBasedRecoveryRestTestCase.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.recovery; + +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; + +public abstract class AbstractSnapshotBasedRecoveryRestTestCase extends ESRestTestCase { + private static final String REPOSITORY_NAME = "repository"; + private static final String SNAPSHOT_NAME = "snapshot-for-recovery"; + + protected abstract String repositoryType(); + + protected abstract Settings repositorySettings(); + + public void testRecoveryUsingSnapshots() throws Exception { + final String repositoryType = repositoryType(); + Settings repositorySettings = Settings.builder().put(repositorySettings()) + .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), true) + .build(); + + registerRepository(REPOSITORY_NAME, repositoryType, true, repositorySettings); + + final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(indexName); + + final int numDocs = randomIntBetween(500, 1000); + indexDocs(indexName, numDocs); + waitUntilGlobalCheckpointIsStable(indexName); + forceMerge(indexName, randomBoolean(), randomBoolean()); + + deleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME, true); + createSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME, true); + + // Add a new replica + updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(indexName); + assertSnapshotIsUsed(indexName); + + assertMatchAllReturnsAllDocuments(indexName, numDocs); + assertMatchQueryReturnsAllDocuments(indexName, numDocs); + + deleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME, false); + } + + private void waitUntilGlobalCheckpointIsStable(String index) throws Exception { + assertBusy(() -> { + Request request = new Request(HttpGet.METHOD_NAME, '/' + index + "/_stats?level=shards"); + Response response = client().performRequest(request); + assertOK(response); + Map<String, Object> responseAsMap = responseAsMap(response); + Map<String, Object> indices = extractValue(responseAsMap, "indices"); + Map<String, Object> indexShardsStats = extractValue(extractValue(indices, index), "shards"); + List<Map<String, Object>> shardStats = extractValue(indexShardsStats, "0"); + for (Map<String, Object> shardStat : shardStats) { + final boolean isPrimary = extractValue(shardStat, "routing.primary"); + if (isPrimary == false) { + continue; + } + Map<String, Object> seqNos = extractValue(shardStat, "seq_no"); + assertThat(seqNos.toString(), seqNos.get("max_seq_no"), 
is(equalTo(seqNos.get("global_checkpoint")))); + } + }, 60, TimeUnit.SECONDS); + } + + private void assertMatchAllReturnsAllDocuments(String indexName, int numDocs) throws IOException { + Map<String, Object> searchResults = search(indexName, QueryBuilders.matchAllQuery()); + assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs)); + List<Map<String, Object>> hits = extractValue(searchResults, "hits.hits"); + for (Map<String, Object> hit : hits) { + String docId = extractValue(hit, "_id"); + assertThat(Integer.parseInt(docId), allOf(greaterThanOrEqualTo(0), lessThan(numDocs))); + assertThat(extractValue(hit, "_source.field"), equalTo(Integer.parseInt(docId))); + assertThat(extractValue(hit, "_source.text"), equalTo("Some text " + docId)); + } + } + + private void assertSnapshotIsUsed(String index) throws Exception { + Request request = new Request(HttpGet.METHOD_NAME, '/' + index + "/_recovery?detailed=true"); + Response response = client().performRequest(request); + assertOK(response); + Map<String, Object> responseAsMap = responseAsMap(response); + List<Map<String, Object>> shardRecoveries = extractValue(responseAsMap, index + ".shards"); + long totalRecoveredFromSnapshot = 0; + for (Map<String, Object> shardRecoveryState : shardRecoveries) { + String recoveryType = extractValue(shardRecoveryState, "type"); + if (recoveryType.equals("PEER") == false) { + continue; + } + String stage = extractValue(shardRecoveryState, "stage"); + assertThat(stage, is(equalTo("DONE"))); + + List<Map<String, Object>> fileDetails = extractValue(shardRecoveryState, "index.files.details"); + for (Map<String, Object> fileDetail : fileDetails) { + int recoveredFromSnapshot = extractValue(fileDetail, "recovered_from_snapshot_in_bytes"); + assertThat(recoveredFromSnapshot, is(greaterThan(0))); + totalRecoveredFromSnapshot += recoveredFromSnapshot; + } + } + long snapshotSize = getSnapshotSizeForIndex(index); + assertThat(totalRecoveredFromSnapshot, is(greaterThan(0L))); + assertThat(totalRecoveredFromSnapshot, is(equalTo(snapshotSize))); + } + + private int getSnapshotSizeForIndex(String indexName) throws Exception { + Request request = new Request(HttpGet.METHOD_NAME, "/_snapshot/" + REPOSITORY_NAME + "/" + SNAPSHOT_NAME); + request.addParameter("index_details", "true"); + Response response = client().performRequest(request); + assertOK(response); + Map<String, Object> snapshotsResponse = responseAsMap(response); + List<Map<String, Object>> snapshots = extractValue(snapshotsResponse, "snapshots"); + assertThat(snapshots.size(), is(equalTo(1))); + Map<String, Object> snapshot = snapshots.get(0); + return extractValue(snapshot, "index_details." 
+ indexName + ".size_in_bytes"); + } + + private void assertMatchQueryReturnsAllDocuments(String indexName, int numDocs) throws IOException { + Map searchResults = search(indexName, QueryBuilders.matchQuery("text", "some")); + assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs)); + } + + private static void forceMerge(String index, boolean onlyExpungeDeletes, boolean flush) throws IOException { + final Request request = new Request(HttpPost.METHOD_NAME, '/' + index + "/_forcemerge"); + request.addParameter("only_expunge_deletes", Boolean.toString(onlyExpungeDeletes)); + request.addParameter("flush", Boolean.toString(flush)); + assertOK(client().performRequest(request)); + } + + private void indexDocs(String indexName, int numDocs) throws IOException { + final StringBuilder bulkBody = new StringBuilder(); + for (int i = 0; i < numDocs; i++) { + bulkBody.append("{\"index\":{\"_id\":\"").append(i).append("\"}}\n"); + bulkBody.append("{\"field\":").append(i).append(",\"text\":\"Some text ").append(i).append("\"}\n"); + } + + final Request documents = new Request(HttpPost.METHOD_NAME, '/' + indexName + "/_bulk"); + documents.addParameter("refresh", Boolean.TRUE.toString()); + documents.setJsonEntity(bulkBody.toString()); + assertOK(client().performRequest(documents)); + } + + private static Map search(String index, QueryBuilder query) throws IOException { + final Request request = new Request(HttpPost.METHOD_NAME, '/' + index + "/_search"); + request.setJsonEntity(new SearchSourceBuilder().trackTotalHits(true).query(query).toString()); + + final Response response = client().performRequest(request); + assertOK(response); + + final Map responseAsMap = responseAsMap(response); + assertThat( + extractValue(responseAsMap, "_shards.failed"), + equalTo(0) + ); + return responseAsMap; + } + + @SuppressWarnings("unchecked") + private static T extractValue(Map map, String path) { + return (T) XContentMapValues.extractValue(path, map); + } +} diff --git a/qa/system-indices/build.gradle b/qa/system-indices/build.gradle new file mode 100644 index 0000000000000..6762b90588c17 --- /dev/null +++ b/qa/system-indices/build.gradle @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +apply plugin: 'elasticsearch.internal-es-plugin' +apply plugin: 'elasticsearch.java-rest-test' + +esplugin { + name 'system-indices-qa' + description 'Plugin for performing QA of system indices' + classname 'org.elasticsearch.system.indices.SystemIndicesQA' + licenseFile rootProject.file('licenses/SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') +} + +tasks.named("test").configure { enabled = false } +tasks.named("javaRestTest").configure { + dependsOn "buildZip" +} + +testClusters.all { + testDistribution = 'DEFAULT' + setting 'xpack.security.enabled', 'true' + user username: 'rest_user', password: 'rest-user-password', role: 'superuser' +} diff --git a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/NetNewSystemIndicesIT.java b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/NetNewSystemIndicesIT.java new file mode 100644 index 0000000000000..f345b79a377e7 --- /dev/null +++ b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/NetNewSystemIndicesIT.java @@ -0,0 +1,105 @@ + +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.system.indices; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.After; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class NetNewSystemIndicesIT extends ESRestTestCase { + + static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("rest_user", new SecureString("rest-user-password".toCharArray())); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + public void testCreatingSystemIndex() throws Exception { + ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("PUT", "/.net-new-system-index-" + Version.CURRENT.major)) + ); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("system")); + + Response response = client().performRequest(new Request("PUT", "/_net_new_sys_index/_create")); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + public void testIndexDoc() throws Exception { + String id = randomAlphaOfLength(4); + + ResponseException e = expectThrows(ResponseException.class, () -> { + Request request = new Request("PUT", "/.net-new-system-index-" + Version.CURRENT.major + "/_doc" + id); + request.setJsonEntity("{}"); + client().performRequest(request); + }); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("system")); + + Request request = new Request("PUT", "/_net_new_sys_index/" + id); + request.setJsonEntity("{}"); + Response response = client().performRequest(request); + 
assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + public void testSearch() throws Exception { + // search before indexing doc + Request searchRequest = new Request("GET", "/_search"); + searchRequest.setJsonEntity("{ \"query\": { \"match_all\": {} } }"); + searchRequest.addParameter("size", "10000"); + Response searchResponse = client().performRequest(searchRequest); + assertThat(searchResponse.getStatusLine().getStatusCode(), is(200)); + assertThat(EntityUtils.toString(searchResponse.getEntity()), not(containsString(".net-new"))); + + // create a doc + String id = randomAlphaOfLength(4); + Request request = new Request("PUT", "/_net_new_sys_index/" + id); + request.setJsonEntity("{}"); + request.addParameter("refresh", "true"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + + // search again + searchResponse = client().performRequest(searchRequest); + assertThat(searchResponse.getStatusLine().getStatusCode(), is(200)); + assertThat(EntityUtils.toString(searchResponse.getEntity()), not(containsString(".net-new"))); + + // index wildcard search + searchRequest = new Request("GET", "/.net-new-system-index*/_search"); + searchRequest.setJsonEntity("{ \"query\": { \"match_all\": {} } }"); + searchRequest.addParameter("size", "10000"); + searchResponse = client().performRequest(searchRequest); + assertThat(searchResponse.getStatusLine().getStatusCode(), is(200)); + assertThat(EntityUtils.toString(searchResponse.getEntity()), not(containsString(".net-new"))); + + // direct index search + Request directRequest = new Request("GET", "/.net-new-system-index-" + Version.CURRENT.major + "/_search"); + directRequest.setJsonEntity("{ \"query\": { \"match_all\": {} } }"); + directRequest.addParameter("size", "10000"); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(directRequest)); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("system")); + } + + @After + public void resetFeatures() throws Exception { + client().performRequest(new Request("POST", "/_features/_reset")); + } +} diff --git a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java new file mode 100644 index 0000000000000..6d73b22de1f60 --- /dev/null +++ b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java @@ -0,0 +1,164 @@ + +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.system.indices; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestRequest.Method; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +public class SystemIndicesQA extends Plugin implements SystemIndexPlugin, ActionPlugin { + + @Override + public String getFeatureName() { + return "system indices qa"; + } + + @Override + public String getFeatureDescription() { + return "plugin used to perform qa on system index behavior"; + } + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + return List.of( + SystemIndexDescriptor.builder() + .setNetNew() + .setIndexPattern(".net-new-system-index*") + .setDescription("net new system index") + .setMappings(mappings()) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .build() + ) + .setOrigin("net-new") + .setVersionMetaKey("version") + .setPrimaryIndex(".net-new-system-index-" + Version.CURRENT.major) + .build() + ); + } + + private static XContentBuilder mappings() { + try { + return jsonBuilder().startObject() + .startObject(SINGLE_MAPPING_NAME) + .startObject("_meta") + .field("version", Version.CURRENT) + .endObject() + .field("dynamic", "strict") + .startObject("properties") + .startObject("name") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new UncheckedIOException("Failed to build mappings for net new system index", e); + } + } + + @Override + public List getRestHandlers( + Settings settings, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster + ) { + return List.of(new CreateNetNewSystemIndexHandler(), new IndexDocHandler()); + } + + private static class CreateNetNewSystemIndexHandler extends 
BaseRestHandler { + + @Override + public String getName() { + return "create net new system index for qa"; + } + + @Override + public List routes() { + return List.of(Route.builder(Method.PUT, "/_net_new_sys_index/_create").build()); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + return channel -> client.admin() + .indices() + .create(new CreateIndexRequest(".net-new-system-index-" + Version.CURRENT.major), new RestToXContentListener<>(channel)); + } + + @Override + public boolean allowSystemIndexAccessByDefault() { + return true; + } + } + + private static class IndexDocHandler extends BaseRestHandler { + @Override + public String getName() { + return "index doc into net new for qa"; + } + + @Override + public List routes() { + return List.of(new Route(POST, "/_net_new_sys_index/{id}"), new Route(PUT, "/_net_new_sys_index/{id}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + IndexRequest indexRequest = new IndexRequest(".net-new-system-index-" + Version.CURRENT.major); + indexRequest.source(request.requiredContent(), request.getXContentType()); + indexRequest.id(request.param("id")); + indexRequest.setRefreshPolicy(request.param("refresh")); + + return channel -> client.index(indexRequest, new RestToXContentListener<>(channel)); + } + + @Override + public boolean allowSystemIndexAccessByDefault() { + return true; + } + } +} diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index c8c87f22f1d43..d551ecc39a1e4 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -12,7 +12,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { diff --git a/qa/wildfly/docker-compose.yml b/qa/wildfly/docker-compose.yml index cd934b1a36499..b1816e1ada57f 100644 --- a/qa/wildfly/docker-compose.yml +++ b/qa/wildfly/docker-compose.yml @@ -20,6 +20,7 @@ services: image: elasticsearch:test environment: - discovery.type=single-node + - xpack.security.enabled=false - "ES_JAVA_OPTS=-Xms512m -Xmx512m" ulimits: memlock: diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 59e4360fc2339..50202cc02fb4c 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -28,7 +28,7 @@ testClusters.all { tasks.named("test").configure { enabled = false } tasks.named("jarHell").configure { enabled = false } -def v7compatiblityNotSupportedTests = { +def v7compatibilityNotSupportedTests = { return [ // Cat API are meant to be consumed by humans, so will not be supported by Compatible REST API 'cat*/*/*', @@ -68,7 +68,24 @@ def v7compatiblityNotSupportedTests = { // translog settings removal is not supported under compatible api 'indices.stats/20_translog/Translog retention settings are deprecated', 'indices.stats/20_translog/Translog retention without soft_deletes', - 'indices.stats/20_translog/Translog stats on closed indices without soft-deletes' + 'indices.stats/20_translog/Translog stats on closed indices without soft-deletes', + + // upgrade api will only get a dummy endpoint returning an exception 
suggesting to use _reindex + 'indices.upgrade/*/*', + + 'search.aggregation/20_terms/*profiler*', // The profiler results aren't backwards compatible. + 'search.aggregation/370_doc_count_field/Test filters agg with doc_count', // Uses profiler for assertions which is not backwards compatible + + 'indices.create/10_basic/Create index without soft deletes', //Make soft-deletes mandatory in 8.0 #51122 - settings changes are not supported in Rest Api compatibility + + 'field_caps/30_filter/Field caps with index filter', //behaviour change after #63692 4-digit dates are parsed as epoch and in quotes as year + + 'indices.forcemerge/10_basic/Check deprecation warning when incompatible only_expunge_deletes and max_num_segments values are both set', //#44761 bug fix, + + 'search/340_type_query/type query', //#47207 type query throws exception in compatible mode + 'search.aggregation/200_top_hits_metric/top_hits aggregation with sequence numbers', // #42809 the use of nested path and filter sort throws an exception + 'search/310_match_bool_prefix/multi_match multiple fields with cutoff_frequency throws exception', //#42654 cutoff_frequency, common terms are not supported. Throwing an exception + ] } tasks.named("yamlRestCompatTest").configure { @@ -76,52 +93,7 @@ tasks.named("yamlRestCompatTest").configure { // Skip these tests on Windows since the blacklist exceeds Windows CLI limits OS.current() != OS.WINDOWS } - - systemProperty 'tests.rest.blacklist', ([ - 'cluster.voting_config_exclusions/10_basic/Throw exception when adding voting config exclusion and specifying both node_ids and node_names', - 'cluster.voting_config_exclusions/10_basic/Throw exception when adding voting config exclusion without specifying nodes', - 'field_caps/30_filter/Field caps with index filter', - 'indices.create/10_basic/Create index without soft deletes', - 'indices.flush/10_basic/Index synced flush rest test', - 'indices.forcemerge/10_basic/Check deprecation warning when incompatible only_expunge_deletes and max_num_segments values are both set', - 'indices.open/10_basic/?wait_for_active_shards default is deprecated', - 'indices.open/10_basic/?wait_for_active_shards=index-setting', - // not fixing this in #70966 - 'indices.put_template/11_basic_with_types/Put template with empty mappings', - 'indices.shrink/30_copy_settings/Copy settings during shrink index', - 'indices.split/30_copy_settings/Copy settings during split index', - 'indices.upgrade/10_basic/Basic test for upgrade indices', - 'indices.upgrade/10_basic/Upgrade indices allow no indices', - 'indices.upgrade/10_basic/Upgrade indices disallow no indices', - 'indices.upgrade/10_basic/Upgrade indices disallow unavailable', - 'indices.upgrade/10_basic/Upgrade indices ignore unavailable', - 'mlt/20_docs/Basic mlt query with docs', - 'mlt/30_unlike/Basic mlt query with unlike', - 'search.aggregation/10_histogram/Deprecated _time order', - 'search.aggregation/200_top_hits_metric/top_hits aggregation with sequence numbers', - 'search.aggregation/20_terms/Deprecated _term order', - 'search.aggregation/51_filter_with_types/Filter aggs with terms lookup and ensure it\'s cached', - 'mtermvectors/11_basic_with_types/Basic tests for multi termvector get', - 'mtermvectors/21_deprecated_with_types/Deprecated camel case and _ parameters should fail in Term Vectors query', - 'mtermvectors/30_mix_typeless_typeful/mtermvectors without types on an index that has types', - 'search/10_source_filtering/docvalue_fields with default format', //use_field_mapping change - 
'search/40_indices_boost/Indices boost using object', //indices_boost - 'search/150_rewrite_on_coordinator/Ensure that we fetch the document only once', //terms_lookup - 'search/171_terms_query_with_types/Terms Query with No.of terms exceeding index.max_terms_count should FAIL', //bulk - 'search/260_parameter_validation/test size=-1 is deprecated', //size=-1 change - 'search/310_match_bool_prefix/multi_match multiple fields with cutoff_frequency throws exception', //cutoff_frequency - 'search/340_type_query/type query', // type_query - probably should behave like match_all - 'search_shards/10_basic/Search shards aliases with and without filters', - 'snapshot.get/10_basic/Get missing snapshot info succeeds when ignore_unavailable is true', - 'snapshot.get/10_basic/Get missing snapshot info throws an exception', - 'snapshot.get/10_basic/Get snapshot info', - 'snapshot.get/10_basic/Get snapshot info contains include_global_state', - 'snapshot.get/10_basic/Get snapshot info when verbose is false', - 'snapshot.get/10_basic/Get snapshot info with metadata', - 'snapshot.get/10_basic/Get snapshot info with index details', - 'suggest/20_completion/Suggestions with source should work' - ] + v7compatiblityNotSupportedTests()) - .join(',') + systemProperty 'tests.rest.blacklist', v7compatibilityNotSupportedTests().join(',') } tasks.named("transformV7RestTests").configure({ task -> @@ -241,6 +213,22 @@ tasks.named("transformV7RestTests").configure({ task -> ) task.replaceValueInMatch("_all.primaries.indexing.types._doc.index_total", 2) + //override for "indices.open/10_basic/?wait_for_active_shards default is deprecated" and "indices.open/10_basic/?wait_for_active_shards=index-setting" + task.addAllowedWarningRegexForTest("\\?wait_for_active_shards=index-setting is now the default behaviour.*", "?wait_for_active_shards=index-setting") + task.removeWarningForTest("the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; " + + "specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" + , "?wait_for_active_shards default is deprecated") + + // override for exception message change in #55291 tests cluster.voting_config_exclusions/10_basic/ + // 'Throw exception when adding voting config exclusion and specifying both node_ids and node_names', + // 'Throw exception when adding voting config exclusion without specifying nodes', + task.replaceValueTextByKeyValue("catch", + '/Please set node identifiers correctly. One and only one of \\[node_name\\], \\[node_names\\] and \\[node_ids\\] has to be set/', + '/You must set \\[node_names\\] or \\[node_ids\\] but not both/') + + // sync_id is no longer available in SegmentInfos.userData // "indices.flush/10_basic/Index synced flush rest test" + task.replaceIsTrue("indices.testing.shards.0.0.commit.user_data.sync_id", "indices.testing.shards.0.0.commit.user_data") + }) tasks.register('enforceYamlTestConvention').configure { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json index 36f341d761531..e828af8a569ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json @@ -32,7 +32,7 @@ } }, "body":{ - "description":"The index, shard, and primary flag to explain. 
Empty means 'explain the first unassigned shard'" + "description":"The index, shard, and primary flag to explain. Empty means 'explain a randomly-chosen unassigned shard'" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.migrate_to_data_tiers.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.migrate_to_data_tiers.json new file mode 100644 index 0000000000000..8d7e4509b68cd --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.migrate_to_data_tiers.json @@ -0,0 +1,34 @@ +{ + "ilm.migrate_to_data_tiers":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html", + "description": "Migrates the indices and ILM policies away from custom node attribute allocation routing to data tiers routing" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ilm/migrate_to_data_tiers", + "methods":[ + "POST" + ] + } + ] + }, + "params": { + "dry_run": { + "type": "boolean", + "description": "If set to true it will simulate the migration, providing a way to retrieve the ILM policies and indices that need to be migrated. The default is false" + } + }, + "body":{ + "description":"Optionally specify a legacy index template name to delete and optionally specify a node attribute name used for index shard routing (defaults to \"data\")", + "required":false + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.disk_usage.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.disk_usage.json new file mode 100644 index 0000000000000..822d6ce9f84df --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.disk_usage.json @@ -0,0 +1,61 @@ +{ + "indices.disk_usage": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html", + "description": "Analyzes the disk usage of each field of an index or data stream" + }, + "stability": "experimental", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/{index}/_disk_usage", + "methods": [ + "POST" + ], + "parts": { + "index": { + "type": "string", + "description": "Comma-separated list of indices or data streams to analyze the disk usage" + } + } + } + ] + }, + "params": { + "run_expensive_tasks": { + "type": "boolean", + "description": "Must be set to [true] in order for the task to be performed. Defaults to false." + }, + "flush": { + "type": "boolean", + "description": "Whether flush or not before analyzing the index disk usage. Defaults to true" + }, + "ignore_unavailable": { + "type": "boolean", + "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type": "boolean", + "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type": "enum", + "options": [ + "open", + "closed", + "hidden", + "none", + "all" + ], + "default": "open", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
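For orientation only, a minimal sketch of invoking the new ilm.migrate_to_data_tiers endpoint in the YAML REST test style used elsewhere in this change. The template name is hypothetical, and the body field names (legacy_template_to_delete, node_attribute) are assumed from the body description above rather than defined by this spec.
- do:
    ilm.migrate_to_data_tiers:
      dry_run: true                                       # simulate the migration and report what would change
      body:
        legacy_template_to_delete: "my-legacy-template"   # hypothetical legacy template name (assumed field name)
        node_attribute: "data"                            # node attribute currently used for shard routing (assumed field name)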
+ } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.field_usage_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.field_usage_stats.json new file mode 100644 index 0000000000000..efdbdfa4d422a --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.field_usage_stats.json @@ -0,0 +1,57 @@ +{ + "indices.field_usage_stats": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html", + "description": "Returns the field usage stats for each field of an index" + }, + "stability": "experimental", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/{index}/_field_usage_stats", + "methods": [ + "GET" + ], + "parts": { + "index": { + "type": "string", + "description": "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params": { + "fields":{ + "type":"list", + "description":"A comma-separated list of fields to include in the stats if only a subset of fields should be returned (supports wildcards)" + }, + "ignore_unavailable": { + "type": "boolean", + "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type": "boolean", + "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type": "enum", + "options": [ + "open", + "closed", + "hidden", + "none", + "all" + ], + "default": "open", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both." + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json index e743a53cab379..f7ef92dfd4266 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json @@ -21,6 +21,10 @@ "type":"string", "description":"The name of the index to freeze" } + }, + "deprecated":{ + "version":"7.14.0", + "description":"Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release." } } ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json index c51f70e141f46..2327519ff2816 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json @@ -21,6 +21,10 @@ "type":"string", "description":"The name of the index to unfreeze" } + }, + "deprecated":{ + "version":"7.14.0", + "description":"Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release." 
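A fuller yamlRestTest for the new field usage stats API is added later in this change (60_field_usage.yml); the sketch below only shows the optional fields filter, with hypothetical index and field names.
- do:
    indices.field_usage_stats:
      index: "my-index"          # hypothetical index name
      fields: "name,pri*"        # restrict the response to these fields; wildcards are accepted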
} } ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model_deployment.json index d0c43dbb45659..8b242a34b3e16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model_deployment.json @@ -1,7 +1,7 @@ { "ml.infer_trained_model_deployment":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-infer-trained-model-deployment.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model-deployment.html", "description":"Evaluate a trained model." }, "stability":"experimental", @@ -20,7 +20,8 @@ "parts":{ "model_id":{ "type":"string", - "description":"The ID of the model to perform inference on" + "description":"The unique identifier of the trained model.", + "required":true } } } @@ -30,8 +31,13 @@ "timeout":{ "type":"time", "required":false, - "description":"Controls the time to wait for the inference result" + "description":"Controls the amount of time to wait for inference results.", + "default":"10s" } + }, + "body":{ + "description":"The input text to be evaluated.", + "required":true } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_job.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_job.json index 24fa08e4bff82..52cb95fc56b5b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_job.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_job.json @@ -26,6 +26,31 @@ } ] }, + "params":{ + "ignore_unavailable":{ + "type":"boolean", + "description":"Ignore unavailable indexes (default: false). Only set if datafeed_config is provided." + }, + "allow_no_indices":{ + "type":"boolean", + "description":"Ignore if the source indices expressions resolves to no concrete indices (default: true). Only set if datafeed_config is provided." + }, + "ignore_throttled":{ + "type":"boolean", + "description":"Ignore indices that are marked as throttled (default: true). Only set if datafeed_config is provided." + }, + "expand_wildcards":{ + "type":"enum", + "options":[ + "open", + "closed", + "hidden", + "none", + "all" + ], + "description":"Whether source index expressions should get expanded to open or closed indices (default: open). Only set if datafeed_config is provided." + } + }, "body":{ "description":"The job", "required":true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json index eada6bf35c276..1067c09f3b709 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json @@ -1,7 +1,7 @@ { "ml.start_trained_model_deployment":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-trained-model-deployment.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html", "description":"Start a trained model deployment." 
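The new ml.put_job query parameters above are only honoured when the job body embeds a datafeed_config. A hedged sketch, with a made-up job id, detector, and source index pattern:
- do:
    ml.put_job:
      job_id: "qa-inline-datafeed-job"    # hypothetical job id
      ignore_unavailable: true            # new param; applies to the inline datafeed only
      expand_wildcards: "open"
      body:
        analysis_config:
          bucket_span: "1h"
          detectors:
            - function: "count"
        data_description:
          time_field: "@timestamp"
        datafeed_config:
          indices: [ "metrics-*" ]        # hypothetical source index pattern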
}, "stability":"experimental", @@ -20,7 +20,8 @@ "parts":{ "model_id":{ "type":"string", - "description":"The ID of the model to deploy" + "description":"The unique identifier of the trained model.", + "required":true } } } @@ -30,7 +31,8 @@ "timeout":{ "type":"time", "required":false, - "description":"Controls the time to wait until the model is deployed" + "description":"Controls the amount of time to wait for the model to deploy.", + "default": "20s" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json index fcc6f05899a0b..3e608a890b0a1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json @@ -1,7 +1,7 @@ { "ml.stop_trained_model_deployment":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html", "description":"Stop a trained model deployment." }, "stability":"experimental", @@ -20,7 +20,8 @@ "parts":{ "model_id":{ "type":"string", - "description":"The ID of the model to undeploy" + "description":"The unique identifier of the trained model.", + "required":true } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_metering_archive.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_metering_archive.json new file mode 100644 index 0000000000000..4a7c6e3c0d36c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_metering_archive.json @@ -0,0 +1,33 @@ +{ + "nodes.clear_metering_archive":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html", + "description":"Removes the archived repositories metering information present in the cluster." + }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_nodes/{node_id}/_repositories_metering/{max_archive_version}", + "methods":[ + "DELETE" + ], + "parts":{ + "node_id":{ + "type":"list", + "description":"Comma-separated list of node IDs or names used to limit returned information." + }, + "max_archive_version":{ + "type":"long", + "description":"Specifies the maximum archive_version to be cleared from the archive." + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_metering_info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_metering_info.json new file mode 100644 index 0000000000000..caba879a9a967 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_metering_info.json @@ -0,0 +1,29 @@ +{ + "nodes.get_metering_info":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html", + "description":"Returns cluster repositories metering information." 
+ }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_nodes/{node_id}/_repositories_metering", + "methods":[ + "GET" + ], + "parts":{ + "node_id":{ + "type":"list", + "description":"A comma-separated list of node IDs or names to limit the returned information." + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index acea11146d384..7a13a6c1033c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -127,7 +127,8 @@ "segments", "store", "warmer", - "bulk" + "bulk", + "shards" ], "description":"Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified." } @@ -175,7 +176,8 @@ "segments", "store", "warmer", - "bulk" + "bulk", + "shards" ], "description":"Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified." }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json index b962c06950884..5a29c99612a75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json @@ -1,7 +1,7 @@ { "render_search_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#_validating_templates", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html", "description":"Allows to use the Mustache language to pre-render a search definition." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup.json index 3774df023843d..3869bf8ac9600 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.rollup.json @@ -1,10 +1,10 @@ { "rollup.rollup":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-api.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html", "description":"Rollup an index" }, - "stability":"stable", + "stability":"experimental", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json new file mode 100644 index 0000000000000..132b2639a2e04 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json @@ -0,0 +1,86 @@ +{ + "search_mvt": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html", + "description": "Searches a vector tile for geospatial values. Returns results as a binary Mapbox vector tile." 
+ }, + "stability": "experimental", + "visibility": "public", + "headers": { + "accept": [ + "application/vnd.mapbox-vector-tile" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/{index}/_mvt/{field}/{zoom}/{x}/{y}", + "methods": [ + "GET", + "POST" + ], + "parts": { + "index": { + "type": "list", + "description": "Comma-separated list of data streams, indices, or aliases to search" + }, + "field": { + "type": "string", + "description": "Field containing geospatial data to return" + }, + "zoom": { + "type": "int", + "description": "Zoom level for the vector tile to search" + }, + "x": { + "type": "int", + "description": "X coordinate for the vector tile to search" + }, + "y": { + "type": "int", + "description": "Y coordinate for the vector tile to search" + } + } + } + ] + }, + "params":{ + "exact_bounds":{ + "type":"boolean", + "description":"If false, the meta layer's feature is the bounding box of the tile. If true, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation.", + "default":false + }, + "extent":{ + "type":"int", + "description":"Size, in pixels, of a side of the vector tile.", + "default":4096 + }, + "grid_precision":{ + "type":"int", + "description":"Additional zoom levels available through the aggs layer. Accepts 0-8.", + "default":8 + }, + "grid_type":{ + "type":"enum", + "options":[ + "grid", + "point" + ], + "description":"Determines the geometry type for features in the aggs layer.", + "default":"grid" + }, + "size":{ + "type":"int", + "description":"Maximum number of features to return in the hits layer. Accepts 0-10000.", + "default":10000 + } + }, + "body":{ + "description":"Search request body.", + "required":false + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_kibana.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_kibana.json new file mode 100644 index 0000000000000..b3e961d64df6e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_kibana.json @@ -0,0 +1,24 @@ +{ + "security.enroll_kibana":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-kibana-enrollment.html", + "description":"Allows a kibana instance to configure itself to communicate with a secured elasticsearch cluster." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/enroll/kibana", + "methods":[ + "GET" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_node.json index e0955a38b854f..17b88859033b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.enroll_node.json @@ -13,7 +13,7 @@ "url":{ "paths":[ { - "path":"/_security/enroll_node", + "path":"/_security/enroll/node", "methods":[ "GET" ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_privileges.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_privileges.json index f8aab200cbb51..a67a327b71f30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_privileges.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_user_privileges.json @@ -1,8 +1,8 @@ { "security.get_user_privileges":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html", - "description":"Retrieves application privileges." + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html", + "description":"Retrieves security privileges for the logged in user." }, "stability":"stable", "visibility":"public", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.query_api_keys.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.query_api_keys.json new file mode 100644 index 0000000000000..7e2b9a0bea60e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.query_api_keys.json @@ -0,0 +1,30 @@ +{ + "security.query_api_keys":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html", + "description":"Retrieves information for API keys using a subset of query DSL" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/_query/api_key", + "methods":[ + "GET", + "POST" + ] + } + ] + }, + "params":{}, + "body":{ + "description":"From, size, query, sort and search_after", + "required":false + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_authenticate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_authenticate.json new file mode 100644 index 0000000000000..e1247d87319ea --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_authenticate.json @@ -0,0 +1,28 @@ +{ + "security.saml_authenticate":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html", + "description":"Exchanges a SAML Response message for an Elasticsearch access token and refresh token pair" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/saml/authenticate", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The SAML response to authenticate", + 
"required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_invalidate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_invalidate.json new file mode 100644 index 0000000000000..c18c338817901 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_invalidate.json @@ -0,0 +1,28 @@ +{ + "security.saml_invalidate":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html", + "description":"Consumes a SAML LogoutRequest" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/saml/invalidate", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The LogoutRequest message", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_logout.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_logout.json new file mode 100644 index 0000000000000..148805b16a853 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_logout.json @@ -0,0 +1,28 @@ +{ + "security.saml_logout":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html", + "description":"Invalidates an access token and a refresh token that were generated via the SAML Authenticate API" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/saml/logout", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The tokens to invalidate", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_prepare_authentication.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_prepare_authentication.json new file mode 100644 index 0000000000000..5691e0d0792ff --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_prepare_authentication.json @@ -0,0 +1,28 @@ +{ + "security.saml_prepare_authentication":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html", + "description":"Creates a SAML authentication request" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/saml/prepare", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The realm for which to create the authentication request, identified by either its name or the ACS URL", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_service_provider_metadata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_service_provider_metadata.json new file mode 100644 index 0000000000000..7f7cd557821cc --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.saml_service_provider_metadata.json @@ -0,0 +1,30 @@ +{ + "security.saml_service_provider_metadata":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html", + "description":"Generates SAML metadata for the Elastic stack SAML 
2.0 Service Provider" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_security/saml/metadata/{realm_name}", + "methods":[ + "GET" + ], + "parts":{ + "realm_name":{ + "type":"string", + "description":"The name of the SAML realm to get the metadata for" + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json index fd338fe2511fc..01387918e5278 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json @@ -42,6 +42,10 @@ "type":"boolean", "description":"Whether to include details of each index in the snapshot, if those details are available. Defaults to false." }, + "include_repository":{ + "type":"boolean", + "description":"Whether to include the repository name in the snapshot info. Defaults to true." + }, "verbose":{ "type":"boolean", "description":"Whether to show verbose snapshot info or only show the basic info found in the repository index blob" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.clear_cursor.json b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.clear_cursor.json index 26d4f039bc9f1..f36f623816b6f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.clear_cursor.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.clear_cursor.json @@ -1,7 +1,7 @@ { "sql.clear_cursor":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-pagination.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html", "description":"Clears the SQL cursor" }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.delete_async.json b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.delete_async.json new file mode 100644 index 0000000000000..1a2a6f6c4c052 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.delete_async.json @@ -0,0 +1,31 @@ +{ + "sql.delete_async": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-async-sql-search-api.html", + "description": "Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it." 
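As a sketch of the new include_repository flag on snapshot.get (repository and snapshot names are hypothetical):
- do:
    snapshot.get:
      repository: "my-repo"               # hypothetical repository name
      snapshot: "snap-1"                  # hypothetical snapshot name
      include_repository: false           # new flag added by this change; defaults to true
      index_details: true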
+ }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_sql/async/delete/{id}", + "methods": [ + "DELETE" + ], + "parts": { + "id": { + "type": "string", + "description": "The async search ID" + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.get_async.json b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.get_async.json new file mode 100644 index 0000000000000..d0a5a3c565a18 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.get_async.json @@ -0,0 +1,51 @@ +{ + "sql.get_async": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-api.html", + "description": "Returns the current status and available results for an async SQL search or stored synchronous SQL search" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_sql/async/{id}", + "methods": [ + "GET" + ], + "parts": { + "id": { + "type": "string", + "description": "The async search ID" + } + } + } + ] + }, + "params": { + "delimiter": { + "type": "string", + "description": "Separator for CSV results", + "default": "," + }, + "format": { + "type": "string", + "description": "Short version of the Accept header, e.g. json, yaml" + }, + "keep_alive": { + "type": "time", + "description": "Retention period for the search and its results", + "default": "5d" + }, + "wait_for_completion_timeout": { + "type": "time", + "description": "Duration to wait for complete results" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.get_async_status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.get_async_status.json new file mode 100644 index 0000000000000..d433063d93c85 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.get_async_status.json @@ -0,0 +1,31 @@ +{ + "sql.get_async_status": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-async-sql-search-status-api.html", + "description": "Returns the current status of an async SQL search or a stored synchronous SQL search" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_sql/async/status/{id}", + "methods": [ + "GET" + ], + "parts": { + "id": { + "type": "string", + "description": "The async search ID" + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.query.json index 2cd1f9aca0367..a3fe47ecdf40e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.query.json @@ -1,7 +1,7 @@ { "sql.query":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-rest-overview.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html", "description":"Executes a SQL request" }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.translate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.translate.json index 09623c9bbae92..99f9216f88ce6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/sql.translate.json +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/api/sql.translate.json @@ -1,7 +1,7 @@ { "sql.translate":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html", "description":"Translates SQL into Elasticsearch queries" }, "stability":"stable", diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json new file mode 100644 index 0000000000000..2cdc2f3bc9aea --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json @@ -0,0 +1,33 @@ +{ + "cluster.post_voting_config_exclusions_with_node_name_part":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html", + "description":"Updates the cluster voting config exclusions by node_name (not node ids or node names)." + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] + }, + "url":{ + "paths":[ + { + "path":"/_cluster/voting_config_exclusions/{node_name}", + "methods":[ + "POST" + ], + "parts":{ + "node_name":{ + "type":"string", + "description":"A comma-separated list of node descriptors of the nodes to exclude from the voting configuration." + } + }, + "deprecated":{ + "version":"7.8.0", + "description":"node_name is deprecated, use node_names or node_ids instead" + } + } + ] + } + } +} diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/cluster.voting_config_exclusions/10_basic_compat.yml b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/cluster.voting_config_exclusions/10_basic_compat.yml new file mode 100644 index 0000000000000..3019a77957fee --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/cluster.voting_config_exclusions/10_basic_compat.yml @@ -0,0 +1,20 @@ +--- +setup: + - skip: + version: "9.0.0 - " + reason: "compatible from 8.x to 7.x" + features: + - "headers" + - "warnings_regex" + +--- +"Throw exception when adding voting config exclusion by specifying a 'node_name'": + - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + cluster.post_voting_config_exclusions_with_node_name_part: + node_name: someNodeName + warnings_regex: + - ".* /_cluster/voting_config_exclusions/\\{node_name\\} has been removed. 
.*" + catch: /\[node_name\] has been removed, you must set \[node_names\] or \[node_ids\]/ diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/indices.deprecated.upgrade/10_basic_upgrade.yml b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/indices.deprecated.upgrade/10_basic_upgrade.yml new file mode 100644 index 0000000000000..f68372b4474e9 --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/indices.deprecated.upgrade/10_basic_upgrade.yml @@ -0,0 +1,44 @@ +--- +setup: + - skip: + version: "9.0.0 - " + reason: "compatible from 8.x to 7.x" + features: + - "headers" + - "allowed_warnings_regex" + +--- +Basic test for upgrade indices: + - skip: + version: " - 7.10.99" + reason: "_upgrade api is deprecated since 7.11.0" + features: + - "warnings" + - do: + indices.create: + index: "test_index" + body: + settings: + index: + number_of_replicas: 0 + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + - do: + catch: "bad_request" + indices.upgrade: + index: "test_index" + warnings: + - "The _upgrade API is no longer useful and will be removed. Instead, see _reindex\ + \ API." + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + - match: + status: 400 + - match: + error.reason: "/Upgrade.action.(GET|POST).(_upgrade|/test_index/_upgrade).was.removed,.use._reindex.API.instead/" diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_cutoff_frequency.yml b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_cutoff_frequency.yml new file mode 100644 index 0000000000000..2d645a9419171 --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_cutoff_frequency.yml @@ -0,0 +1,105 @@ +--- +setup: + - skip: + version: "9.0.0 - " + reason: "compatible from 8.x to 7.x" + features: + - "headers" + - "allowed_warnings_regex" + - do: + indices.create: + index: "test" + body: + mappings: + properties: + my_field1: + type: "text" + my_field2: + type: "text" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + - do: + index: + index: "test" + id: 1 + body: + my_field1: "brown fox jump" + my_field2: "xylophone" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + - do: + indices.refresh: {} + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + +--- +multi_match multiple fields with cutoff_frequency throws exception: +- do: + catch: "/cutoff_freqency is not supported. 
The \\[multi_match\\] query can skip block of documents efficiently if the total number of hits is not tracked/" + search: + rest_total_hits_as_int: true + index: "test" + body: + query: + multi_match: + query: "brown" + type: "bool_prefix" + fields: + - "my_field1" + - "my_field2" + cutoff_frequency: 0.001 + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + +--- +match with cutoff_frequency throws exception: + - do: + catch: "/cutoff_freqency is not supported. The \\[match\\] query can skip block of documents efficiently if the total number of hits is not tracked/" + search: + rest_total_hits_as_int: true + index: "test" + body: + query: + match: + my_field1: + query: "brown" + type: "bool_prefix" + cutoff_frequency: 0.001 + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + +--- +common querythrows exception: + - do: + catch: "/Common Terms Query usage is not supported. Use \\[match\\] query which can efficiently skip blocks of documents if the total number of hits is not tracked./" + search: + rest_total_hits_as_int: true + index: "test" + body: + query: + common: + my_field1: + query: "brown" + type: "bool_prefix" + cutoff_frequency: 0.001 + low_freq_operator: or + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_type_query.yml b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_type_query.yml new file mode 100644 index 0000000000000..fa4e20fdfa6fe --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_type_query.yml @@ -0,0 +1,52 @@ +--- +setup: + - skip: + features: + - "headers" + - "allowed_warnings_regex" +--- +type query throws exception when used: + - do: + index: + index: "test1" + id: 1 + type: "cat" + refresh: true + body: + foo: "bar" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + + - do: + catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ + search: + rest_total_hits_as_int: true + index: "test1" + body: + query: + type: + value: "cat" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + + - do: + catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ + search: + rest_total_hits_as_int: true + index: "test1" + body: + query: + type: + value: "_doc" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/sort/10_nested_path_filter.yml 
b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/sort/10_nested_path_filter.yml new file mode 100644 index 0000000000000..536ad86378e69 --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/sort/10_nested_path_filter.yml @@ -0,0 +1,149 @@ +--- +setup: +- skip: + features: + - "headers" + - "allowed_warnings_regex" +- do: + indices.create: + index: "my-index" + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + offer: + type: "nested" +- do: + index: + index: "my-index" + id: 1 + refresh: true + body: + offer: + price: 10 + color: blue + + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + +- do: + indices.create: + index: "my-locations" + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + pin: + properties: + location: + type: geo_point + offer: + type: "nested" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + +- do: + index: + index: "my-locations" + id: 1 + refresh: true + body: + offer: + price: 10 + color: blue + pin: + location: + lat: 40.12 + lon: -71.34 + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + + + + + +--- +"Sort with nested_path throws exception": +- do: + catch: /\[nested_path\] has been removed in favour of the \[nested\] parameter/ + search: + rest_total_hits_as_int: true + index: "my-index" + body: + sort: + - offer.price: + mode: avg + order: asc + nested_path: offer + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + +--- +"Sort with nested_filter throws exception": + - do: + catch: /\[nested_filter\] has been removed in favour of the \[nested\] parameter/ + search: + rest_total_hits_as_int: true + index: "my-index" + body: + sort: + - offer.price: + mode: avg + order: asc + nested_filter: + term: + offer.color: blue + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + + +--- +"Geo search with nested_filter throws exception": + - do: + catch: /\[nested_filter\] has been removed in favour of the \[nested\] parameter/ + search: + rest_total_hits_as_int: true + index: "my-locations" + body: + query: + match_all: {} + sort: + _geo_distance: + pin.location: + - -70 + - 40 + nested_filter: + term: + offer.color: blue + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + +--- +"Geo search with nested_path throws exception": + - do: + catch: /\[nested_path\] has been removed in favour of the \[nested\] parameter/ + search: + rest_total_hits_as_int: true + index: "my-locations" + body: + query: + match_all: {} + sort: + _geo_distance: + pin.location: + - -70 + - 40 + nested_path: "offer" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.snapshots/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.snapshots/10_basic.yml index 
a6ac0d9c52c6f..f7d60671c7e88 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.snapshots/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.snapshots/10_basic.yml @@ -1,8 +1,5 @@ --- "Help": - - skip: - version: " - 7.99.99" - reason: Repository field added in 8.0 - do: cat.snapshots: @@ -26,10 +23,6 @@ $/ --- "Test cat snapshots output": - - skip: - version: " - 7.99.99" - reason: Repository field added in 8.0 - - do: snapshot.create_repository: repository: test_cat_snapshots_1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml index 08b1a7ad8d690..490c093904a89 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml @@ -1,5 +1,52 @@ --- "Indices recovery test": + - skip: + # todo: change after backport + version: " - 7.99.99" + reason: recovery from snapshot bytes not available until 8.0 + + - do: + indices.create: + index: test_1 + body: + settings: + index: + number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.recovery: + index: [test_1] + human: true + + - match: { test_1.shards.0.type: "EMPTY_STORE" } + - match: { test_1.shards.0.stage: "DONE" } + - match: { test_1.shards.0.primary: true } + - match: { test_1.shards.0.start_time: /^2\d\d\d-.+/ } + - match: { test_1.shards.0.target.ip: /^\d+\.\d+\.\d+\.\d+$/ } + - gte: { test_1.shards.0.index.files.total: 0 } + - gte: { test_1.shards.0.index.files.reused: 0 } + - gte: { test_1.shards.0.index.files.recovered: 0 } + - match: { test_1.shards.0.index.files.percent: /^\d+\.\d\%$/ } + - gte: { test_1.shards.0.index.size.total_in_bytes: 0 } + - gte: { test_1.shards.0.index.size.reused_in_bytes: 0 } + - gte: { test_1.shards.0.index.size.recovered_in_bytes: 0 } + - gte: { test_1.shards.0.index.size.recovered_from_snapshot_in_bytes: 0 } + - match: { test_1.shards.0.index.size.percent: /^\d+\.\d\%$/ } + - gte: { test_1.shards.0.index.source_throttle_time_in_millis: 0 } + - gte: { test_1.shards.0.index.target_throttle_time_in_millis: 0 } + - gte: { test_1.shards.0.translog.recovered: 0 } + - gte: { test_1.shards.0.translog.total: -1 } + - gte: { test_1.shards.0.translog.total_on_start: 0 } + - gte: { test_1.shards.0.translog.total_time_in_millis: 0 } + - gte: { test_1.shards.0.verify_index.check_index_time_in_millis: 0 } + - gte: { test_1.shards.0.verify_index.total_time_in_millis: 0 } + +--- +"Indices recovery test without recovery from snapshot": - do: indices.create: @@ -71,27 +118,27 @@ index: [test_2] human: true - - match: { test_2.shards.0.type: "EXISTING_STORE" } - - match: { test_2.shards.0.stage: "DONE" } - - match: { test_2.shards.0.primary: true } - - match: { test_2.shards.0.start_time: /^2\d\d\d-.+/ } - - match: { test_2.shards.0.target.ip: /^\d+\.\d+\.\d+\.\d+$/ } - - gte: { test_2.shards.0.index.files.total: 0 } - - gte: { test_2.shards.0.index.files.reused: 0 } - - gte: { test_2.shards.0.index.files.recovered: 0 } - - match: { test_2.shards.0.index.files.percent: /^\d+\.\d\%$/ } - - gte: { test_2.shards.0.index.size.total_in_bytes: 0 } - - gte: { test_2.shards.0.index.size.reused_in_bytes: 0 } - - gte: { test_2.shards.0.index.size.recovered_in_bytes: 0 } - - match: { test_2.shards.0.index.size.percent: /^\d+\.\d\%$/ } - - gte: { 
test_2.shards.0.index.source_throttle_time_in_millis: 0 } - - gte: { test_2.shards.0.index.target_throttle_time_in_millis: 0 } - - gte: { test_2.shards.0.translog.recovered: 0 } - - gte: { test_2.shards.0.translog.total: 0 } - - gte: { test_2.shards.0.translog.total_on_start: 0 } - - gte: { test_2.shards.0.translog.total_time_in_millis: 0 } - - gte: { test_2.shards.0.verify_index.check_index_time_in_millis: 0 } - - gte: { test_2.shards.0.verify_index.total_time_in_millis: 0 } + - match: { test_2.shards.0.type: "EXISTING_STORE" } + - match: { test_2.shards.0.stage: "DONE" } + - match: { test_2.shards.0.primary: true } + - match: { test_2.shards.0.start_time: /^2\d\d\d-.+/ } + - match: { test_2.shards.0.target.ip: /^\d+\.\d+\.\d+\.\d+$/ } + - gte: { test_2.shards.0.index.files.total: 0 } + - gte: { test_2.shards.0.index.files.reused: 0 } + - gte: { test_2.shards.0.index.files.recovered: 0 } + - match: { test_2.shards.0.index.files.percent: /^\d+\.\d\%$/ } + - gte: { test_2.shards.0.index.size.total_in_bytes: 0 } + - gte: { test_2.shards.0.index.size.reused_in_bytes: 0 } + - gte: { test_2.shards.0.index.size.recovered_in_bytes: 0 } + - match: { test_2.shards.0.index.size.percent: /^\d+\.\d\%$/ } + - gte: { test_2.shards.0.index.source_throttle_time_in_millis: 0 } + - gte: { test_2.shards.0.index.target_throttle_time_in_millis: 0 } + - gte: { test_2.shards.0.translog.recovered: 0 } + - gte: { test_2.shards.0.translog.total: 0 } + - gte: { test_2.shards.0.translog.total_on_start: 0 } + - gte: { test_2.shards.0.translog.total_time_in_millis: 0 } + - gte: { test_2.shards.0.verify_index.check_index_time_in_millis: 0 } + - gte: { test_2.shards.0.verify_index.total_time_in_millis: 0 } --- "Indices recovery test index name not matching": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/50_disk_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/50_disk_usage.yml new file mode 100644 index 0000000000000..d3e2145b91cff --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/50_disk_usage.yml @@ -0,0 +1,105 @@ +--- +setup: + - skip: + version: " - 7.14.99" + reason: analyze index disk usage API is introduced in 7.15 + +--- +"Disk usage stats": + - do: + indices.create: + index: testindex + body: + mappings: + properties: + name: + type: text + quantity: + type: long + doc_values: false + genre: + type: keyword + doc_values: true + - do: + index: + index: testindex + body: { "name": "foo", "quantity": 0, "genre": [ "rock", "pop" ] } + - do: + index: + index: testindex + body: { "name": "bar", "quantity": 99, "genre": "pop" } + - do: + index: + index: testindex + body: { "name": "baz", "quantity": 50, "genre": "jazz" } + - do: + index: + index: testindex + body: { "name": "bar & baz", "quantity": 1000, "genre": "blue" } + - do: + index: + index: testindex + body: { "name": "foobar", "quantity": 1000, "genre": "country" } + - do: + indices.disk_usage: { index: "testindex", "run_expensive_tasks": true } + + - gt: { testindex.store_size_in_bytes: 100 } + # all_fields + - gt: { testindex.all_fields.total_in_bytes: 0 } + - gt: { testindex.all_fields.inverted_index.total_in_bytes: 0 } + - gt: { testindex.all_fields.stored_fields_in_bytes: 0 } + - gt: { testindex.all_fields.doc_values_in_bytes: 0 } + - gt: { testindex.all_fields.points_in_bytes: 0 } + - match: { testindex.all_fields.term_vectors_in_bytes: 0 } + + # genre + - gt: { testindex.fields.genre.total_in_bytes: 0 } + - gt: { 
testindex.fields.genre.inverted_index.total_in_bytes: 0 } + - match: { testindex.fields.genre.stored_fields_in_bytes: 0 } + - gt: { testindex.fields.genre.doc_values_in_bytes: 0 } + - match: { testindex.fields.genre.points_in_bytes: 0 } + - match: { testindex.fields.genre.norms_in_bytes: 0 } + - match: { testindex.fields.genre.term_vectors_in_bytes: 0 } + + # name + - gt: { testindex.fields.name.total_in_bytes: 0 } + - gt: { testindex.fields.name.inverted_index.total_in_bytes: 0 } + - match: { testindex.fields.name.stored_fields_in_bytes: 0 } + - match: { testindex.fields.name.doc_values_in_bytes: 0 } + - match: { testindex.fields.name.points_in_bytes: 0 } + - match: { testindex.fields.name.term_vectors_in_bytes: 0 } + + # quantity + - gt: { testindex.fields.quantity.total_in_bytes: 0 } + - match: { testindex.fields.quantity.inverted_index.total_in_bytes: 0 } + - match: { testindex.fields.quantity.stored_fields_in_bytes: 0 } + - match: { testindex.fields.quantity.doc_values_in_bytes: 0 } + - gt: { testindex.fields.quantity.points_in_bytes: 0 } + - match: { testindex.fields.quantity.norms_in_bytes: 0 } + - match: { testindex.fields.quantity.term_vectors_in_bytes: 0 } + + # _source + - gt: { testindex.fields._source.total_in_bytes: 0 } + - match: { testindex.fields._source.inverted_index.total_in_bytes: 0 } + - gt: { testindex.fields._source.stored_fields_in_bytes: 0 } + - match: { testindex.fields._source.doc_values_in_bytes: 0 } + - match: { testindex.fields._source.points_in_bytes: 0 } + - match: { testindex.fields._source.norms_in_bytes: 0 } + - match: { testindex.fields._source.term_vectors_in_bytes: 0 } + + # _id + - gt: { testindex.fields._id.total_in_bytes: 0 } + - gt: { testindex.fields._id.inverted_index.total_in_bytes: 0 } + - gt: { testindex.fields._id.stored_fields_in_bytes: 0 } + - match: { testindex.fields._id.doc_values_in_bytes: 0 } + - match: { testindex.fields._id.points_in_bytes: 0 } + - match: { testindex.fields._id.norms_in_bytes: 0 } + - match: { testindex.fields._id.term_vectors_in_bytes: 0 } + + # _seq_no + - gt: { testindex.fields._seq_no.total_in_bytes: 0 } + - match: { testindex.fields._seq_no.inverted_index.total_in_bytes: 0 } + - match: { testindex.fields._seq_no.stored_fields_in_bytes: 0 } + - gt: { testindex.fields._seq_no.points_in_bytes: 0 } + - match: { testindex.fields._seq_no.norms_in_bytes: 0 } + - match: { testindex.fields._seq_no.term_vectors_in_bytes: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/60_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/60_field_usage.yml new file mode 100644 index 0000000000000..e9c8fc2ad8ae8 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/60_field_usage.yml @@ -0,0 +1,208 @@ +--- +setup: + - skip: + version: " - 7.14.99" + reason: field usage stats API is introduced in 7.15 + +--- +"Field usage stats": + - do: + indices.create: + index: testindex + body: + settings: + routing.rebalance.enable: none + index.number_of_shards: 1 + index.number_of_replicas: 0 + mappings: + properties: + name: + type: text + "index_options": "offsets" + "term_vector" : "with_positions_offsets" + price: + type: double + + - do: + index: + index: testindex + body: { "name": "foo", "price": 100, "day" : "2003/09/06" } + + - do: + index: + index: testindex + body: { "name": "bar", "price": 120, "day" : "2003/09/07" } + + - do: + index: + index: testindex + body: { "name": "baz", "price": 100, "day" : "2003/09/13" } 
+ + - do: + index: + index: testindex + body: { "name": "bar & baz", "price": 220 } + - do: + index: + index: testindex + id: testid + body: { "name": "foo bar", "price": 150, "day" : "2003/09/07" } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + bool: + must: + - match_phrase: + name: "foo bar" + - range: + day: + gte: "2003/09/07" + sort: [ "price" ] + + - do: + indices.field_usage_stats: { index: testindex } + + - is_true: testindex + - length: { testindex.shards: 1 } + - is_true: testindex.shards.0.tracking_id + - gt: { testindex.shards.0.tracking_started_at_millis: 0 } + - is_true: testindex.shards.0.routing + - is_true: testindex.shards.0.routing.state + - is_true: testindex.shards.0.routing.primary + - is_true: testindex.shards.0.routing.node + - is_true: testindex.shards.0.stats + + # all_fields + - gt: { testindex.shards.0.stats.all_fields.any: 0 } + - gt: { testindex.shards.0.stats.all_fields.inverted_index.terms: 0 } + - gt: { testindex.shards.0.stats.all_fields.inverted_index.postings: 0 } + - gt: { testindex.shards.0.stats.all_fields.inverted_index.proximity: 0 } + - gt: { testindex.shards.0.stats.all_fields.stored_fields: 0 } + - gt: { testindex.shards.0.stats.all_fields.doc_values: 0 } + - gt: { testindex.shards.0.stats.all_fields.points: 0 } + - match: { testindex.shards.0.stats.all_fields.norms: 0 } + - match: { testindex.shards.0.stats.all_fields.term_vectors: 0 } + - gt: { testindex.shards.0.stats.all_fields.inverted_index.term_frequencies: 0 } + - gt: { testindex.shards.0.stats.all_fields.inverted_index.positions: 0 } + - match: { testindex.shards.0.stats.all_fields.inverted_index.offsets: 0 } + - match: { testindex.shards.0.stats.all_fields.inverted_index.payloads: 0 } + + # name + - gt: { testindex.shards.0.stats.fields.name.any: 0 } + - gt: { testindex.shards.0.stats.fields.name.inverted_index.terms: 0 } + - gt: { testindex.shards.0.stats.fields.name.inverted_index.postings: 0 } + - gt: { testindex.shards.0.stats.fields.name.inverted_index.proximity: 0 } + - match: { testindex.shards.0.stats.fields.name.stored_fields: 0 } + - match: { testindex.shards.0.stats.fields.name.doc_values: 0 } + - match: { testindex.shards.0.stats.fields.name.points: 0 } + - match: { testindex.shards.0.stats.fields.name.norms: 0 } + - match: { testindex.shards.0.stats.fields.name.term_vectors: 0 } + - gt: { testindex.shards.0.stats.fields.name.inverted_index.term_frequencies: 0 } + - gt: { testindex.shards.0.stats.fields.name.inverted_index.positions: 0 } + - match: { testindex.shards.0.stats.fields.name.inverted_index.offsets: 0 } + - match: { testindex.shards.0.stats.fields.name.inverted_index.payloads: 0 } + + # price + - gt: { testindex.shards.0.stats.fields.price.any: 0 } + - match: { testindex.shards.0.stats.fields.price.inverted_index.terms: 0 } + - match: { testindex.shards.0.stats.fields.price.inverted_index.postings: 0 } + - match: { testindex.shards.0.stats.fields.price.inverted_index.proximity: 0 } + - match: { testindex.shards.0.stats.fields.price.stored_fields: 0 } + - gt: { testindex.shards.0.stats.fields.price.doc_values: 0 } + - match: { testindex.shards.0.stats.fields.price.points: 0 } + - match: { testindex.shards.0.stats.fields.price.norms: 0 } + - match: { testindex.shards.0.stats.fields.price.term_vectors: 0 } + - match: { testindex.shards.0.stats.fields.price.inverted_index.term_frequencies: 0 } + - match: { testindex.shards.0.stats.fields.price.inverted_index.positions: 0 } + - match: { 
testindex.shards.0.stats.fields.price.inverted_index.offsets: 0 } + - match: { testindex.shards.0.stats.fields.price.inverted_index.payloads: 0 } + + # day + - gt: { testindex.shards.0.stats.fields.day.any: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.terms: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.postings: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.proximity: 0 } + - match: { testindex.shards.0.stats.fields.day.stored_fields: 0 } + - gt: { testindex.shards.0.stats.fields.day.doc_values: 0 } + - gt: { testindex.shards.0.stats.fields.day.points: 0 } + - match: { testindex.shards.0.stats.fields.day.norms: 0 } + - match: { testindex.shards.0.stats.fields.day.term_vectors: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.term_frequencies: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.positions: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.offsets: 0 } + - match: { testindex.shards.0.stats.fields.day.inverted_index.payloads: 0 } + + # _source + - gt: { testindex.shards.0.stats.fields._source.any: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.terms: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.postings: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.proximity: 0 } + - gt: { testindex.shards.0.stats.fields._source.stored_fields: 0 } + - match: { testindex.shards.0.stats.fields._source.doc_values: 0 } + - match: { testindex.shards.0.stats.fields._source.points: 0 } + - match: { testindex.shards.0.stats.fields._source.norms: 0 } + - match: { testindex.shards.0.stats.fields._source.term_vectors: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.term_frequencies: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.positions: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.offsets: 0 } + - match: { testindex.shards.0.stats.fields._source.inverted_index.payloads: 0 } + + # _id + - gt: { testindex.shards.0.stats.fields._id.any: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.terms: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.postings: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.proximity: 0 } + - gt: { testindex.shards.0.stats.fields._id.stored_fields: 0 } + - match: { testindex.shards.0.stats.fields._id.doc_values: 0 } + - match: { testindex.shards.0.stats.fields._id.points: 0 } + - match: { testindex.shards.0.stats.fields._id.norms: 0 } + - match: { testindex.shards.0.stats.fields._id.term_vectors: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.term_frequencies: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.positions: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.offsets: 0 } + - match: { testindex.shards.0.stats.fields._id.inverted_index.payloads: 0 } + + - do: + termvectors: + index: testindex + id: testid + term_statistics : true + fields: name + + - do: + indices.field_usage_stats: { index: testindex } + + # name + - gt: { testindex.shards.0.stats.fields.name.term_vectors: 0 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase: + name: "foo bar" + + - do: + indices.field_usage_stats: { index: testindex } + + # name + - gt: { testindex.shards.0.stats.fields.name.norms: 0 } + + - do: + search: + body: { + "query" : { "match_phrase" : { "name" : "foo bar" } }, 
+ "highlight" : { "type" : "unified", "fields" : { "*" : {} } } } + + - do: + indices.field_usage_stats: { index: testindex } + + # name + - gt: { testindex.shards.0.stats.fields.name.inverted_index.offsets: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index a3aa6e3593177..1ef7e81bf16df 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -109,6 +109,7 @@ - is_false: nodes.$node_id.indices.segments - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery + - is_false: nodes.$node_id.indices.shards --- "Metric - multi": @@ -166,6 +167,7 @@ - is_false: nodes.$node_id.indices.segments - is_false: nodes.$node_id.indices.translog - is_true: nodes.$node_id.indices.recovery + - is_false: nodes.$node_id.indices.shards --- "Metric - _all include_segment_file_sizes": @@ -223,6 +225,7 @@ - is_true: nodes.$node_id.indices.segments - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery + - is_false: nodes.$node_id.indices.shards - is_true: nodes.$node_id.indices.segments.file_sizes --- @@ -254,6 +257,7 @@ - is_true: nodes.$node_id.indices.segments - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery + - is_false: nodes.$node_id.indices.shards --- "Metric - _all include_unloaded_segments": @@ -300,7 +304,8 @@ nodes.stats: { metric: http } - is_true: nodes.$node_id - - gte: { nodes.$node_id.http.current_open: 1 } + # in rare test scenarios, the value of current_open can be zero + - gte: { nodes.$node_id.http.current_open: 0 } - gte: { nodes.$node_id.http.total_opened: 1 } - is_true: nodes.$node_id.http.clients - gte: { nodes.$node_id.http.clients.0.id: 1 } @@ -314,3 +319,72 @@ - gte: { nodes.$node_id.http.clients.0.request_size_bytes: 0 } # values for clients.0.closed_time_millis, clients.0.x_forwarded_for, and clients.0.x_opaque_id are often # null and cannot be tested here + +--- +"Metric - blank for indices shards": + - skip: + features: [arbitrary_key] + version: " - 7.14.99" + reason: "total shard count added in version 7.15.0" + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: {} + + - is_true: nodes.$node_id.indices.shards + - match: { nodes.$node_id.indices.shards.total_count: 0 } + +--- +"Metric - _all for indices shards": + - skip: + features: [arbitrary_key] + version: " - 7.14.99" + reason: "total shard count added in version 7.15.0" + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: { metric: _all } + + - is_true: nodes.$node_id.indices.shards + - match: { nodes.$node_id.indices.shards.total_count: 0 } + + +--- +"indices shards total count test": + + - skip: + features: ["allowed_warnings", arbitrary_key] + version: " - 7.14.99" + reason: "total shard count added in version 7.15.0" + + - do: + indices.create: + index: index1 + body: + settings: + number_of_shards: "5" + number_of_replicas: "0" + + - do: + indices.create: + index: index2 + body: + settings: + number_of_shards: "3" + number_of_replicas: "1" + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: { metric: _all } + + - gte: { nodes.$node_id.indices.shards.total_count: 1 } diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml index 3eed5dc74151d..e7b1086499b5a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml @@ -142,3 +142,27 @@ setup: - do: clear_scroll: scroll_id: $scroll_id + +--- +"Sliced scroll with doc values": + + - do: + search: + index: test_sliced_scroll + sort: foo + scroll: 1m + body: + slice: + field: foo + id: 0 + max: 2 + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - match: { hits.total.value: 3 } + - length: { hits.hits: 3 } + + - do: + clear_scroll: + scroll_id: $scroll_id diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index bd306fdb83113..4a5c390dc3f62 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -937,4 +937,4 @@ setup: histo: date_histogram: field: date - interval: second + fixed_interval: 1s diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml index de5b9b9b014a0..d7ca13eef4008 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/170_cardinality_metric.yml @@ -216,8 +216,8 @@ setup: --- "profiler int": - skip: - version: " - 7.99.99" - reason: new info added in 8.0.0 to be backported to 7.10.0 + version: " - 7.9.99" + reason: introduced in 7.10.0 - do: search: body: @@ -242,8 +242,8 @@ setup: --- "profiler double": - skip: - version: " - 7.99.99" - reason: new info added in 8.0.0 to be backported to 7.10.0 + version: " - 7.9.99" + reason: introduced in 7.10.0 - do: search: body: @@ -268,8 +268,8 @@ setup: --- "profiler string": - skip: - version: " - 7.99.99" - reason: new info added in 8.0.0 to be backported to 7.10.0 + version: " - 7.9.99" + reason: introduced in 7.10.0 - do: search: body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 1851519a52af5..864772c1ef266 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -756,8 +756,8 @@ setup: --- "string profiler via global ordinals filters implementation": - skip: - version: " - 7.12.99" - reason: filters implementation first supported with sub-aggregators in 7.13.0 + version: " - 7.99.99" + reason: profile info changed in 8.0.0 to be backported to 7.14.0 - do: bulk: index: test_1 @@ -797,7 +797,7 @@ setup: - match: { profile.shards.0.aggregations.0.type: StringTermsAggregatorFromFilters } - match: { profile.shards.0.aggregations.0.description: str_terms } - match: { profile.shards.0.aggregations.0.breakdown.collect_count: 0 } - - match: { profile.shards.0.aggregations.0.debug.delegate: 
FiltersAggregator.FilterByFilter } + - match: { profile.shards.0.aggregations.0.debug.delegate: FilterByFilterAggregator } - match: { profile.shards.0.aggregations.0.debug.delegate_debug.filters.0.query: "str:cow" } - match: { profile.shards.0.aggregations.0.debug.delegate_debug.filters.1.query: "str:pig" } - match: { profile.shards.0.aggregations.0.debug.delegate_debug.filters.2.query: "str:sheep" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/22_terms_disable_opt.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/22_terms_disable_opt.yml index 0d6e90071180c..d21321dd2fff4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/22_terms_disable_opt.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/22_terms_disable_opt.yml @@ -1,9 +1,31 @@ +--- setup: + # Lock to one shard so the tests don't sometimes try to collect from an empty + # index. If they do they'll use the GlobalOrdinals collector regardless of the + # optimization setting. That's fine - but it causes the test to fail so we + # need to dodge that case. - do: - cluster.put_settings: + indices.create: + index: test body: - persistent: - search.aggs.rewrite_to_filter_by_filter: false + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + bulk: + index: test + refresh: true + body: | + { "index": {} } + { "str": "sheep" } + { "index": {} } + { "str": "sheep" } + { "index": {} } + { "str": "cow" } + { "index": {} } + { "str": "pig" } --- teardown: @@ -14,23 +36,67 @@ teardown: search.aggs.rewrite_to_filter_by_filter: null --- -does not use optimization: +disable optimization: - skip: version: " - 7.13.1" reason: setting to disable optimization added in 7.13.2 + - do: - bulk: + cluster.put_settings: + body: + persistent: + search.aggs.rewrite_to_filter_by_filter: false + + + - do: + search: index: test - refresh: true - body: | - { "index": {} } - { "str": "sheep" } - { "index": {} } - { "str": "sheep" } - { "index": {} } - { "str": "cow" } - { "index": {} } - { "str": "pig" } + body: + profile: true + size: 0 + aggs: + str_terms: + terms: + field: str.keyword + - match: { aggregations.str_terms.buckets.0.key: sheep } + - match: { aggregations.str_terms.buckets.1.key: cow } + - match: { aggregations.str_terms.buckets.2.key: pig } + - match: { profile.shards.0.aggregations.0.type: /GlobalOrdinalsStringTermsAggregator.*/ } # Either the standard or low cardinality impl are fine + - match: { profile.shards.0.aggregations.0.description: str_terms } + +--- +enable optimization: + - skip: + version: " - 7.13.1" + reason: setting to disable optimization added in 7.13.2 + + - do: + cluster.put_settings: + body: + persistent: + search.aggs.rewrite_to_filter_by_filter: true + + - do: + search: + index: test + body: + profile: true + size: 0 + aggs: + str_terms: + terms: + field: str.keyword + - match: { aggregations.str_terms.buckets.0.key: sheep } + - match: { aggregations.str_terms.buckets.1.key: cow } + - match: { aggregations.str_terms.buckets.2.key: pig } + - match: { profile.shards.0.aggregations.0.type: /StringTermsAggregatorFromFilters/ } + - match: { profile.shards.0.aggregations.0.description: str_terms } + +--- +enabled by default: + - skip: + version: " - 7.13.1" + reason: setting to disable optimization added in 7.13.2 - do: search: @@ -45,5 +111,5 @@ does not use optimization: - match: { aggregations.str_terms.buckets.0.key: sheep } - match: { 
aggregations.str_terms.buckets.1.key: cow } - match: { aggregations.str_terms.buckets.2.key: pig } - - match: { profile.shards.0.aggregations.0.type: GlobalOrdinalsStringTermsAggregator } + - match: { profile.shards.0.aggregations.0.type: /StringTermsAggregatorFromFilters/ } - match: { profile.shards.0.aggregations.0.description: str_terms } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 7888023c0f603..1753e5b9d8f06 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -363,15 +363,12 @@ setup: ] --- -"Composite aggregation with format": +"Composite aggregation with format and calendar_interval": - skip: version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 - features: warnings - do: - warnings: - - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' search: rest_total_hits_as_int: true index: test @@ -384,7 +381,7 @@ setup: "date": { "date_histogram": { "field": "date", - "interval": "1d", + "calendar_interval": "1d", "format": "yyyy-MM-dd" } } @@ -399,8 +396,6 @@ setup: - match: { aggregations.test.buckets.1.doc_count: 1 } - do: - warnings: - - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' search: rest_total_hits_as_int: true index: test @@ -416,7 +411,7 @@ setup: "date": { "date_histogram": { "field": "date", - "interval": "1d", + "calendar_interval": "1d", "format": "yyyy-MM-dd" } } @@ -429,12 +424,13 @@ setup: - match: { aggregations.test.buckets.0.doc_count: 1 } --- -"Composite aggregation with format and calendar_interval": +"Composite aggregation with invalid format": - skip: - version: " - 7.1.99" - reason: calendar_interval introduced in 7.2.0 + version: " - 7.13.99" + reason: After key parse checking added in 7.14 - do: + catch: /created output it couldn't parse/ search: rest_total_hits_as_int: true index: test @@ -448,47 +444,40 @@ setup: "date_histogram": { "field": "date", "calendar_interval": "1d", - "format": "yyyy-MM-dd" + # mixes week based and month based + "format": "YYYY-MM-dd" } } } ] - - match: {hits.total: 6} - - length: { aggregations.test.buckets: 2 } - - match: { aggregations.test.buckets.0.key.date: "2017-10-20" } - - match: { aggregations.test.buckets.0.doc_count: 1 } - - match: { aggregations.test.buckets.1.key.date: "2017-10-21" } - - match: { aggregations.test.buckets.1.doc_count: 1 } +--- +"Composite aggregation with lossy format": + - skip: + version: " - 7.13.99" + reason: After key parse checking added in 7.14 - do: - search: - rest_total_hits_as_int: true - index: test - body: - aggregations: - test: - composite: - after: { - date: "2017-10-20" - } - sources: [ - { - "date": { - "date_histogram": { - "field": "date", - "calendar_interval": "1d", - "format": "yyyy-MM-dd" - } + catch: /created output it couldn't parse/ + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "calendar_interval": "1d", + # format is lower resolution than buckets, after key will lose data + "format": "yyyy-MM" } } - ] - - - match: {hits.total: 6} - - length: { aggregations.test.buckets: 1 } - - match: { 
aggregations.test.buckets.0.key.date: "2017-10-21" } - - match: { aggregations.test.buckets.0.doc_count: 1 } - + } + ] --- "Composite aggregation with date_histogram offset": - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index cd24da7bd616b..3742fb976a058 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -27,36 +27,6 @@ script: "MovingFunctions.windowMax(values)" --- -"Bad window deprecated interval": - - - skip: - version: " - 7.1.99" - reason: "interval deprecation added in 7.2" - features: "warnings" - - - do: - catch: /\[window\] must be a positive, non-zero integer\./ - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." - search: - rest_total_hits_as_int: true - body: - size: 0 - aggs: - the_histo: - date_histogram: - field: "date" - interval: "1d" - aggs: - the_avg: - avg: - field: "value_field" - the_mov_fn: - moving_fn: - buckets_path: "the_avg" - window: -1 - script: "MovingFunctions.windowMax(values)" ---- "Not under date_histo": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml index d7759afe4a907..8c10ff612b287 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml @@ -142,7 +142,7 @@ 'Misspelled fields get "did you mean"': - skip: version: " - 7.6.99" - reason: Implemented in 8.0 (to be backported to 7.7) + reason: introduced in 7.7.0 - do: catch: /\[significant_terms\] unknown field \[jlp\] did you mean \[jlh\]\?/ search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml index 68b4d8538a6d2..3ef42022714ca 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml @@ -80,8 +80,8 @@ setup: --- "profile at top level": - skip: - version: " - 7.99.99" - reason: Debug information added in 8.0.0 (to be backported to 7.9.0) + version: " - 7.9.99" + reason: introduced in 7.10.0 - do: search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/370_doc_count_field.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/370_doc_count_field.yml index 7e1c9af3fe3f7..8d3eb74c15f38 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/370_doc_count_field.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/370_doc_count_field.yml @@ -150,9 +150,10 @@ setup: --- "Test filters agg with doc_count": - skip: - version: " - 7.12.99" + version: " - 7.99.99" + reason: profile info changed in 8.0.0 to be backported to 7.14.0 features: default_shards - reason: "name changed in 7.13" + - do: search: 
body: @@ -177,7 +178,7 @@ setup: - match: { aggregations.f.buckets.abc.doc_count: 11 } - match: { aggregations.f.buckets.foo.doc_count: 8 } - match: { aggregations.f.buckets.xyz.doc_count: 5 } - - match: { profile.shards.0.aggregations.0.type: FiltersAggregator.FilterByFilter } + - match: { profile.shards.0.aggregations.0.type: FilterByFilterAggregator } # We can't assert that segments_with_doc_count_field is > 0 because we might # end up with two shards and all of the documents with the _doc_count field # may be on one field. We have a test for this in AggregationProfilerIT diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/380_global.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/380_global.yml index a7230f8f7e80f..34e357bd74249 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/380_global.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/380_global.yml @@ -37,8 +37,8 @@ simple: --- profile: - skip: - version: " - 7.99.99" - reason: fixed in 8.0.0 (to be backported to 7.13.0) + version: " - 7.12.99" + reason: fix introduced in 7.13.0 - do: search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml index 87faf54cc59dc..d476426312147 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -86,10 +86,6 @@ setup: --- "field collapsing and from": - - skip: - version: "all" - reason: "Awaits fix: https://github.com/elastic/elasticsearch/issues/52416" - - do: search: rest_total_hits_as_int: true @@ -110,10 +106,6 @@ setup: --- "field collapsing and inner_hits": - - skip: - version: "all" - reason: "Awaits fix: https://github.com/elastic/elasticsearch/issues/52416" - - do: search: rest_total_hits_as_int: true @@ -150,9 +142,7 @@ setup: --- "field collapsing, inner_hits, and fields": - - skip: - version: " - 7.9.99" - reason: the 'fields' option was added in 7.10 + - do: search: rest_total_hits_as_int: true @@ -234,9 +224,7 @@ setup: --- "field collapsing and search_after with invalid sort": - - skip: - version: " - 7.13.99" - reason: "support for collapsing with search_after was added in 7.14" + - do: catch: /Cannot use \[collapse\] in conjunction with \[search_after\] unless the search is sorted on the same field. 
Multiple sort fields are not allowed./ search: @@ -257,9 +245,7 @@ setup: --- "field collapsing and search_after": - - skip: - version: " - 7.13.99" - reason: "support for collapsing with search_after was added in 7.14" + - do: search: index: test @@ -316,10 +302,6 @@ setup: --- "no hits and inner_hits": - - skip: - version: "all" - reason: "Awaits fix: https://github.com/elastic/elasticsearch/issues/52416" - - do: search: rest_total_hits_as_int: true @@ -444,9 +426,7 @@ setup: --- "field collapsing on a field alias": - - skip: - version: " - 7.5.1" - reason: the bug fix was introduced in 7.5.2 + - do: indices.create: index: alias-test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/30_limits.yml index 137fcfce73abc..b4ff84e13353f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/30_limits.yml @@ -30,7 +30,7 @@ setup: - skip: version: " - 7.99.99" - reason: waiting for backport + reason: change was only made in 8.0.0 - do: catch: /\[from\] parameter cannot be negative but was \[-1\]/ @@ -44,7 +44,7 @@ setup: - skip: version: " - 7.99.99" - reason: waiting for backport + reason: change was only made in 8.0.0 - do: catch: /\[from\] parameter cannot be negative but was \[-1\]/ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index a11e1958b4cf7..70f40c413adf0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -858,8 +858,8 @@ Test nested field with sibling field resolving to DocValueFetcher: --- Test token_count inside nested field doesn't fail: - skip: - version: ' - 7.99.99' - reason: 'Added in 8.0 - change on backport' + version: ' - 7.11.99' + reason: 'fix introduced in 7.12.0' - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml index b921b4bd7eb35..b3ad192710cdf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml @@ -133,6 +133,36 @@ setup: body: id: "$point_in_time_id" +--- +"point-in-time with slicing": + - skip: + version: " - 7.14.99" + reason: "support for slicing was added in 7.15" + - do: + open_point_in_time: + index: test + keep_alive: 5m + - set: {id: point_in_time_id} + + - do: + search: + body: + slice: + id: 0 + max: 2 + size: 1 + query: + match: + foo: bar + sort: [{ age: desc }, { id: desc }] + pit: + id: "$point_in_time_id" + + - do: + close_point_in_time: + body: + id: "$point_in_time_id" + --- "wildcard": - skip: @@ -171,8 +201,8 @@ setup: --- "msearch": - skip: - version: " - 7.99.99" - reason: "After backport: 7.9.99 => point in time is introduced in 7.10" + version: " - 7.9.99" + reason: "point in time is introduced in 7.10" - do: open_point_in_time: index: "t*" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.get/10_basic.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.get/10_basic.yml index 109ac83e37f87..b50ece87e9f88 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.get/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.get/10_basic.yml @@ -11,9 +11,6 @@ setup: --- "Get snapshot info": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: indices.create: @@ -34,8 +31,8 @@ setup: repository: test_repo_get_1 snapshot: test_snapshot - - is_true: responses.0.snapshots - - is_true: responses.0.snapshots.0.failures + - is_true: snapshots + - is_true: snapshots.0.failures - do: snapshot.delete: @@ -44,23 +41,15 @@ setup: --- "Get missing snapshot info throws an exception": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: + catch: /snapshot_missing_exception.+ is missing/ snapshot.get: repository: test_repo_get_1 snapshot: test_nonexistent_snapshot - - is_true: responses.0.error - - match: { responses.0.error.type: snapshot_missing_exception } - --- "Get missing snapshot info succeeds when ignore_unavailable is true": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - do: snapshot.get: @@ -68,14 +57,10 @@ setup: snapshot: test_nonexistent_snapshot ignore_unavailable: true - - is_true: responses.0.snapshots + - is_true: snapshots --- "Get snapshot info when verbose is false": - - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" - - do: indices.create: index: test_index @@ -96,13 +81,13 @@ setup: snapshot: test_snapshot verbose: false - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.snapshot: test_snapshot } - - match: { responses.0.snapshots.0.state: SUCCESS } - - is_false: responses.0.snapshots.0.failures - - is_false: responses.0.snapshots.0.shards - - is_false: responses.0.snapshots.0.version - - is_false: responses.0.snapshots.0._meta + - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot } + - match: { snapshots.0.state: SUCCESS } + - is_false: snapshots.0.failures + - is_false: snapshots.0.shards + - is_false: snapshots.0.version + - is_false: snapshots.0._meta - do: snapshot.delete: @@ -112,8 +97,8 @@ setup: --- "Get snapshot info contains include_global_state": - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" + version: " - 6.1.99" + reason: "include_global_state field has been added in the response in 6.2.0" - do: indices.create: @@ -136,10 +121,10 @@ setup: repository: test_repo_get_1 snapshot: test_snapshot_with_include_global_state - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.snapshot: test_snapshot_with_include_global_state } - - match: { responses.0.snapshots.0.state: SUCCESS } - - match: { responses.0.snapshots.0.include_global_state: true } + - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot_with_include_global_state } + - match: { snapshots.0.state: SUCCESS } + - match: { snapshots.0.include_global_state: true } - do: snapshot.delete: @@ -159,10 +144,10 @@ setup: repository: test_repo_get_1 snapshot: test_snapshot_without_include_global_state - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.snapshot: test_snapshot_without_include_global_state } - - match: { responses.0.snapshots.0.state: SUCCESS } - - match: { responses.0.snapshots.0.include_global_state: false } + - is_true: snapshots + - match: { 
snapshots.0.snapshot: test_snapshot_without_include_global_state } + - match: { snapshots.0.state: SUCCESS } + - match: { snapshots.0.include_global_state: false } - do: snapshot.delete: @@ -172,8 +157,8 @@ setup: --- "Get snapshot info with metadata": - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" + version: " - 7.2.99" + reason: "Introduced with 7.3" - do: indices.create: @@ -196,12 +181,12 @@ setup: repository: test_repo_get_1 snapshot: test_snapshot_with_metadata - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.snapshot: test_snapshot_with_metadata } - - match: { responses.0.snapshots.0.state: SUCCESS } - - match: { responses.0.snapshots.0.metadata.taken_by: test } - - match: { responses.0.snapshots.0.metadata.foo.bar: baz } - - is_false: responses.0.snapshots.0.index_details + - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot_with_metadata } + - match: { snapshots.0.state: SUCCESS } + - match: { snapshots.0.metadata.taken_by: test } + - match: { snapshots.0.metadata.foo.bar: baz } + - is_false: snapshots.0.index_details - do: snapshot.delete: @@ -211,8 +196,8 @@ setup: --- "Get snapshot info with index details": - skip: - version: " - 7.99.99" - reason: "8.0 changes get snapshots response format" + version: " - 7.12.99" + reason: "Introduced in 7.13.0" - do: indices.create: @@ -235,15 +220,49 @@ setup: index_details: true human: true - - is_true: responses.0.snapshots - - match: { responses.0.snapshots.0.snapshot: test_snapshot_with_index_details } - - match: { responses.0.snapshots.0.state: SUCCESS } - - gt: { responses.0.snapshots.0.index_details.test_index.shard_count: 0 } - - gt: { responses.0.snapshots.0.index_details.test_index.size_in_bytes: 0 } - - gte: { responses.0.snapshots.0.index_details.test_index.max_segments_per_shard: 0 } - - is_true: responses.0.snapshots.0.index_details.test_index.size + - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot_with_index_details } + - match: { snapshots.0.state: SUCCESS } + - gt: { snapshots.0.index_details.test_index.shard_count: 0 } + - gt: { snapshots.0.index_details.test_index.size_in_bytes: 0 } + - gte: { snapshots.0.index_details.test_index.max_segments_per_shard: 0 } + - is_true: snapshots.0.index_details.test_index.size - do: snapshot.delete: repository: test_repo_get_1 snapshot: test_snapshot_with_index_details + +--- +"Get snapshot info without repository names": + + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + snapshot.create: + repository: test_repo_get_1 + snapshot: test_snapshot_no_repo_name + wait_for_completion: true + + - do: + snapshot.get: + repository: test_repo_get_1 + snapshot: test_snapshot_no_repo_name + include_repository: false + human: true + + - is_true: snapshots + - match: { snapshots.0.snapshot: test_snapshot_no_repo_name } + - match: { snapshots.0.state: SUCCESS } + - is_false: snapshots.0.repository + + - do: + snapshot.delete: + repository: test_repo_get_1 + snapshot: test_snapshot_no_repo_name diff --git a/server/build.gradle b/server/build.gradle index 7688bcb61a61d..3144bce83851f 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -50,6 +50,9 @@ dependencies { api project(":libs:elasticsearch-cli") api 'com.carrotsearch:hppc:0.8.1' + // LZ4 + api 'org.lz4:lz4-java:1.8.0' + // time handling, remove with java 8 time api "joda-time:joda-time:${versions.joda}" @@ -82,10 +85,6 @@ dependencies { } 
-tasks.withType(JavaCompile).configureEach { - options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked" -} - // Until this project is always being formatted with spotless, we need to // guard against `spotless()` not existing. try { @@ -266,6 +265,11 @@ tasks.named("thirdPartyAudit").configure { 'com.google.common.geometry.S2LatLng' ) ignoreMissingClasses 'javax.xml.bind.DatatypeConverter' + + ignoreViolations( + // from java-lz4 + 'net.jpountz.util.UnsafeUtils' + ) } tasks.named("dependencyLicenses").configure { @@ -330,6 +334,10 @@ tasks.named('splitPackagesAudit').configure { 'org.elasticsearch.cli.EnvironmentAwareCommand', 'org.elasticsearch.cli.KeyStoreAwareCommand', 'org.elasticsearch.cli.LoggingAwareCommand', - 'org.elasticsearch.cli.LoggingAwareMultiCommand' + 'org.elasticsearch.cli.LoggingAwareMultiCommand', + + // these should be temporary, query needs package private access to TermScorer though + 'org.apache.lucene.search.XCombinedFieldQuery', + 'org.apache.lucene.search.XMultiNormsLeafSimScorer' } diff --git a/server/licenses/lucene-analyzers-common-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-analyzers-common-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 50a7f683a16f1..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -566b2124f9266408a69e37802c43d04444e718f9 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.9.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..8cd5ba872a31d --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.9.0.jar.sha1 @@ -0,0 +1 @@ +18f3bbff2b7672ea0b9cc18c8110ef69c763ae6b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-backward-codecs-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 77c700f4585c6..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a520246b115a50b4dde232763118445f2a71f8db \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.9.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..932c95dc8cfcb --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.9.0.jar.sha1 @@ -0,0 +1 @@ +fec88b5e71c699ceddc3ae0369481697ac9a5c96 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-core-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index aa05b1ccdaf3d..0000000000000 --- a/server/licenses/lucene-core-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f9509ff821ab7eeaf90fb77cf4437b81afcdd4b \ No newline at end of file diff --git a/server/licenses/lucene-core-8.9.0.jar.sha1 b/server/licenses/lucene-core-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..859cdb2013408 --- /dev/null +++ b/server/licenses/lucene-core-8.9.0.jar.sha1 @@ -0,0 +1 @@ +5c3f72357089f7f0c1ef44bbe7b4c67b6149a5af \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-grouping-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index a747e13e5702f..0000000000000 --- a/server/licenses/lucene-grouping-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc47b57fc219bc830e462fa864e7a9a46d8560c1 \ No 
newline at end of file diff --git a/server/licenses/lucene-grouping-8.9.0.jar.sha1 b/server/licenses/lucene-grouping-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..e28e73fb699bf --- /dev/null +++ b/server/licenses/lucene-grouping-8.9.0.jar.sha1 @@ -0,0 +1 @@ +9440fdd430b1c2dadbf3bc72656848d61e6f747f \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-highlighter-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 6f9f990b75235..0000000000000 --- a/server/licenses/lucene-highlighter-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e18f33997fbf23bbc9b670d32f4b8ecaf9fd94c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.9.0.jar.sha1 b/server/licenses/lucene-highlighter-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..111b023f9502c --- /dev/null +++ b/server/licenses/lucene-highlighter-8.9.0.jar.sha1 @@ -0,0 +1 @@ +94e80bdeab170b0ce1b36a32b6a790d23d7f6d7b \ No newline at end of file diff --git a/server/licenses/lucene-join-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-join-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 63aa70e6dc95e..0000000000000 --- a/server/licenses/lucene-join-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36bad2a791ea52517d96fb9acb74981663a45375 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.9.0.jar.sha1 b/server/licenses/lucene-join-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..9b8322851dfec --- /dev/null +++ b/server/licenses/lucene-join-8.9.0.jar.sha1 @@ -0,0 +1 @@ +5ae97803efd3344597f6b6bdf823b18d130e8851 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-memory-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 48285a09e53e8..0000000000000 --- a/server/licenses/lucene-memory-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73188d10ac95023b26d8d8cc6722d60ec397e4d5 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.9.0.jar.sha1 b/server/licenses/lucene-memory-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..bd7fbba668fc4 --- /dev/null +++ b/server/licenses/lucene-memory-8.9.0.jar.sha1 @@ -0,0 +1 @@ +09423a6dca2a9ba665e644d86a713d9a6b2b0d3f \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-misc-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 666d6cecb28bc..0000000000000 --- a/server/licenses/lucene-misc-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de4e19d27feb4abfc037c7b37b6fd7a842f44c4c \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.9.0.jar.sha1 b/server/licenses/lucene-misc-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..dd4e7baeb3c53 --- /dev/null +++ b/server/licenses/lucene-misc-8.9.0.jar.sha1 @@ -0,0 +1 @@ +067494d621ba2ef1f2e4da3ef167106f00b52051 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-queries-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 0891026fb24b8..0000000000000 --- a/server/licenses/lucene-queries-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc437dc4a9430f0d6523ef1945bad6df188e01bc \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.9.0.jar.sha1 
b/server/licenses/lucene-queries-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..682553f877600 --- /dev/null +++ b/server/licenses/lucene-queries-8.9.0.jar.sha1 @@ -0,0 +1 @@ +c6bda4622abf240da6567a128242f46708fa6c00 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-queryparser-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 66ef546d93fb9..0000000000000 --- a/server/licenses/lucene-queryparser-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8150f28de44e093d08c4cd8437dc244d762ac0e4 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.9.0.jar.sha1 b/server/licenses/lucene-queryparser-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..d1978b318fd67 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.9.0.jar.sha1 @@ -0,0 +1 @@ +95a9d8cf8ca8eaf9f241fd323697d26d211721b2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-sandbox-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index c330e77e7ad9d..0000000000000 --- a/server/licenses/lucene-sandbox-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec6525d002c90a1bc2adead0c2e8483e94b03f7 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.9.0.jar.sha1 b/server/licenses/lucene-sandbox-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..9e0539ea7ae5d --- /dev/null +++ b/server/licenses/lucene-sandbox-8.9.0.jar.sha1 @@ -0,0 +1 @@ +af9f6c0287465e17a520b93b684474712433b293 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-spatial-extras-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 0e0c2c0d61069..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1d6509da3a888ef8d92e810f3dbf487924c627b \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.9.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..cedc910adb51b --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.9.0.jar.sha1 @@ -0,0 +1 @@ +c89f4e78712806e8d5bb4adfb21cf0722ad3f175 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-spatial3d-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index 7c690a4cee045..0000000000000 --- a/server/licenses/lucene-spatial3d-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2eacb8e345e70974b404367187e7b0be388ca95 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.9.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..f0c08e6e6bd29 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.9.0.jar.sha1 @@ -0,0 +1 @@ +0d1238c4e8bf4409b3bb3fbddf2e977b0f19b24b \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.9.0-snapshot-ddc238e5df8.jar.sha1 b/server/licenses/lucene-suggest-8.9.0-snapshot-ddc238e5df8.jar.sha1 deleted file mode 100644 index c7496f1262bf9..0000000000000 --- a/server/licenses/lucene-suggest-8.9.0-snapshot-ddc238e5df8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2562bb10b19dc237a65763a8594734fbc45632c2 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.9.0.jar.sha1 
b/server/licenses/lucene-suggest-8.9.0.jar.sha1 new file mode 100644 index 0000000000000..f6f8bb83d66ae --- /dev/null +++ b/server/licenses/lucene-suggest-8.9.0.jar.sha1 @@ -0,0 +1 @@ +98cb69950b48b829b6605a003c99aa7eb86fa9eb \ No newline at end of file diff --git a/server/licenses/lz4-java-1.8.0.jar.sha1 b/server/licenses/lz4-java-1.8.0.jar.sha1 new file mode 100644 index 0000000000000..5e3536d1b7d29 --- /dev/null +++ b/server/licenses/lz4-java-1.8.0.jar.sha1 @@ -0,0 +1 @@ +4b986a99445e49ea5fbf5d149c4b63f6ed6c6780 \ No newline at end of file diff --git a/server/licenses/lz4-java-LICENSE.txt b/server/licenses/lz4-java-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/server/licenses/lz4-java-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/server/licenses/lz4-java-NOTICE.txt b/server/licenses/lz4-java-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index fea1b1c120e55..c8a984172ef41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -26,7 +26,6 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.StandardCopyOption; -import java.security.AccessControlException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -35,11 +34,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.containsString; @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class ReloadSecureSettingsIT extends ESIntegTestCase { @@ -399,17 +398,7 @@ public void onFailure(Exception e) { private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); - try { - keyStoreWrapper.save(environment.configFile(), password); - } catch (final AccessControlException e) { - if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) { - // this is expected: the save method is extra diligent and wants to make sure - // the keystore is readable, not relying on umask and whatnot. It's ok, we don't - // care about this in tests. 
- } else { - throw e; - } - } + keyStoreWrapper.save(environment.configFile(), password, false); return keyStoreWrapper; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 86f56534e3972..4713bc9a5fb3a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponseHandler; @@ -65,11 +66,9 @@ import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class CancellableTasksIT extends ESIntegTestCase { @@ -259,9 +258,8 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { TestRequest subRequest = generateTestRequest(nodes, 0, between(0, 1)); beforeSendLatches.get(subRequest).countDown(); mainAction.startSubTask(taskId, subRequest, future); - TransportException te = expectThrows(TransportException.class, future::actionGet); - assertThat(te.getCause(), instanceOf(TaskCancelledException.class)); - assertThat(te.getCause().getMessage(), equalTo("The parent task was cancelled, shouldn't start any child tasks")); + TaskCancelledException te = expectThrows(TaskCancelledException.class, future::actionGet); + assertThat(te.getMessage(), equalTo("parent task was cancelled [by user request]")); allowEntireRequest(rootRequest); waitForRootTask(rootTaskFuture); ensureAllBansRemoved(); @@ -331,7 +329,7 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { TaskManager taskManager = internalCluster().getInstance(TransportService.class, node.getName()).getTaskManager(); for (TaskId bannedParent : bannedParents) { if (bannedParent.getNodeId().equals(node.getId()) && randomBoolean()) { - Collection childConns = taskManager.startBanOnChildTasks(bannedParent.getId(), () -> {}); + Collection childConns = taskManager.startBanOnChildTasks(bannedParent.getId(), "", () -> {}); for (Transport.Connection connection : randomSubsetOf(childConns)) { connection.close(); } @@ -366,9 +364,9 @@ static void waitForRootTask(ActionFuture rootTask) { final Throwable cause = ExceptionsHelper.unwrap(e, TaskCancelledException.class); assertNotNull(cause); assertThat(cause.getMessage(), anyOf( - equalTo("The parent task was cancelled, shouldn't start any child tasks"), - containsString("Task cancelled before it started:"), - equalTo("Task was cancelled while executing"))); + equalTo("parent task was cancelled [by user request]"), + equalTo("task cancelled before starting [by user request]"), + equalTo("task cancelled [by user request]"))); } } @@ -476,9 +474,7 @@ protected void doExecute(Task task, TestRequest request, 
ActionListener(listener.map(r -> new TestResponse()), subRequests.size() + 1); transportService.getThreadPool().generic().execute(ActionRunnable.supply(groupedListener, () -> { assertTrue(beforeExecuteLatches.get(request).await(60, TimeUnit.SECONDS)); - if (((CancellableTask) task).isCancelled()) { - throw new TaskCancelledException("Task was cancelled while executing"); - } + ((CancellableTask)task).ensureNotCancelled(); return new TestResponse(); })); for (TestRequest subRequest : subRequests) { @@ -504,7 +500,7 @@ protected void doRun() throws Exception { try { client.executeLocally(TransportTestAction.ACTION, subRequest, latchedListener); } catch (TaskCancelledException e) { - latchedListener.onFailure(new TransportException(e)); + latchedListener.onFailure(new SendRequestTransportException(subRequest.node, ACTION.name(), e)); } } else { transportService.sendRequest(subRequest.node, ACTION.name(), subRequest, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 0fa3cf33b11f0..b69a95defbd5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -180,8 +180,8 @@ public void testGetSnapshotWithBlocks() { try { setClusterReadOnly(true); GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots(REPOSITORY_NAME).execute().actionGet(); - assertThat(response.getSnapshots(REPOSITORY_NAME), hasSize(1)); - assertThat(response.getSnapshots(REPOSITORY_NAME).get(0).snapshotId().getName(), equalTo(SNAPSHOT_NAME)); + assertThat(response.getSnapshots(), hasSize(1)); + assertThat(response.getSnapshots().get(0).snapshotId().getName(), equalTo(SNAPSHOT_NAME)); } finally { setClusterReadOnly(false); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java new file mode 100644 index 0000000000000..8a4f8762801ee --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.apache.lucene.util.English; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class IndexDiskUsageAnalyzerIT extends ESIntegTestCase { + + public void testSimple() throws Exception { + final XContentBuilder mapping = XContentFactory.jsonBuilder(); + mapping.startObject(); + { + mapping.startObject("_doc"); + { + mapping.startObject("properties"); + { + mapping.startObject("english_text"); + mapping.field("type", "text"); + mapping.endObject(); + + mapping.startObject("value"); + mapping.field("type", "long"); + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + + final String index = "test-index"; + client().admin().indices().prepareCreate(index) + .setMapping(mapping) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5))) + .get(); + ensureGreen(index); + + int numDocs = randomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + int value = randomIntBetween(1, 1024); + final XContentBuilder doc = XContentFactory.jsonBuilder() + .startObject() + .field("english_text", English.intToEnglish(value)) + .field("value", value) + .endObject(); + client().prepareIndex(index) + .setId("id-" + i) + .setSource(doc) + .get(); + } + PlainActionFuture future = PlainActionFuture.newFuture(); + client().execute(AnalyzeIndexDiskUsageAction.INSTANCE, + new AnalyzeIndexDiskUsageRequest(new String[] {index}, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true), + future); + + AnalyzeIndexDiskUsageResponse resp = future.actionGet(); + final IndexDiskUsageStats stats = resp.getStats().get(index); + logger.info("--> stats {}", stats); + assertNotNull(stats); + assertThat(stats.getIndexSizeInBytes(), greaterThan(100L)); + + final IndexDiskUsageStats.PerFieldDiskUsage englishField = stats.getFields().get("english_text"); + assertThat(englishField.getInvertedIndexBytes(), greaterThan(0L)); + assertThat(englishField.getStoredFieldBytes(), equalTo(0L)); + assertThat(englishField.getNormsBytes(), greaterThan(0L)); + + final IndexDiskUsageStats.PerFieldDiskUsage valueField = stats.getFields().get("value"); + assertThat(valueField.getInvertedIndexBytes(), equalTo(0L)); + assertThat(valueField.getStoredFieldBytes(), equalTo(0L)); + assertThat(valueField.getPointsBytes(), greaterThan(0L)); + assertThat(valueField.getDocValuesBytes(), greaterThan(0L)); + + assertMetadataFields(stats); + } + + + void assertMetadataFields(IndexDiskUsageStats stats) { + final IndexDiskUsageStats.PerFieldDiskUsage sourceField = stats.getFields().get("_source"); + assertThat(sourceField.getInvertedIndexBytes(), equalTo(0L)); + assertThat(sourceField.getStoredFieldBytes(), greaterThan(0L)); + assertThat(sourceField.getPointsBytes(), equalTo(0L)); + assertThat(sourceField.getDocValuesBytes(), equalTo(0L)); + + final IndexDiskUsageStats.PerFieldDiskUsage idField = stats.getFields().get("_id"); + assertThat(idField.getInvertedIndexBytes(), greaterThan(0L)); + assertThat(idField.getStoredFieldBytes(), greaterThan(0L)); + assertThat(idField.getPointsBytes(), equalTo(0L)); + 
assertThat(idField.getDocValuesBytes(), equalTo(0L)); + + final IndexDiskUsageStats.PerFieldDiskUsage seqNoField = stats.getFields().get("_seq_no"); + assertThat(seqNoField.getInvertedIndexBytes(), equalTo(0L)); + assertThat(seqNoField.getStoredFieldBytes(), equalTo(0L)); + assertThat(seqNoField.getPointsBytes(), greaterThan(0L)); + assertThat(seqNoField.getDocValuesBytes(), greaterThan(0L)); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java new file mode 100644 index 0000000000000..2627e788ebb12 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.aliases; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.is; + +public class NetNewSystemIndexAliasIT extends ESIntegTestCase { + public static final String SYSTEM_INDEX_NAME = ".test-system-idx"; + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), NetNewSystemIndexTestPlugin.class); + } + + public void testGetAliasWithNetNewSystemIndices() throws Exception { + // make sure the net-new system index has been created + { + final IndexRequest request = new IndexRequest(SYSTEM_INDEX_NAME); + request.source("some_field", "some_value"); + IndexResponse resp = client().index(request).get(); + assertThat(resp.status().getStatus(), is(201)); + } + ensureGreen(); + + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + GetAliasesResponse aliasResponse = client().admin().indices().getAliases(getAliasesRequest).get(); + assertThat(aliasResponse.getAliases().size(), is(0)); + } + + public static class NetNewSystemIndexTestPlugin extends Plugin implements SystemIndexPlugin { + + public static final Settings SETTINGS = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.getKey(), "0-1") + .put(IndexMetadata.SETTING_PRIORITY, Integer.MAX_VALUE) + .build(); + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + try (XContentBuilder builder = jsonBuilder()) { + 
builder.startObject(); + { + builder.startObject("_meta"); + builder.field("version", Version.CURRENT.toString()); + builder.endObject(); + + builder.field("dynamic", "strict"); + builder.startObject("properties"); + { + builder.startObject("some_field"); + builder.field("type", "keyword"); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + return Collections.singletonList(SystemIndexDescriptor.builder() + .setIndexPattern(SYSTEM_INDEX_NAME + "*") + .setPrimaryIndex(SYSTEM_INDEX_NAME) + .setDescription("Test system index") + .setOrigin(getClass().getName()) + .setVersionMetaKey("version") + .setMappings(builder) + .setSettings(SETTINGS) + .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) + .setNetNew() + .build() + ); + } catch (IOException e) { + throw new UncheckedIOException("Failed to build " + SYSTEM_INDEX_NAME + " index mappings", e); + } + } + + @Override + public String getFeatureName() { + return this.getClass().getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "test plugin"; + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index f8d3ec3bc01ad..bc88ddb3337a2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequestBuilder; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; @@ -27,6 +28,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.Arrays; @@ -352,14 +354,16 @@ public void testConcurrentAddBlock() throws InterruptedException { } } + @TestLogging( + reason = "https://github.com/elastic/elasticsearch/issues/74345", + value = "org.elasticsearch.action.admin.indices.readonly:DEBUG,org.elasticsearch.cluster.metadata:DEBUG" + ) public void testAddBlockWhileIndexingDocuments() throws Exception { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createIndex(indexName); final APIBlock block = randomAddableBlock(); - int nbDocs = 0; - try { try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), 1000)) { indexer.setFailureAssertion(t -> { @@ -371,11 +375,11 @@ public void testAddBlockWhileIndexingDocuments() throws Exception { }); waitForDocs(randomIntBetween(10, 50), indexer); - assertAcked(client().admin().indices().prepareAddBlock(block, indexName)); + final AddIndexBlockResponse response = client().admin().indices().prepareAddBlock(block, indexName).get(); + assertTrue("Add Index Block request was not acknowledged: " + response, response.isAcknowledged()); indexer.stopAndAwaitStopped(); nbDocs += indexer.totalIndexedDocs(); } - assertIndexHasBlock(block, indexName); } finally { disableIndexBlock(indexName, block); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index cf4de829d559c..600770ea5f163 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.coordination.CoordinationMetadata; @@ -58,7 +59,7 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.metadata.AliasMetadata.newAliasMetadataBuilder; import static org.elasticsearch.cluster.routing.RandomShardRoutingMutator.randomChange; -import static org.elasticsearch.cluster.routing.RandomShardRoutingMutator.randomReason; +import static org.elasticsearch.cluster.routing.UnassignedInfoTests.randomUnassignedInfo; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; @@ -270,7 +271,7 @@ private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds for (int j = 0; j < replicaCount; j++) { UnassignedInfo unassignedInfo = null; if (randomInt(5) == 1) { - unassignedInfo = new UnassignedInfo(randomReason(), randomAlphaOfLength(10)); + unassignedInfo = randomUnassignedInfo(randomAlphaOfLength(10)); } if (availableNodeIds.isEmpty()) { break; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index b73fbf7d8113e..1ef53dbdc8d41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -215,7 +215,7 @@ public void testLargeClusterStatePublishing() throws Exception { int counter = 0; int numberOfFields = 0; while (true) { - mapping.startObject(UUIDs.randomBase64UUID()).field("type", "text").endObject(); + mapping.startObject(UUIDs.randomBase64UUID()).field("type", "boolean").endObject(); counter += 10; // each field is about 10 bytes, assuming compression in place numberOfFields++; if (counter > estimatedBytesSize) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 63c62f8f69002..eabb25afe7322 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -241,6 +241,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { throw new AssertionError(e); } assertNotNull(properties); + @SuppressWarnings("unchecked") Object fieldMapping = ((Map) properties).get("field"); assertNotNull(fieldMapping); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index f483fe58f4626..30239c8d31a68 
100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.routing; -import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -126,7 +125,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale }); internalCluster().stopNode(node1); - try(Store store = new Store(shardId, indexSettings, new SimpleFSDirectory(indexPath), new DummyShardLock(shardId))) { + try(Store store = new Store(shardId, indexSettings, newFSDirectory(indexPath), new DummyShardLock(shardId))) { store.removeCorruptionMarker(); } node1 = internalCluster().startNode(node1DataPathSettings); @@ -204,7 +203,7 @@ private String historyUUID(String node, String indexName) { } private void putFakeCorruptionMarker(IndexSettings indexSettings, ShardId shardId, Path indexPath) throws IOException { - try(Store store = new Store(shardId, indexSettings, new SimpleFSDirectory(indexPath), new DummyShardLock(shardId))) { + try(Store store = new Store(shardId, indexSettings, newFSDirectory(indexPath), new DummyShardLock(shardId))) { store.markStoreCorrupted(new IOException("fake ioexception")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 980f99068c4d6..81444dbfd9544 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -254,7 +254,7 @@ public void testRestoreSnapshotOverLimit() { equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); List snapshotInfos = client.admin().cluster().prepareGetSnapshots("test-repo") - .setSnapshots("test-snap").get().getSnapshots("test-repo"); + .setSnapshots("test-snap").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index 283f825c6f68b..fabbb90eeada3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -237,7 +237,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { private void assertSnapshotExists(String repository, String snapshot) { GetSnapshotsResponse snapshotsStatusResponse = dataNodeClient().admin().cluster().prepareGetSnapshots(repository) .setSnapshots(snapshot).get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots(repository).get(0); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); assertEquals(0, snapshotInfo.failedShards()); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 15f055cdca9ce..d2f73b3a163f7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -252,8 +252,10 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { String nodeWithHigherMatching = randomFrom(internalCluster().nodesInclude(indexName)); Settings nodeWithHigherMatchingSettings = internalCluster().dataPathSettings(nodeWithHigherMatching); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithHigherMatching)); - indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(0, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList())); + if (usually()) { + indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(1, 100)) + .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList())); + } assertAcked(client().admin().cluster().prepareUpdateSettings() .setPersistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "primaries").build())); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index 35b5395a97899..2b13cb8da85e0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -19,13 +19,13 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -781,6 +781,12 @@ void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolea index("test", "1", doc); } + public void testGetRemoteIndex() { + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> client().prepareGet("cluster:index", "id").get()); + assertEquals("Cross-cluster calls are not supported in this context but remote indices were requested: [cluster:index]", + iae.getMessage()); + } + private void assertGetFieldsAlwaysWorks(String index, String docId, String[] fields) { assertGetFieldsAlwaysWorks(index, docId, fields, null); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java index 061bbfd296a4f..968bb84ba85f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java @@ -100,12 +100,15 @@ public void 
testGlobalTemplatesDoNotApply() { assertThat(mappingsResponse.mappings().size(), is(1)); MappingMetadata mappingMetadata = mappingsResponse.mappings().get("a_hidden_index"); assertNotNull(mappingMetadata); + @SuppressWarnings("unchecked") Map propertiesMap = (Map) mappingMetadata.getSourceAsMap().get("properties"); assertNotNull(propertiesMap); assertThat(propertiesMap.size(), is(2)); + @SuppressWarnings("unchecked") Map barMap = (Map) propertiesMap.get("bar"); assertNotNull(barMap); assertThat(barMap.get("type"), is("text")); + @SuppressWarnings("unchecked") Map bazMap = (Map) propertiesMap.get("baz"); assertNotNull(bazMap); assertThat(bazMap.get("type"), is("text")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index afd57eed9dcd0..3dddead57ea98 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -92,6 +92,7 @@ private static void assertMappingsHaveField(GetMappingsResponse mappings, String MappingMetadata indexMappings = mappings.getMappings().get("index"); assertNotNull(indexMappings); Map typeMappingsMap = indexMappings.getSourceAsMap(); + @SuppressWarnings("unchecked") Map properties = (Map) typeMappingsMap.get("properties"); assertTrue("Could not find [" + field + "] in " + typeMappingsMap.toString(), properties.containsKey(field)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 961fa7cfd4fbf..2f90c28d509b7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -12,8 +12,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -33,7 +31,6 @@ import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; @@ -53,7 +50,6 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.NoOpEngine; -import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; @@ -63,10 +59,8 @@ import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.indices.breaker.CircuitBreakerStats; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; -import 
org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -114,7 +108,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; public class IndexShardIT extends ESSingleNodeTestCase { @@ -551,71 +544,6 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul } } - /** Check that the accounting breaker correctly matches the segments API for memory usage */ - private void checkAccountingBreaker() { - CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class); - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - long usedMem = acctBreaker.getUsed(); - assertThat(usedMem, greaterThan(0L)); - NodesStatsResponse response = client().admin().cluster().prepareNodesStats().setIndices(true).setBreaker(true).get(); - NodeStats stats = response.getNodes().get(0); - assertNotNull(stats); - SegmentsStats segmentsStats = stats.getIndices().getSegments(); - CircuitBreakerStats breakerStats = stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING); - assertEquals(usedMem, segmentsStats.getMemoryInBytes()); - assertEquals(usedMem, breakerStats.getEstimated()); - } - - public void testCircuitBreakerIncrementedByIndexShard() throws Exception { - client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get(); - - // Generate a couple of segments - client().prepareIndex("test").setId("1") - .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE).get(); - // Use routing so 2 documents are guaranteed to be on the same shard - String routing = randomAlphaOfLength(5); - client().prepareIndex("test").setId("2") - .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); - client().prepareIndex("test").setId("3") - .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); - - checkAccountingBreaker(); - // Test that force merging causes the breaker to be correctly adjusted - logger.info("--> force merging to a single segment"); - client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(randomBoolean()).get(); - client().admin().indices().prepareRefresh().get(); - checkAccountingBreaker(); - - client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("indices.breaker.total.limit", "1kb")).get(); - - // Test that we're now above the parent limit due to the segments - Exception e = expectThrows(Exception.class, - () -> client().prepareSearch("test") - .addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get()); - logger.info("--> got an expected exception", e); - assertThat(e.getCause(), notNullValue()); - assertThat(e.getCause().getMessage(), containsString("[parent] Data too large, data for [preallocate[aggregations]]")); - - client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder() - .putNull("indices.breaker.total.limit") - .putNull("network.breaker.inflight_requests.overhead")).get(); - - // Test that 
deleting the index causes the breaker to correctly be decremented - logger.info("--> deleting index"); - client().admin().indices().prepareDelete("test").get(); - - // Accounting breaker should now be 0 - CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class); - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat(acctBreaker.getUsed(), equalTo(0L)); - } - public static final IndexShard recoverShard(IndexShard newShard) throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index c69317f83e37c..b9945f7ba62f7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -540,7 +540,8 @@ public void testResolvePath() throws Exception { nodeNameToNodeId.put(cursor.value.getName(), cursor.key); } - final GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{indexName}, false); + final GroupShardsIterator shardIterators = state.getRoutingTable() + .activePrimaryShardsGrouped(new String[] { indexName }, false); final List iterators = iterableAsArrayList(shardIterators); final ShardRouting shardRouting = iterators.iterator().next().nextOrNull(); assertThat(shardRouting, notNullValue()); @@ -573,7 +574,8 @@ public void testResolvePath() throws Exception { private Path getPathToShardData(String indexName, String dirSuffix) { ClusterState state = client().admin().cluster().prepareState().get().getState(); - GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{indexName}, false); + GroupShardsIterator shardIterators = state.getRoutingTable() + .activePrimaryShardsGrouped(new String[] { indexName }, false); List iterators = iterableAsArrayList(shardIterators); ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); ShardRouting shardRouting = shardIterator.nextOrNull(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index d9d2e954c6c00..f447be2b8b59b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -601,7 +601,7 @@ public void testReplicaCorruption() throws Exception { private int numShards(String... 
index) { ClusterState state = client().admin().cluster().prepareState().get().getState(); - GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false); + GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false); return shardIterators.size(); } @@ -627,7 +627,7 @@ private ShardRouting corruptRandomPrimaryFile() throws IOException { private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException { ClusterState state = client().admin().cluster().prepareState().get().getState(); Index test = state.metadata().index("test").getIndex(); - GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); + GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); List iterators = iterableAsArrayList(shardIterators); ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); ShardRouting shardRouting = shardIterator.nextOrNull(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java index 073b0f4dcd787..a982b0548f368 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java @@ -158,7 +158,7 @@ private Set nodeIdsWithIndex(String... indices) { protected int numAssignedShards(String... indices) { ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); - GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); + GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); return allAssignedShardsGrouped.size(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 6bf28b4a95754..c1fc22fa0d074 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -58,7 +58,7 @@ public void testCacheAggs() throws Exception { // see #9500 final SearchResponse r1 = client.prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(dateHistogram("histo").field("f").timeZone(ZoneId.of("+01:00")).minDocCount(0) - .dateHistogramInterval(DateHistogramInterval.MONTH)) + .calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(r1); @@ -69,7 +69,7 @@ public void testCacheAggs() throws Exception { for (int i = 0; i < 10; ++i) { final SearchResponse r2 = client.prepareSearch("index").setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(dateHistogram("histo").field("f") - .timeZone(ZoneId.of("+01:00")).minDocCount(0).dateHistogramInterval(DateHistogramInterval.MONTH)) + .timeZone(ZoneId.of("+01:00")).minDocCount(0).calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(r2); Histogram h1 = r1.getAggregations().get("histo"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java index 5f9ad0446a341..6c8dc221d0614 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java @@ -40,7 +40,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor { TestSystemIndexDescriptor() { super(INDEX_NAME + "*", PRIMARY_INDEX_NAME, "Test system index", getOldMappings(), SETTINGS, INDEX_NAME, 0, "version", "stack", - Version.CURRENT.minimumCompatibilityVersion(), Type.INTERNAL_MANAGED, List.of(), List.of(), null); + Version.CURRENT.minimumCompatibilityVersion(), Type.INTERNAL_MANAGED, List.of(), List.of(), null, false); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index ab5460559074d..64edd111947b9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -231,7 +231,9 @@ public void testUpdateMappingConcurrently() throws Throwable { assertThat(response.isAcknowledged(), equalTo(true)); GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get(); MappingMetadata mappings = getMappingResponse.getMappings().get(indexName); - assertThat(((Map) mappings.getSourceAsMap().get("properties")).keySet(), + @SuppressWarnings("unchecked") + Map properties = (Map) mappings.getSourceAsMap().get("properties"); + assertThat(properties.keySet(), Matchers.hasItem(fieldName)); } } catch (Exception e) { @@ -310,6 +312,7 @@ private void assertMappingOnMaster(final String index, final String... 
fieldName assertTrue(mappingSource.containsKey("properties")); for (String fieldName : fieldNames) { + @SuppressWarnings("unchecked") Map mappingProperties = (Map) mappingSource.get("properties"); if (fieldName.indexOf('.') != -1) { fieldName = fieldName.replace(".", ".properties."); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index e6185fed20859..c008a26e1ce3d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -75,8 +75,6 @@ private void reset() { HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING).forEach(s -> resetSettings.putNull(s.getKey())); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 539f505063e79..0cb870a101fa9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -85,6 +86,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.tasks.Task; @@ -115,6 +117,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -127,6 +130,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; +import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static 
org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -139,6 +143,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.oneOf; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) @@ -622,11 +627,7 @@ public void testSnapshotRecovery() throws Exception { String nodeA = internalCluster().startNode(); logger.info("--> create repository"); - assertAcked(client().admin().cluster().preparePutRepository(REPO_NAME) - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", false) - ).get()); + createRepository(randomBoolean()); ensureGreen(); @@ -634,14 +635,7 @@ public void testSnapshotRecovery() throws Exception { createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPO_NAME, SNAP_NAME) - .setWaitForCompletion(true).setIndices(INDEX_NAME).get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - - assertThat(client().admin().cluster().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get() - .getSnapshots(REPO_NAME).get(0).state(), equalTo(SnapshotState.SUCCESS)); + CreateSnapshotResponse createSnapshotResponse = createSnapshot(INDEX_NAME); client().admin().indices().prepareClose(INDEX_NAME).execute().actionGet(); @@ -774,6 +768,7 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, PeerRecoveryTargetService.Actions.TRANSLOG_OPS, PeerRecoveryTargetService.Actions.FILES_INFO, + PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT, PeerRecoveryTargetService.Actions.FILE_CHUNK, PeerRecoveryTargetService.Actions.CLEAN_FILES, PeerRecoveryTargetService.Actions.FINALIZE @@ -781,6 +776,10 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { final String recoveryActionToBlock = randomFrom(recoveryActions); logger.info("--> will temporarily interrupt recovery action between blue & red on [{}]", recoveryActionToBlock); + if (recoveryActionToBlock.equals(PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT)) { + createSnapshotThatCanBeUsedDuringRecovery(indexName); + } + MockTransportService blueTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName); MockTransportService redTransportService = @@ -817,6 +816,9 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { ).get(); ensureGreen(); + if (recoveryActionToBlock.equals(PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT)) { + assertThat(handlingBehavior.blocksRemaining.get(), is(equalTo(0))); + } searchResponse = client(redNodeName).prepareSearch(indexName).setPreference("_local").get(); assertHitCount(searchResponse, numDocs); } finally { @@ -944,6 +946,7 @@ public void testDisconnectsWhileRecovering() throws Exception { String[] recoveryActions = new String[]{ PeerRecoverySourceService.Actions.START_RECOVERY, 
PeerRecoveryTargetService.Actions.FILES_INFO, + PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT, PeerRecoveryTargetService.Actions.FILE_CHUNK, PeerRecoveryTargetService.Actions.CLEAN_FILES, //RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed @@ -954,6 +957,11 @@ public void testDisconnectsWhileRecovering() throws Exception { final boolean dropRequests = randomBoolean(); logger.info("--> will {} between blue & red on [{}]", dropRequests ? "drop requests" : "break connection", recoveryActionToBlock); + // Generate a snapshot to recover from it if the action that we're blocking is sending the request snapshot files + if (recoveryActionToBlock.equals(PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT)) { + createSnapshotThatCanBeUsedDuringRecovery(indexName); + } + MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName); MockTransportService redMockTransportService = @@ -1045,6 +1053,10 @@ public void testDisconnectsDuringRecovery() throws Exception { ensureSearchable(indexName); assertHitCount(client().prepareSearch(indexName).get(), numDocs); + if (randomBoolean()) { + createSnapshotThatCanBeUsedDuringRecovery(indexName); + } + MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, masterNodeName); MockTransportService blueMockTransportService = @@ -1318,6 +1330,8 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { connection.sendRequest(requestId, action, request, options); }); } + assertGlobalCheckpointIsStableAndSyncedInAllNodes(indexName, nodes,0); + IndexShard shard = internalCluster().getInstance(IndicesService.class, failingNode) .getShardOrNull(new ShardId(resolveIndex(indexName), 0)); final long lastSyncedGlobalCheckpoint = shard.getLastSyncedGlobalCheckpoint(); @@ -1667,9 +1681,9 @@ public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception { internalCluster().startDataOnlyNode(randomNodeDataPathSettings); ensureGreen(); for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) { - assertThat(shardStats.getSeqNoStats().getMaxSeqNo(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - assertThat(shardStats.getSeqNoStats().getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - assertThat(shardStats.getSeqNoStats().getGlobalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); + assertThat(shardStats.getSeqNoStats().getMaxSeqNo(), equalTo(NO_OPS_PERFORMED)); + assertThat(shardStats.getSeqNoStats().getLocalCheckpoint(), equalTo(NO_OPS_PERFORMED)); + assertThat(shardStats.getSeqNoStats().getGlobalCheckpoint(), equalTo(NO_OPS_PERFORMED)); } } @@ -1795,4 +1809,64 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .mapToLong(n -> n.getIndices().getStore().getReservedSize().getBytes()).sum(), equalTo(0L)); } + private void assertGlobalCheckpointIsStableAndSyncedInAllNodes(String indexName, List nodes, int shard) throws Exception { + assertThat(nodes, is(not(empty()))); + + ShardId shardId = new ShardId(resolveIndex(indexName), shard); + IndexShard indexShard = internalCluster().getInstance(IndicesService.class, nodes.get(0)).getShardOrNull(shardId); + assertThat(indexShard, is(notNullValue())); + long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); + + for (String node : nodes) { + IndexShard nodeIndexShard = 
internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId); + assertThat(nodeIndexShard, is(notNullValue())); + + assertThat(nodeIndexShard.seqNoStats().getMaxSeqNo(), is(equalTo(maxSeqNo))); + assertBusy(() -> assertThat(nodeIndexShard.getLastSyncedGlobalCheckpoint(), equalTo(maxSeqNo))); + } + } + + private void createSnapshotThatCanBeUsedDuringRecovery(String indexName) throws Exception { + // Ensure that the safe commit == latest commit + assertBusy(() -> { + ShardStats stats = client().admin().indices().prepareStats(indexName).clear().get() + .asMap().entrySet().stream().filter(e -> e.getKey().shardId().getId() == 0) + .map(Map.Entry::getValue).findFirst().orElse(null); + assertThat(stats, is(notNullValue())); + assertThat(stats.getSeqNoStats(), is(notNullValue())); + + assertThat(Strings.toString(stats.getSeqNoStats()), + stats.getSeqNoStats().getMaxSeqNo(), equalTo(stats.getSeqNoStats().getGlobalCheckpoint())); + }, 60, TimeUnit.SECONDS); + + // Force merge to make sure that the resulting snapshot would contain the same index files as the safe commit + ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge(indexName).setFlush(randomBoolean()).get(); + assertThat(forceMergeResponse.getTotalShards(), equalTo(forceMergeResponse.getSuccessfulShards())); + createRepository(true); + createSnapshot(indexName); + } + + private void createRepository(boolean enableSnapshotPeerRecoveries) { + assertAcked( + client().admin().cluster().preparePutRepository(REPO_NAME) + .setType("fs") + .setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), enableSnapshotPeerRecoveries) + .put("compress", false) + ).get() + ); + } + + private CreateSnapshotResponse createSnapshot(String indexName) { + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPO_NAME, SNAP_NAME) + .setWaitForCompletion(true).setIndices(indexName).get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client().admin().cluster().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get() + .getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); + return createSnapshotResponse; + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java new file mode 100644 index 0000000000000..053c3d32a6b23 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java @@ -0,0 +1,794 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.support.FilterBlobContainer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.MergePolicyConfig; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.recovery.RecoveryStats; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static 
org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class SnapshotBasedIndexRecoveryIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList( + TestRepositoryPlugin.class, + MockTransportService.TestPlugin.class, + InternalSettingsPlugin.class + ); + } + + public static class TestRepositoryPlugin extends Plugin implements RepositoryPlugin { + public static final String FAULTY_TYPE = "faultyrepo"; + public static final String INSTRUMENTED_TYPE = "instrumentedrepo"; + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings + ) { + return Map.of( + FAULTY_TYPE, + metadata -> new FaultyRepository(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings), + INSTRUMENTED_TYPE, + metadata -> new InstrumentedRepo(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings) + ); + } + } + + public static class InstrumentedRepo extends FsRepository { + AtomicLong totalBytesRead = new AtomicLong(); + + public InstrumentedRepo(RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings) { + super(metadata, environment, namedXContentRegistry, clusterService, bigArrays, recoverySettings); + } + + @Override + public BlobContainer shardContainer(IndexId indexId, int shardId) { + return new FilterBlobContainer(super.shardContainer(indexId, shardId)) { + @Override + protected BlobContainer wrapChild(BlobContainer child) { + return child; + } + + @Override + public InputStream readBlob(String blobName) throws IOException { + // Take into account only index files + if (blobName.startsWith("__") == false) { + return super.readBlob(blobName); + } + + return new FilterInputStream(super.readBlob(blobName)) { + @Override + public int read(byte[] b, int off, int len) throws IOException { + int read = super.read(b, off, len); + if (read > 0) { + totalBytesRead.addAndGet(read); + } + return read; + } + }; + } + }; + } + } + + public static class FaultyRepository extends FsRepository { + public FaultyRepository(RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings) { + super(metadata, environment, namedXContentRegistry, clusterService, bigArrays, recoverySettings); + } + + @Override + public BlobContainer shardContainer(IndexId indexId, int shardId) { + return new FilterBlobContainer(super.shardContainer(indexId, shardId)) { + @Override + protected BlobContainer wrapChild(BlobContainer child) { + return child; + } + + @Override + public InputStream readBlob(String blobName) throws IOException { + // Fail only in index files + if (blobName.startsWith("__") == false) { + return super.readBlob(blobName); + } + + return new FilterInputStream(super.readBlob(blobName)) { + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (randomBoolean()) { + // Return random data + for (int i = 0; i < len; i++) { + b[off + i] = randomByte(); + } + return len; + } else { + if 
(randomBoolean()) { + throw new IOException("Unable to read blob " + blobName); + } else { + // Skip some file chunks + int read = super.read(b, off, len); + return read / 2; + } + } + } + }; + } + }; + } + } + + public void testPeerRecoveryUsesSnapshots() throws Exception { + String sourceNode = internalCluster().startDataOnlyNode(); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", sourceNode) + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.INSTRUMENTED_TYPE); + String snapshot = "snap"; + createSnapshot(repoName, snapshot, Collections.singletonList(indexName)); + + String targetNode = internalCluster().startDataOnlyNode(); + + MockTransportService sourceMockTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, sourceNode); + MockTransportService targetMockTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); + + sourceMockTransportService.addSendBehavior(targetMockTransportService, (connection, requestId, action, request, options) -> { + assertNotEquals(PeerRecoveryTargetService.Actions.FILE_CHUNK, action); + connection.sendRequest(requestId, action, request, options); + }); + + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryState, sourceNode, targetNode); + + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, targetNode); + InstrumentedRepo repository = (InstrumentedRepo) repositoriesService.repository(repoName); + + // segments_N and .si files are recovered from the file metadata directly + long expectedRecoveredBytesFromRepo = 0; + long totalBytesRecoveredFromSnapshot = 0; + for (RecoveryState.FileDetail fileDetail : recoveryState.getIndex().fileDetails()) { + totalBytesRecoveredFromSnapshot += fileDetail.recoveredFromSnapshot(); + if (fileDetail.name().startsWith("segments") || fileDetail.name().endsWith(".si")) { + continue; + } + expectedRecoveredBytesFromRepo += fileDetail.recovered(); + } + + assertThat(repository.totalBytesRead.get(), is(equalTo(expectedRecoveredBytesFromRepo))); + + long snapshotSizeForIndex = getSnapshotSizeForIndex(repoName, snapshot, indexName); + assertThat(repository.totalBytesRead.get(), is(greaterThan(0L))); + assertThat(repository.totalBytesRead.get(), is(lessThanOrEqualTo(snapshotSizeForIndex))); + assertThat(totalBytesRecoveredFromSnapshot, is(equalTo(snapshotSizeForIndex))); + + assertDocumentsAreEqual(indexName, numDocs); + } + + public void testFallbacksToSourceNodeWhenSnapshotDownloadFails() throws Exception { + String sourceNode = internalCluster().startDataOnlyNode(); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", sourceNode) + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.FAULTY_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + String targetNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryState, sourceNode, targetNode); + + assertDocumentsAreEqual(indexName, numDocs); + } + + public void testRateLimitingIsEnforced() throws Exception { + try { + updateSetting(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "50k"); + + String sourceNode = internalCluster().startDataOnlyNode(); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", sourceNode) + .build() + ); + + // In theory we only need a bit more than 256 bytes, since SimpleRateLimiter.MIN_PAUSE_CHECK_MSEC=5. + // We index quite a bit more though, to leave enough headroom for a generally slow network or CI, + // since if the experienced download rate is below 50KB/s there will be no throttling at all. + // Indexing at least 4x the minimum keeps us on the somewhat safe side against things like a single GC pause. 
+ int numDocs = randomIntBetween(1000, 2000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, "fs"); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + String targetNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryState, sourceNode, targetNode); + + assertDocumentsAreEqual(indexName, numDocs); + + NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear() + .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get(); + for (NodeStats nodeStats : statsResponse.getNodes()) { + RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats(); + String nodeName = nodeStats.getNode().getName(); + if (nodeName.equals(sourceNode)) { + assertThat(recoveryStats.throttleTime().getMillis(), is(equalTo(0L))); + } + if (nodeName.equals(targetNode)) { + assertThat(recoveryStats.throttleTime().getMillis(), is(greaterThan(0L))); + } + } + } finally { + updateSetting(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), null); + } + } + + public void testPeerRecoveryTriesToUseMostOfTheDataFromAnAvailableSnapshot() throws Exception { + String sourceNode = internalCluster().startDataOnlyNode(); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", sourceNode) + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + forceMerge(); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.INSTRUMENTED_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + int docsIndexedAfterSnapshot = randomIntBetween(1, 2000); + indexDocs(indexName, numDocs, docsIndexedAfterSnapshot); + + String targetNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryState, sourceNode, targetNode); + + InstrumentedRepo repository = getRepositoryOnNode(repoName, targetNode); + + long snapshotSizeForIndex = getSnapshotSizeForIndex(repoName, "snap", indexName); + assertThat(repository.totalBytesRead.get(), is(greaterThan(0L))); + assertThat(repository.totalBytesRead.get(), is(lessThanOrEqualTo(snapshotSizeForIndex))); + + assertDocumentsAreEqual(indexName, numDocs + docsIndexedAfterSnapshot); + } + + public void testPeerRecoveryDoNotUseSnapshotsWhenSegmentsAreNotShared() throws Exception { + String sourceNode = internalCluster().startDataOnlyNode(); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + 
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", sourceNode) + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.INSTRUMENTED_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + int docsIndexedAfterSnapshot = randomIntBetween(1, 2000); + indexDocs(indexName, numDocs, docsIndexedAfterSnapshot); + forceMerge(); + + String targetNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryState, sourceNode, targetNode); + + InstrumentedRepo repository = getRepositoryOnNode(repoName, targetNode); + + assertThat(repository.totalBytesRead.get(), is(equalTo(0L))); + + assertDocumentsAreEqual(indexName, numDocs + docsIndexedAfterSnapshot); + } + + public void testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception { + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), "1"); + + try { + String sourceNode = internalCluster().startDataOnlyNode(); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", sourceNode) + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, numDocs, numDocs); + + String repoName = "repo"; + createRepo(repoName, "fs"); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + String targetNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + MockTransportService targetMockTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); + + CountDownLatch recoverSnapshotFileRequestReceived = new CountDownLatch(1); + CountDownLatch respondToRecoverSnapshotFile = new CountDownLatch(1); + AtomicInteger numberOfRecoverSnapshotFileRequestsReceived = new AtomicInteger(); + targetMockTransportService.addRequestHandlingBehavior(PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT, + (handler, request, channel, task) -> { + assertThat(numberOfRecoverSnapshotFileRequestsReceived.incrementAndGet(), is(equalTo(1))); + recoverSnapshotFileRequestReceived.countDown(); + respondToRecoverSnapshotFile.await(); + handler.messageReceived(request, channel, task); + } + ); + + recoverSnapshotFileRequestReceived.await(); + + assertAcked(client().admin().indices().prepareDelete(indexName).get()); + + respondToRecoverSnapshotFile.countDown(); + + assertThat(indexExists(indexName), is(equalTo(false))); + } finally { + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), null); + } + } + + public void testRecoveryAfterRestoreUsesSnapshots() throws Exception { + String indexName = 
randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.INSTRUMENTED_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + assertAcked(client().admin().indices().prepareDelete(indexName).get()); + + List restoredIndexDataNodes = internalCluster().startDataOnlyNodes(2); + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster() + .prepareRestoreSnapshot(repoName, "snap") + .setIndices(indexName) + .setIndexSettings(Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", String.join(",", restoredIndexDataNodes)) + ).setWaitForCompletion(true) + .get(); + + RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); + assertThat(restoreInfo.successfulShards(), is(equalTo(restoreInfo.totalShards()))); + + ensureGreen(indexName); + assertDocumentsAreEqual(indexName, numDocs); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + String sourceNode = recoveryState.getSourceNode().getName(); + String targetNode = recoveryState.getTargetNode().getName(); + + assertThat(restoredIndexDataNodes.contains(sourceNode), is(equalTo(true))); + assertThat(restoredIndexDataNodes.contains(targetNode), is(equalTo(true))); + assertPeerRecoveryWasSuccessful(recoveryState, sourceNode, targetNode); + + // Since we did a restore first, and the index is static the data retrieved by the target node + // via repository should be equal to the amount of data that the source node retrieved from the repo + InstrumentedRepo sourceRepo = getRepositoryOnNode(repoName, sourceNode); + InstrumentedRepo targetRepo = getRepositoryOnNode(repoName, targetNode); + assertThat(sourceRepo.totalBytesRead.get(), is(equalTo(targetRepo.totalBytesRead.get()))); + + long snapshotSizeForIndex = getSnapshotSizeForIndex(repoName, "snap", indexName); + + assertThat(sourceRepo.totalBytesRead.get(), is(greaterThan(0L))); + assertThat(sourceRepo.totalBytesRead.get(), is(lessThanOrEqualTo(snapshotSizeForIndex))); + } + + public void testReplicaRecoveryUsesSnapshots() throws Exception { + List dataNodes = internalCluster().startDataOnlyNodes(3); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.include._name", String.join(",", dataNodes)) + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.INSTRUMENTED_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + ensureGreen(indexName); + assertDocumentsAreEqual(indexName, 
numDocs); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + String currentPrimary = recoveryState.getSourceNode().getName(); + String replica = recoveryState.getTargetNode().getName(); + assertPeerRecoveryWasSuccessful(recoveryState, currentPrimary, replica); + + long snapshotSizeForIndex = getSnapshotSizeForIndex(repoName, "snap", indexName); + + InstrumentedRepo replicaRepo = getRepositoryOnNode(repoName, replica); + assertThat(replicaRepo.totalBytesRead.get(), is(greaterThan(0L))); + assertThat(replicaRepo.totalBytesRead.get(), is(lessThanOrEqualTo(snapshotSizeForIndex))); + + // Stop the current replica + if (randomBoolean()) { + internalCluster().stopNode(replica); + + ensureGreen(indexName); + assertDocumentsAreEqual(indexName, numDocs); + + RecoveryState recoveryStateAfterReplicaFailure = getLatestPeerRecoveryStateForShard(indexName, 0); + final String name = recoveryStateAfterReplicaFailure.getSourceNode().getName(); + final String newReplica = recoveryStateAfterReplicaFailure.getTargetNode().getName(); + assertPeerRecoveryWasSuccessful(recoveryStateAfterReplicaFailure, name, newReplica); + + InstrumentedRepo newReplicaRepo = getRepositoryOnNode(repoName, newReplica); + assertThat(newReplicaRepo.totalBytesRead.get(), is(greaterThan(0L))); + assertThat(newReplicaRepo.totalBytesRead.get(), is(lessThanOrEqualTo(snapshotSizeForIndex))); + } + } + + public void testDisabledSnapshotBasedRecoveryUsesSourceFiles() throws Exception { + updateSetting(RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey(), "false"); + + try { + internalCluster().ensureAtLeastNumDataNodes(2); + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build() + ); + + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, 0, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.INSTRUMENTED_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + ensureGreen(indexName); + assertDocumentsAreEqual(indexName, numDocs); + + RecoveryState recoveryState = getLatestPeerRecoveryStateForShard(indexName, 0); + String currentPrimary = recoveryState.getSourceNode().getName(); + String replica = recoveryState.getTargetNode().getName(); + assertPeerRecoveryWasSuccessful(recoveryState, currentPrimary, replica); + + InstrumentedRepo replicaRepo = getRepositoryOnNode(repoName, replica); + assertThat(replicaRepo.totalBytesRead.get(), is(equalTo(0L))); + } finally { + updateSetting(RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey(), null); + } + } + + private long getSnapshotSizeForIndex(String repository, String snapshot, String index) { + GetSnapshotsResponse getSnapshotsResponse = + client().admin().cluster().prepareGetSnapshots(repository).addSnapshots(snapshot).get(); + for (SnapshotInfo snapshotInfo : getSnapshotsResponse.getSnapshots()) { + SnapshotInfo.IndexSnapshotDetails indexSnapshotDetails = snapshotInfo.indexSnapshotDetails().get(index); + assertThat(indexSnapshotDetails, is(notNullValue())); + return 
indexSnapshotDetails.getSize().getBytes(); + } + + return -1; + } + + private void indexDocs(String indexName, int docIdOffset, int docCount) throws Exception { + IndexRequestBuilder[] builders = new IndexRequestBuilder[docCount]; + for (int i = 0; i < builders.length; i++) { + int docId = i + docIdOffset; + builders[i] = client().prepareIndex(indexName) + .setId(Integer.toString(docId)) + .setSource("field", docId, "field2", "Some text " + docId); + } + indexRandom(true, builders); + + // Ensure that the safe commit == latest commit + assertBusy(() -> { + ShardStats stats = client().admin().indices().prepareStats(indexName).clear().get() + .asMap().entrySet().stream().filter(e -> e.getKey().shardId().getId() == 0) + .map(Map.Entry::getValue).findFirst().orElse(null); + assertThat(stats, is(notNullValue())); + assertThat(stats.getSeqNoStats(), is(notNullValue())); + + assertThat(Strings.toString(stats.getSeqNoStats()), + stats.getSeqNoStats().getMaxSeqNo(), equalTo(stats.getSeqNoStats().getGlobalCheckpoint())); + }, 60, TimeUnit.SECONDS); + } + + private void assertDocumentsAreEqual(String indexName, int docCount) { + assertDocCount(indexName, docCount); + for (int testCase = 0; testCase < 3; testCase++) { + final SearchRequestBuilder searchRequestBuilder = client().prepareSearch(indexName) + .addSort("field", SortOrder.ASC) + .setSize(10_000); + + SearchResponse searchResponse; + switch (testCase) { + case 0: + searchResponse = searchRequestBuilder. + setQuery(QueryBuilders.matchAllQuery()).get(); + assertSearchResponseContainsAllIndexedDocs(searchResponse, docCount); + break; + case 1: + int docIdToMatch = randomIntBetween(0, docCount - 1); + searchResponse = searchRequestBuilder.setQuery(QueryBuilders.termQuery("field", docIdToMatch)).get(); + assertThat(searchResponse.getSuccessfulShards(), equalTo(1)); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + SearchHit searchHit = searchResponse.getHits().getAt(0); + + Map source = searchHit.getSourceAsMap(); + + assertThat(source, is(notNullValue())); + assertThat(source.get("field"), is(equalTo(docIdToMatch))); + assertThat(source.get("field2"), is(equalTo("Some text " + docIdToMatch))); + break; + case 2: + searchResponse = searchRequestBuilder.setQuery(QueryBuilders.matchQuery("field2", "text")).get(); + assertSearchResponseContainsAllIndexedDocs(searchResponse, docCount); + break; + default: + throw new IllegalStateException("Unexpected value: " + testCase); + } + } + } + + private void assertSearchResponseContainsAllIndexedDocs(SearchResponse searchResponse, long docCount) { + assertThat(searchResponse.getSuccessfulShards(), equalTo(1)); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(docCount)); + for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { + SearchHit searchHit = searchResponse.getHits().getAt(i); + Map source = searchHit.getSourceAsMap(); + + assertThat(source, is(notNullValue())); + assertThat(source.get("field"), is(equalTo(i))); + assertThat(source.get("field2"), is(equalTo("Some text " + i))); + } + } + + private void assertPeerRecoveryWasSuccessful(RecoveryState recoveryState, String sourceNode, String targetNode) throws Exception { + assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE)); + assertThat(recoveryState.getRecoverySource(), equalTo(RecoverySource.PeerRecoverySource.INSTANCE)); + + assertThat(recoveryState.getSourceNode(), notNullValue()); + assertThat(recoveryState.getSourceNode().getName(), equalTo(sourceNode)); + 
assertThat(recoveryState.getTargetNode(), notNullValue()); + assertThat(recoveryState.getTargetNode().getName(), equalTo(targetNode)); + + RecoveryState.Index indexState = recoveryState.getIndex(); + assertThat(indexState.recoveredBytesPercent(), greaterThanOrEqualTo(0.0f)); + assertThat(indexState.recoveredBytesPercent(), lessThanOrEqualTo(100.0f)); + } + + private RecoveryState getLatestPeerRecoveryStateForShard(String indexName, int shardId) { + RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(indexName).get(); + assertThat(recoveryResponse.hasRecoveries(), equalTo(true)); + List indexRecoveries = recoveryResponse.shardRecoveryStates().get(indexName); + assertThat(indexRecoveries, notNullValue()); + + List peerRecoveries = indexRecoveries.stream() + .filter(recoveryState -> recoveryState.getRecoverySource().equals(RecoverySource.PeerRecoverySource.INSTANCE)) + .filter(recoveryState -> recoveryState.getShardId().getId() == shardId) + .collect(Collectors.toList()); + + assertThat(peerRecoveries, is(not(empty()))); + return peerRecoveries.get(peerRecoveries.size() - 1); + } + + private void updateSetting(String key, String value) { + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + settingsRequest.persistentSettings(Settings.builder().put(key, value)); + assertAcked(client().admin().cluster().updateSettings(settingsRequest).actionGet()); + } + + private void createRepo(String repoName, String type) { + final Settings.Builder settings = Settings.builder() + .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), true) + .put("location", randomRepoPath()); + createRepository(logger, repoName, type, settings, true); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java new file mode 100644 index 0000000000000..dad74ec7607c7 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices.recovery.plan; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.ShardGeneration; +import org.elasticsearch.repositories.ShardSnapshotInfo; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.SnapshotException; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class ShardSnapshotsServiceIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(FailingRepoPlugin.class); + } + + public static class FailingRepoPlugin extends Plugin implements RepositoryPlugin { + public static final String TYPE = "failingrepo"; + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + TYPE, + metadata -> new FailingRepo(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings) + ); + } + } + + public static class FailingRepo extends FsRepository { + static final String FAIL_GET_REPOSITORY_DATA_SETTING_KEY = "fail_get_repository_data"; + static final String FAIL_LOAD_SHARD_SNAPSHOT_SETTING_KEY = "fail_load_shard_snapshot"; + static final String FAIL_LOAD_SHARD_SNAPSHOTS_SETTING_KEY = "fail_load_shard_snapshots"; + + private final boolean failGetRepositoryData; + private final boolean failLoadShardSnapshot; + private final boolean failLoadShardSnapshots; + + public FailingRepo(RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays 
bigArrays, + RecoverySettings recoverySettings) { + super(metadata, environment, namedXContentRegistry, clusterService, bigArrays, recoverySettings); + this.failGetRepositoryData = metadata.settings().getAsBoolean(FAIL_GET_REPOSITORY_DATA_SETTING_KEY, false); + this.failLoadShardSnapshot = metadata.settings().getAsBoolean(FAIL_LOAD_SHARD_SNAPSHOT_SETTING_KEY, false); + this.failLoadShardSnapshots = metadata.settings().getAsBoolean(FAIL_LOAD_SHARD_SNAPSHOTS_SETTING_KEY, false); + } + + @Override + public void getRepositoryData(ActionListener listener) { + if (failGetRepositoryData) { + listener.onFailure(new IOException("Failure getting repository data")); + return; + } + super.getRepositoryData(listener); + } + + @Override + public BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) { + if (failLoadShardSnapshot) { + throw new SnapshotException( + metadata.name(), + snapshotId, + "failed to read shard snapshot file for [" + shardContainer.path() + ']', + new FileNotFoundException("unable to find file") + ); + } + return super.loadShardSnapshot(shardContainer, snapshotId); + } + + @Override + public BlobStoreIndexShardSnapshots getBlobStoreIndexShardSnapshots(IndexId indexId, + int shardId, + ShardGeneration shardGen) throws IOException { + if (failLoadShardSnapshots) { + throw new FileNotFoundException("Failed to get blob store index shard snapshots"); + } + return super.getBlobStoreIndexShardSnapshots(indexId, shardId, shardGen); + } + } + + public void testReturnsEmptyListWhenThereAreNotAvailableRepositories() throws Exception { + String indexName = "test"; + createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); + ShardId shardId = getShardIdForIndex(indexName); + + Optional shardSnapshot = getLatestShardSnapshot(shardId); + assertThat(shardSnapshot.isPresent(), is(equalTo(false))); + } + + public void testOnlyFetchesSnapshotFromEnabledRepositories() throws Exception { + final String indexName = "test"; + createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); + ShardId shardId = getShardIdForIndex(indexName); + + for (int i = 0; i < randomIntBetween(1, 50); i++) { + index(indexName, Integer.toString(i), Collections.singletonMap("foo", "bar")); + } + + String snapshotName = "snap"; + + int numberOfNonEnabledRepos = randomIntBetween(1, 3); + List nonEnabledRepos = new ArrayList<>(); + for (int i = 0; i < numberOfNonEnabledRepos; i++) { + String repositoryName = "non-enabled-repo-" + i; + Path repoPath = randomRepoPath(); + createRepository(repositoryName, "fs", repoPath, false); + createSnapshot(repositoryName, snapshotName, indexName); + nonEnabledRepos.add(repositoryName); + } + + int numberOfRecoveryEnabledRepositories = randomIntBetween(0, 4); + List recoveryEnabledRepos = new ArrayList<>(); + for (int i = 0; i < numberOfRecoveryEnabledRepositories; i++) { + String repositoryName = "repo-" + i; + createRepository(repositoryName, "fs", randomRepoPath(), true); + recoveryEnabledRepos.add(repositoryName); + createSnapshot(repositoryName, snapshotName, indexName); + } + + Optional latestShardSnapshot = getLatestShardSnapshot(shardId); + + if (numberOfRecoveryEnabledRepositories == 0) { + assertThat(latestShardSnapshot.isPresent(), is(equalTo(false))); + } else { + assertThat(latestShardSnapshot.isPresent(), is(equalTo(true))); + + ShardSnapshot shardSnapshotData = latestShardSnapshot.get(); + ShardSnapshotInfo shardSnapshotInfo = 
shardSnapshotData.getShardSnapshotInfo(); + assertThat(recoveryEnabledRepos.contains(shardSnapshotInfo.getRepository()), is(equalTo(true))); + assertThat(nonEnabledRepos.contains(shardSnapshotInfo.getRepository()), is(equalTo(false))); + + assertThat(shardSnapshotData.getMetadataSnapshot().size(), is(greaterThan(0))); + + assertThat(shardSnapshotInfo.getShardId(), is(equalTo(shardId))); + assertThat(shardSnapshotInfo.getSnapshot().getSnapshotId().getName(), is(equalTo(snapshotName))); + } + } + + public void testFailingReposAreTreatedAsNonExistingShardSnapshots() throws Exception { + final String indexName = "test"; + createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); + ShardId shardId = getShardIdForIndex(indexName); + + for (int i = 0; i < randomIntBetween(1, 50); i++) { + index(indexName, Integer.toString(i), Collections.singletonMap("foo", "bar")); + } + + String snapshotName = "snap"; + + int numberOfFailingRepos = randomIntBetween(1, 3); + List> failingRepos = new ArrayList<>(); + List failingRepoNames = new ArrayList<>(); + for (int i = 0; i < numberOfFailingRepos; i++) { + String repositoryName = "failing-repo-" + i; + Path repoPath = randomRepoPath(); + createRepository(repositoryName, FailingRepoPlugin.TYPE, repoPath, true); + createSnapshot(repositoryName, snapshotName, indexName); + failingRepos.add(Tuple.tuple(repositoryName, repoPath)); + failingRepoNames.add(repositoryName); + } + + int numberOfWorkingRepositories = randomIntBetween(0, 4); + List workingRepos = new ArrayList<>(); + for (int i = 0; i < numberOfWorkingRepositories; i++) { + String repositoryName = "repo-" + i; + createRepository(repositoryName, "fs", randomRepoPath(), true); + workingRepos.add(repositoryName); + createSnapshot(repositoryName, snapshotName, indexName); + } + + for (Tuple failingRepo : failingRepos) { + // Update repository settings to fail fetching the repository information at any stage + String repoFailureType = + randomFrom(FailingRepo.FAIL_GET_REPOSITORY_DATA_SETTING_KEY, + FailingRepo.FAIL_LOAD_SHARD_SNAPSHOT_SETTING_KEY, + FailingRepo.FAIL_LOAD_SHARD_SNAPSHOTS_SETTING_KEY + ); + + assertAcked(client().admin().cluster().preparePutRepository(failingRepo.v1()) + .setType(FailingRepoPlugin.TYPE) + .setVerify(false) + .setSettings(Settings.builder().put(repoFailureType, true).put("location", failingRepo.v2())) + ); + } + + Optional latestShardSnapshot = getLatestShardSnapshot(shardId); + + if (numberOfWorkingRepositories == 0) { + assertThat(latestShardSnapshot.isPresent(), is(equalTo(false))); + } else { + assertThat(latestShardSnapshot.isPresent(), is(equalTo(true))); + ShardSnapshot shardSnapshotData = latestShardSnapshot.get(); + ShardSnapshotInfo shardSnapshotInfo = shardSnapshotData.getShardSnapshotInfo(); + assertThat(workingRepos.contains(shardSnapshotInfo.getRepository()), is(equalTo(true))); + assertThat(failingRepoNames.contains(shardSnapshotInfo.getRepository()), is(equalTo(false))); + + assertThat(shardSnapshotData.getMetadataSnapshot().size(), is(greaterThan(0))); + + assertThat(shardSnapshotInfo.getShardId(), is(equalTo(shardId))); + assertThat(shardSnapshotInfo.getSnapshot().getSnapshotId().getName(), is(equalTo(snapshotName))); + } + } + + public void testFetchingInformationFromAnIncompatibleMasterNodeReturnsAnEmptyList() { + String indexName = "test"; + createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); + ShardId shardId = getShardIdForIndex(indexName); + + for (int i = 0; i < 
randomIntBetween(1, 50); i++) { + index(indexName, Integer.toString(i), Collections.singletonMap("foo", "bar")); + } + + String snapshotName = "snap"; + String repositoryName = "repo"; + createRepository(repositoryName, "fs", randomRepoPath(), true); + createSnapshot(repositoryName, snapshotName, indexName); + + RepositoriesService repositoriesService = internalCluster().getMasterNodeInstance(RepositoriesService.class); + ThreadPool threadPool = internalCluster().getMasterNodeInstance(ThreadPool.class); + ClusterService clusterService = internalCluster().getMasterNodeInstance(ClusterService.class); + ShardSnapshotsService shardSnapshotsService = new ShardSnapshotsService(client(), repositoriesService, threadPool, clusterService) { + @Override + protected boolean masterSupportsFetchingLatestSnapshots() { + return false; + } + }; + + PlainActionFuture> latestSnapshots = PlainActionFuture.newFuture(); + shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, latestSnapshots); + assertThat(latestSnapshots.actionGet().isPresent(), is(equalTo(false))); + } + + private Optional getLatestShardSnapshot(ShardId shardId) throws Exception { + ShardSnapshotsService shardSnapshotsService = getShardSnapshotsService(); + + PlainActionFuture> future = PlainActionFuture.newFuture(); + shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, future); + return future.get(); + } + + private ShardSnapshotsService getShardSnapshotsService() { + RepositoriesService repositoriesService = internalCluster().getMasterNodeInstance(RepositoriesService.class); + ThreadPool threadPool = internalCluster().getMasterNodeInstance(ThreadPool.class); + ClusterService clusterService = internalCluster().getMasterNodeInstance(ClusterService.class); + return new ShardSnapshotsService(client(), repositoriesService, threadPool, clusterService); + } + + private ShardId getShardIdForIndex(String indexName) { + ClusterState state = clusterAdmin().prepareState().get().getState(); + return state.routingTable().index(indexName).shard(0).shardId(); + } + + private void createRepository(String repositoryName, String type, Path location, boolean recoveryEnabledRepo) { + assertAcked(client().admin().cluster().preparePutRepository(repositoryName) + .setType(type) + .setVerify(false) + .setSettings(Settings.builder() + .put("location", location) + .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), recoveryEnabledRepo) + ) + ); + } + + private void createSnapshot(String repoName, String snapshotName, String index) { + clusterAdmin() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(index) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a05cd5e6fd48e..0bc80c26c9a35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -615,7 +615,6 @@ public void testSegmentsStats() { assertThat(stats.getTotal().getSegments(), notNullValue()); assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards)); - assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0L)); if (includeSegmentFileSizes) { assertThat(stats.getTotal().getSegments().getFiles().size(), greaterThan(0)); for (ObjectObjectCursor cursor : stats.getTotal().getSegments().getFiles()) { @@ 
-735,7 +734,7 @@ public void testEncodeDecodeCommonStats() throws IOException { public void testFlagOrdinalOrder() { Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh, Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Completion, Flag.Segments, - Flag.Translog, Flag.RequestCache, Flag.Recovery, Flag.Bulk}; + Flag.Translog, Flag.RequestCache, Flag.Recovery, Flag.Bulk, Flag.Shards}; assertThat(flags.length, equalTo(Flag.values().length)); for (int i = 0; i < flags.length; i++) { @@ -914,6 +913,10 @@ private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean s case Bulk: builder.setBulk(set); break; + case Shards: + // We don't actually expose shards in IndexStats, but this test fails if it isn't handled + builder.request().flags().set(Flag.Shards, set); + break; default: fail("new flag? " + flag); break; @@ -956,6 +959,8 @@ private static boolean isSet(Flag flag, CommonStats response) { return response.getRecoveryStats() != null; case Bulk: return response.getBulk() != null; + case Shards: + return response.getShards() != null; default: fail("new flag? " + flag); return false; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 6d48bebd9f376..a230cedcb68c1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -33,9 +33,11 @@ import java.util.Objects; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.Is.is; @@ -72,11 +74,7 @@ public void testPersistentActionFailure() throws Exception { PlainActionFuture> future = new PlainActionFuture<>(); persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); long allocationId = future.get().getAllocationId(); - assertBusy(() -> { - // Wait for the task to start - assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get() - .getTasks().size(), equalTo(1)); - }); + waitForTaskToStart(); TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId()); @@ -103,11 +101,7 @@ public void testPersistentActionCompletion() throws Exception { String taskId = UUIDs.base64UUID(); persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); long allocationId = future.get().getAllocationId(); - assertBusy(() -> { - // Wait for the task to start - assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get() - .getTasks().size(), equalTo(1)); - }); + waitForTaskToStart(); TaskInfo firstRunningTask = 
client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .setDetailed(true).get().getTasks().get(0); logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId()); @@ -120,7 +114,7 @@ public void testPersistentActionCompletion() throws Exception { logger.info("Simulating errant completion notification"); //try sending completion request with incorrect allocation id PlainActionFuture> failedCompletionNotificationFuture = new PlainActionFuture<>(); - persistentTasksService.sendCompletionRequest(taskId, Long.MAX_VALUE, null, failedCompletionNotificationFuture); + persistentTasksService.sendCompletionRequest(taskId, Long.MAX_VALUE, null, null, failedCompletionNotificationFuture); assertFutureThrows(failedCompletionNotificationFuture, ResourceNotFoundException.class); // Make sure that the task is still running assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") @@ -260,11 +254,7 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future2); assertFutureThrows(future2, ResourceAlreadyExistsException.class); - assertBusy(() -> { - // Wait for the task to start - assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get() - .getTasks().size(), equalTo(1)); - }); + waitForTaskToStart(); TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); @@ -303,7 +293,7 @@ public void testUnassignRunningPersistentTask() throws Exception { PlainActionFuture> unassignmentFuture = new PlainActionFuture<>(); - // Disallow re-assignment after it is unallocated to verify master and node state + // Disallow re-assignment after it is unassigned to verify master and node state TestPersistentTasksExecutor.setNonClusterStateCondition(false); persistentTasksClusterService.unassignPersistentTask(taskId, @@ -339,6 +329,69 @@ public void testUnassignRunningPersistentTask() throws Exception { stopOrCancelTask(taskInfo.getTaskId()); } + public void testAbortLocally() throws Exception { + PersistentTasksClusterService persistentTasksClusterService = + internalCluster().getInstance(PersistentTasksClusterService.class, internalCluster().getMasterName()); + // Speed up rechecks to a rate that is quicker than what settings would allow + persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(1)); + PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); + PlainActionFuture> future = new PlainActionFuture<>(); + persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future); + String taskId = future.get().getId(); + long allocationId = future.get().getAllocationId(); + waitForTaskToStart(); + TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get().getTasks().get(0); + + // Disallow re-assignment after it is unassigned to verify master and node state + TestPersistentTasksExecutor.setNonClusterStateCondition(false); + + // Verifying parent + assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId)); + assertThat(firstRunningTask.getParentTaskId().getNodeId(), 
equalTo("cluster")); + + assertThat(new TestTasksRequestBuilder(client()).setOperation("abort_locally").setTaskId(firstRunningTask.getTaskId()) + .get().getTasks().size(), equalTo(1)); + + assertBusy(() -> { + // Verify that the task is NOT running on any node + List tasks = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get() + .getTasks(); + assertThat(tasks.size(), equalTo(0)); + + // Verify that the task is STILL in internal cluster state, unassigned, with a reason indicating local abort + PersistentTask task = assertClusterStateHasTask(taskId); + assertThat(task.getAssignment().getExecutorNode(), nullValue()); + // Although the assignment explanation is initially set to "Simulating local abort", because + // of the way we prevent reassignment to the same node in this test it may quickly change to + // "non cluster state condition prevents assignment" - either proves the unassignment worked + assertThat(task.getAssignment().getExplanation(), + either(equalTo("Simulating local abort")) + .or(equalTo("non cluster state condition prevents assignment")) + ); + }); + + // Allow it to be reassigned again + TestPersistentTasksExecutor.setNonClusterStateCondition(true); + + // Verify it starts again + waitForTaskToStart(); + + // Verify that persistent task is in cluster state and that the local abort reason has been removed. + // (Since waitForTaskToStart() waited for the local task to start, there might be a short period when + // the tasks API reports the local task but the cluster state update containing the new assignment + // reason has not been published, hence the busy wait here.) + assertBusy(() -> { + PersistentTask task = assertClusterStateHasTask(taskId); + assertThat(task.getAssignment().getExplanation(), not(equalTo("Simulating local abort"))); + }); + + // Complete or cancel the running task + TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get().getTasks().get(0); + stopOrCancelTask(taskInfo.getTaskId()); + } + private void stopOrCancelTask(TaskId taskId) { if (randomBoolean()) { logger.info("Completing the running task"); @@ -362,7 +415,7 @@ private static void waitForTaskToStart() throws Exception { }); } - private static void assertClusterStateHasTask(String taskId) { + private static PersistentTask assertClusterStateHasTask(String taskId) { Collection> clusterTasks = ((PersistentTasksCustomMetadata) internalCluster() .clusterService() .state() @@ -370,7 +423,9 @@ private static void assertClusterStateHasTask(String taskId) { .custom(PersistentTasksCustomMetadata.TYPE)) .tasks(); assertThat(clusterTasks, hasSize(1)); - assertThat(clusterTasks.iterator().next().getId(), equalTo(taskId)); + PersistentTask task = clusterTasks.iterator().next(); + assertThat(task.getId(), equalTo(taskId)); + return task; } private void assertNoRunningTasks() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java new file mode 100644 index 0000000000000..d69d0b562abcd --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -0,0 +1,353 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.snapshots.mockstore.MockRepository; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.snapshots.SnapshotsService.NO_FEATURE_STATES_VALUE; +import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class IndexSnapshotsServiceIT extends AbstractSnapshotIntegTestCase { + public void testGetShardSnapshotFromUnknownRepoReturnsAnError() throws Exception { + boolean useMultipleUnknownRepositories = randomBoolean(); + List repositories = useMultipleUnknownRepositories ? 
List.of("unknown", "unknown-2") : List.of("unknown"); + final ActionFuture responseFuture = getLatestSnapshotForShardFuture(repositories, "idx", 0, false); + + if (useMultipleUnknownRepositories) { + GetShardSnapshotResponse response = responseFuture.get(); + assertThat(response.getLatestShardSnapshot().isPresent(), is(equalTo(false))); + + final Map failures = response.getRepositoryFailures(); + for (String repository : repositories) { + RepositoryException repositoryException = failures.get(repository); + assertThat(repositoryException, is(notNullValue())); + assertThat( + repositoryException.getMessage(), + equalTo(String.format(Locale.ROOT, "[%s] Unable to find the latest snapshot for shard [[idx][0]]", repository)) + ); + } + } else { + expectThrows(RepositoryException.class, responseFuture::actionGet); + } + + disableRepoConsistencyCheck("This test checks an empty repository"); + } + + public void testGetShardSnapshotFromEmptyRepositoryReturnsEmptyResult() { + final String fsRepoName = randomAlphaOfLength(10); + createRepository(fsRepoName, FsRepository.TYPE); + + final Optional indexShardSnapshotInfo = getLatestSnapshotForShard(fsRepoName, "test", 0); + assertThat(indexShardSnapshotInfo.isEmpty(), equalTo(true)); + + disableRepoConsistencyCheck("This test checks an empty repository"); + } + + public void testGetShardSnapshotFromUnknownIndexReturnsEmptyResult() { + final String fsRepoName = randomAlphaOfLength(10); + createRepository(fsRepoName, FsRepository.TYPE); + + createSnapshot(fsRepoName, "snap-1", Collections.emptyList()); + + final Optional indexShardSnapshotInfo = getLatestSnapshotForShard(fsRepoName, "test", 0); + assertThat(indexShardSnapshotInfo.isEmpty(), equalTo(true)); + } + + public void testGetShardSnapshotFromUnknownShardReturnsEmptyResult() { + final String fsRepoName = randomAlphaOfLength(10); + final String indexName = "test-idx"; + + createIndexWithContent(indexName); + + createRepository(fsRepoName, FsRepository.TYPE); + createSnapshot(fsRepoName, "snap-1", Collections.singletonList(indexName)); + + final Optional indexShardSnapshotInfo = getLatestSnapshotForShard(fsRepoName, indexName, 100); + assertThat(indexShardSnapshotInfo.isEmpty(), equalTo(true)); + } + + public void testGetShardSnapshotOnEmptyRepositoriesListThrowsAnError() { + expectThrows(IllegalArgumentException.class, () -> getLatestSnapshotForShardFuture(Collections.emptyList(), "idx", 0, false)); + } + + public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exception { + final String repoName = "repo-name"; + final Path repoPath = randomRepoPath(); + createRepository(repoName, FsRepository.TYPE, repoPath); + + final boolean useBwCFormat = randomBoolean(); + if (useBwCFormat) { + final Version version = randomVersionBetween(random(), Version.V_7_5_0, Version.CURRENT); + initWithSnapshotVersion(repoName, repoPath, version); + // Re-create repo to clear repository data cache + assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); + createRepository(repoName, "fs", repoPath); + } + + createSnapshot(repoName, "empty-snap", Collections.emptyList()); + + final String indexName = "test"; + final String indexName2 = "test-2"; + List indices = List.of(indexName, indexName2); + createIndex(indexName, indexName2); + SnapshotInfo lastSnapshot = null; + int numSnapshots = randomIntBetween(5, 25); + for (int i = 0; i < numSnapshots; i++) { + if (randomBoolean()) { + indexRandomDocs(indexName, 5); + indexRandomDocs(indexName2, 10); + } + final List snapshotIndices = 
randomSubsetOf(indices); + final SnapshotInfo snapshotInfo = createSnapshot(repoName, String.format(Locale.ROOT, "snap-%03d", i), snapshotIndices); + if (snapshotInfo.indices().contains(indexName)) { + lastSnapshot = snapshotInfo; + } + } + + if (useBwCFormat) { + // Reload the RepositoryData so we don't use cached data that wasn't serialized + assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); + createRepository(repoName, "fs", repoPath); + } + + final Optional indexShardSnapshotInfoOpt = getLatestSnapshotForShard(repoName, indexName, 0); + if (lastSnapshot == null) { + assertThat(indexShardSnapshotInfoOpt.isPresent(), equalTo(false)); + } else { + assertThat(indexShardSnapshotInfoOpt.isPresent(), equalTo(true)); + + final ShardSnapshotInfo shardSnapshotInfo = indexShardSnapshotInfoOpt.get(); + + final ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().execute().actionGet(); + final IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); + final String indexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); + assertThat(shardSnapshotInfo.getIndexMetadataIdentifier(), equalTo(indexMetadataId)); + + final Snapshot snapshot = shardSnapshotInfo.getSnapshot(); + assertThat(snapshot, equalTo(lastSnapshot.snapshot())); + } + } + + public void testGetShardSnapshotWhileThereIsARunningSnapshot() throws Exception { + final String fsRepoName = randomAlphaOfLength(10); + createRepository(fsRepoName, "mock"); + + createSnapshot(fsRepoName, "empty-snap", Collections.emptyList()); + + final String indexName = "test-idx"; + createIndexWithContent(indexName); + + blockAllDataNodes(fsRepoName); + + final String snapshotName = "snap-1"; + final ActionFuture snapshotFuture = client().admin() + .cluster() + .prepareCreateSnapshot(fsRepoName, snapshotName) + .setIndices(indexName) + .setWaitForCompletion(true) + .execute(); + + waitForBlockOnAnyDataNode(fsRepoName); + + assertThat(getLatestSnapshotForShard(fsRepoName, indexName, 0).isEmpty(), equalTo(true)); + + unblockAllDataNodes(fsRepoName); + + assertSuccessful(snapshotFuture); + } + + public void testGetShardSnapshotFailureHandlingLetOtherRepositoriesRequestsMakeProgress() throws Exception { + final String failingRepoName = randomAlphaOfLength(10); + createRepository(failingRepoName, "mock"); + int repoCount = randomIntBetween(1, 10); + List workingRepoNames = new ArrayList<>(); + for (int i = 0; i < repoCount; i++) { + final String repoName = randomAlphaOfLength(10); + createRepository(repoName, "fs"); + workingRepoNames.add(repoName); + } + + final String indexName = "test-idx"; + createIndexWithContent(indexName); + + int snapshotIdx = 0; + createSnapshot(failingRepoName, String.format(Locale.ROOT, "snap-%03d", snapshotIdx++), Collections.singletonList(indexName)); + SnapshotInfo latestSnapshot = null; + for (String workingRepoName : workingRepoNames) { + String snapshot = String.format(Locale.ROOT, "snap-%03d", snapshotIdx++); + latestSnapshot = createSnapshot(workingRepoName, snapshot, Collections.singletonList(indexName)); + } + + final MockRepository repository = getRepositoryOnMaster(failingRepoName); + if (randomBoolean()) { + repository.setBlockAndFailOnReadIndexFiles(); + } else { + repository.setBlockAndFailOnReadSnapFiles(); + } + + PlainActionFuture future = getLatestSnapshotForShardFuture( + CollectionUtils.appendToCopy(workingRepoNames, failingRepoName), + indexName, + 0 + ); + waitForBlock(internalCluster().getMasterName(), 
failingRepoName); + repository.unblock(); + + final GetShardSnapshotResponse response = future.actionGet(); + + final Optional error = response.getFailureForRepository(failingRepoName); + assertThat(error.isPresent(), is(equalTo(true))); + assertThat( + error.get().getMessage(), + equalTo(String.format(Locale.ROOT, "[%s] Unable to find the latest snapshot for shard [[%s][0]]", failingRepoName, indexName)) + ); + + for (String workingRepoName : workingRepoNames) { + assertThat(response.getFailureForRepository(workingRepoName).isEmpty(), is(equalTo(true))); + } + + Optional shardSnapshotInfoOpt = response.getLatestShardSnapshot(); + + assertThat(shardSnapshotInfoOpt.isPresent(), equalTo(true)); + ShardSnapshotInfo shardSnapshotInfo = shardSnapshotInfoOpt.get(); + assertThat(shardSnapshotInfo.getSnapshot(), equalTo(latestSnapshot.snapshot())); + assertThat(shardSnapshotInfo.getRepository(), equalTo(latestSnapshot.repository())); + } + + public void testGetShardSnapshotInMultipleRepositoriesReturnsTheLatestSnapshot() { + int repoCount = randomIntBetween(2, 10); + List repositories = new ArrayList<>(); + for (int i = 0; i < repoCount; i++) { + final String repoName = randomAlphaOfLength(10); + createRepository(repoName, "fs"); + repositories.add(repoName); + } + + final String indexName = "test-idx"; + createIndexWithContent(indexName); + + int snapshotIdx = 0; + SnapshotInfo expectedLatestSnapshot = null; + for (String repository : repositories) { + String snapshot = String.format(Locale.ROOT, "snap-%03d", snapshotIdx++); + expectedLatestSnapshot = createSnapshot(repository, snapshot, Collections.singletonList(indexName)); + } + + GetShardSnapshotResponse response = getLatestSnapshotForShardFuture(repositories, indexName, 0).actionGet(); + + assertThat(response.getRepositoryFailures(), is(anEmptyMap())); + Optional shardSnapshotInfoOpt = response.getLatestShardSnapshot(); + + assertThat(shardSnapshotInfoOpt.isPresent(), equalTo(true)); + ShardSnapshotInfo shardSnapshotInfo = shardSnapshotInfoOpt.get(); + assertThat(shardSnapshotInfo.getSnapshot(), equalTo(expectedLatestSnapshot.snapshot())); + assertThat(shardSnapshotInfo.getRepository(), equalTo(expectedLatestSnapshot.repository())); + } + + public void testFailedSnapshotsAreNotReturned() throws Exception { + final String indexName = "test"; + createIndexWithContent(indexName); + + final String repoName = "test-repo"; + createRepository(repoName, "mock"); + + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository) repositoriesService.repository(repoName)).setBlockAndFailOnWriteSnapFiles(); + } + + client().admin() + .cluster() + .prepareCreateSnapshot(repoName, "snap") + .setIndices(indexName) + .setWaitForCompletion(false) + .setFeatureStates(NO_FEATURE_STATES_VALUE) + .get(); + + waitForBlockOnAnyDataNode(repoName); + + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository) repositoriesService.repository(repoName)).unblock(); + } + + assertBusy(() -> assertThat(getSnapshot(repoName, "snap").state(), equalTo(SnapshotState.PARTIAL))); + + Optional shardSnapshotInfo = getLatestSnapshotForShard(repoName, indexName, 0); + assertThat(shardSnapshotInfo.isEmpty(), equalTo(true)); + + final SnapshotInfo snapshotInfo = createSnapshot(repoName, "snap-1", Collections.singletonList(indexName)); + + Optional latestSnapshotForShard = getLatestSnapshotForShard(repoName, indexName, 0); + 
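A minimal sketch of the call pattern this new test exercises, distilled from the getLatestSnapshotForShardFuture helper defined further down in the file. It reuses only calls that appear in the test itself (GetShardSnapshotRequest, GetShardSnapshotAction, getLatestShardSnapshot, getRepositoryFailures); the index name "my-index", the repository names and the synchronous actionGet() are illustrative choices, not part of the patch:

    // Resolve the most recent successful snapshot of shard 0 of "my-index" across two repositories.
    ShardId shardId = new ShardId(new Index("my-index", "__na__"), 0);
    GetShardSnapshotRequest request =
        GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, List.of("repo-a", "repo-b"));
    GetShardSnapshotResponse response = client().execute(GetShardSnapshotAction.INSTANCE, request).actionGet();
    // A failure in one repository does not abort the whole request; it is reported per repository.
    response.getRepositoryFailures().forEach((repo, error) -> logger.info("repository [{}] failed", repo, error));
    response.getLatestShardSnapshot()
        .ifPresent(info -> logger.info("latest snapshot [{}] in [{}]", info.getSnapshot(), info.getRepository()));
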
assertThat(latestSnapshotForShard.isPresent(), equalTo(true)); + assertThat(latestSnapshotForShard.get().getSnapshot(), equalTo(snapshotInfo.snapshot())); + assertThat(latestSnapshotForShard.get().getRepository(), equalTo(snapshotInfo.repository())); + } + + private Optional getLatestSnapshotForShard(String repository, String indexName, int shard) { + final GetShardSnapshotResponse response = getLatestSnapshotForShardFuture(Collections.singletonList(repository), indexName, shard) + .actionGet(); + return response.getLatestShardSnapshot(); + } + + private PlainActionFuture getLatestSnapshotForShardFuture( + List repositories, + String indexName, + int shard + ) { + return getLatestSnapshotForShardFuture(repositories, indexName, shard, true); + } + + private PlainActionFuture getLatestSnapshotForShardFuture( + List repositories, + String indexName, + int shard, + boolean useAllRepositoriesRequest + ) { + ShardId shardId = new ShardId(new Index(indexName, "__na__"), shard); + PlainActionFuture future = PlainActionFuture.newFuture(); + final GetShardSnapshotRequest request; + if (useAllRepositoriesRequest && randomBoolean()) { + request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + } else { + request = GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, repositories); + } + + client().execute(GetShardSnapshotAction.INSTANCE, request, future); + return future; + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java index 980751bf9c1b9..d3f4f7515f455 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.mockstore.MockRepository; @@ -81,5 +82,12 @@ public void testUpdateRepository() { final Repository updatedRepository = repositoriesService.repository(repositoryName); assertThat(updatedRepository, updated ? not(sameInstance(originalRepository)) : sameInstance(originalRepository)); + + // check that a noop update does not verify. Since the new data node does not share the same `path.repo`, verification will fail if + // it runs. + internalCluster().startDataOnlyNode(Settings.builder().put(Environment.PATH_REPO_SETTING.getKey(), createTempDir()).build()); + assertAcked( + client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings).get() + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 45537596ab3f8..1472803843d63 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -173,6 +173,8 @@ public void testResolveSearchRouting() { "test3", newSet("0", "1", "2", "tw ", " ltw ", " lw")))); } + @SafeVarargs + @SuppressWarnings("varargs") private Set newSet(T... 
elements) { return newHashSet(elements); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 7baaec567ec82..d4b5f11dc5807 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -137,8 +138,14 @@ private SearchResponse ensureSearchWasCancelled(ActionFuture sea SearchResponse response = searchResponse.actionGet(); logger.info("Search response {}", response); assertNotEquals("At least one shard should have failed", 0, response.getFailedShards()); + for (ShardSearchFailure failure : response.getShardFailures()) { + // The search should have failed because it was cancelled. The status of the exceptions should be 400. + assertThat(ExceptionsHelper.status(failure.getCause()), equalTo(RestStatus.BAD_REQUEST)); + } return response; } catch (SearchPhaseExecutionException ex) { + // The search should have failed because it was cancelled. The status of the response should be 400. + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.BAD_REQUEST)); logger.info("All shards failed with", ex); return null; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java index 2216db3295018..35cca197dd06c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java @@ -42,6 +42,7 @@ public void testOpenContextsAfterRejections() throws Exception { refresh(); int numSearches = 10; + @SuppressWarnings({"rawtypes", "unchecked"}) Future[] responses = new Future[numSearches]; SearchType searchType = randomFrom(SearchType.DEFAULT, SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH); logger.info("search type is {}", searchType); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java index 49e9bd866cfe5..982196a43756e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java @@ -103,6 +103,7 @@ private void assertMetadata(Map returnedMetadata) { Object nestedObject = returnedMetadata.get("complex"); assertNotNull(nestedObject); + @SuppressWarnings("unchecked") Map nestedMap = (Map)nestedObject; assertEquals("value", nestedMap.get("nested")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index d2a9122a0a053..46fc3cd772847 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.jdk.JavaVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -20,6 +19,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.jdk.JavaVersion; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -52,7 +52,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -65,7 +64,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; @@ -196,7 +194,7 @@ private static String getBucketKeyAsString(ZonedDateTime key, ZoneId tz) { public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH)) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); @@ -231,7 +229,7 @@ public void testSingleValuedField() throws Exception { public void testSingleValuedFieldWithTimeZone() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1) .timeZone(ZoneId.of("+01:00"))).execute() .actionGet(); ZoneId tz = ZoneId.of("+01:00"); @@ -295,7 +293,7 @@ public void testSingleValued_timeZone_epoch() throws Exception { ZoneId tz = ZoneId.of("+01:00"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo").field("date") - .dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) + .calendarInterval(DateHistogramInterval.DAY).minDocCount(1) .timeZone(tz).format(format)) .get(); assertSearchResponse(response); @@ -333,7 +331,7 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.key(true))) .get(); @@ -356,7 +354,7 @@ public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") 
.field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.key(false))) .get(); @@ -378,7 +376,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.count(true))) .get(); @@ -400,7 +398,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.count(false))) .get(); @@ -420,7 +418,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) .subAggregation(sum("sum").field("value"))) .get(); @@ -480,7 +478,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.aggregation("sum", true)) .subAggregation(max("sum").field("value"))) .get(); @@ -503,7 +501,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.aggregation("sum", false)) .subAggregation(max("sum").field("value"))) .get(); @@ -526,7 +524,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.aggregation("stats", "sum", false)) .subAggregation(stats("stats").field("value"))) .get(); @@ -549,7 +547,7 @@ public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.aggregation("max_constant", randomBoolean())) .subAggregation(max("max_constant").field("constant"))) .get(); @@ -575,10 +573,10 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { .prepareSearch("idx") .addAggregation( dateHistogram("histo").field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.aggregation("inner_histo>avg", asc)) .subAggregation(dateHistogram("inner_histo") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .field("dates") 
.subAggregation(avg("avg").field("value")))) .get(); @@ -606,7 +604,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { .addAggregation(dateHistogram("histo") .field("date") .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .dateHistogramInterval(DateHistogramInterval.MONTH)).get(); + .calendarInterval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(response); @@ -650,7 +648,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("dates").dateHistogramInterval(DateHistogramInterval.MONTH)) + .addAggregation(dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); @@ -694,7 +692,7 @@ public void testMultiValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("dates") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.count(false))) .get(); @@ -745,7 +743,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception { .addAggregation(dateHistogram("histo") .field("dates") .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .dateHistogramInterval(DateHistogramInterval.MONTH)).get(); + .calendarInterval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(response); @@ -798,7 +796,7 @@ public void testScriptSingleValue() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo").script( new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .dateHistogramInterval(DateHistogramInterval.MONTH)) + .calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); @@ -837,7 +835,7 @@ public void testScriptMultiValued() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo").script( new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .dateHistogramInterval(DateHistogramInterval.MONTH)) + .calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); @@ -890,7 +888,7 @@ public void testScriptMultiValued() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH)) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); @@ -903,7 +901,7 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH)) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); @@ -940,7 +938,7 @@ public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) 
.addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(dateHistogram("date_histo").field("value").interval(1))) + .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR))) .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); @@ -976,7 +974,7 @@ public void testSingleValueWithTimeZone() throws Exception { .addAggregation(dateHistogram("date_histo") .field("date") .timeZone(ZoneId.of("-02:00")) - .dateHistogramInterval(DateHistogramInterval.DAY) + .calendarInterval(DateHistogramInterval.DAY) .format("yyyy-MM-dd:HH-mm-ssZZZZZ")) .get(); @@ -1070,7 +1068,7 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { response = client().prepareSearch("idx2") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.days(interval)) + .fixedInterval(DateHistogramInterval.days(interval)) .minDocCount(0) // when explicitly specifying a format, the extended bounds should be defined by the same format .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) @@ -1140,7 +1138,7 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { .setQuery(QueryBuilders.rangeQuery("date") .from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId())) .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.hours(1)) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.hours(1)) .timeZone(timezone).minDocCount(0).extendedBounds(new LongBounds("now/d", "now/d+23h")) ).get(); assertSearchResponse(response); @@ -1192,7 +1190,7 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { response = client() .prepareSearch(index) .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.days(1)) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.days(1)) .offset("+6h").minDocCount(0) .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) ).get(); @@ -1241,7 +1239,7 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception .setQuery(matchAllQuery()) .addAggregation(dateHistogram("date_histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.DAY)) + .calendarInterval(DateHistogramInterval.DAY)) .get(); assertSearchHits(response, "0", "1", "2", "3", "4"); @@ -1261,7 +1259,7 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception public void testIssue6965() { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("+01:00")) - .dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0)) + .calendarInterval(DateHistogramInterval.MONTH).minDocCount(0)) .get(); assertSearchResponse(response); @@ -1302,7 +1300,7 @@ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionExc client().prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z")); ensureSearchable("test9491"); SearchResponse response = client().prepareSearch("test9491") - .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.YEAR) + .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.YEAR) .timeZone(ZoneId.of("Asia/Jerusalem")).format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX")) .get(); 
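Most of the DateHistogramIT churn replaces the removed dateHistogramInterval(...) builder method with the split interval API used above: calendar-aware units go through calendarInterval(...), fixed-length durations through fixedInterval(...). A minimal sketch of the distinction, reusing the dateHistogram helper from these tests (the index and aggregation names are illustrative):

    // Calendar-aware bucketing: month boundaries follow the calendar and the given time zone.
    client().prepareSearch("idx")
        .addAggregation(dateHistogram("by_month").field("date")
            .calendarInterval(DateHistogramInterval.MONTH)
            .timeZone(ZoneId.of("Europe/Berlin")))
        .get();
    // Fixed-length bucketing: every bucket spans exactly 24 hours, regardless of DST or month length.
    client().prepareSearch("idx")
        .addAggregation(dateHistogram("by_24h").field("date")
            .fixedInterval(DateHistogramInterval.hours(24)))
        .get();
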
assertSearchResponse(response); @@ -1320,7 +1318,7 @@ public void testIssue8209() throws InterruptedException, ExecutionException { client().prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z")); ensureSearchable("test8209"); SearchResponse response = client().prepareSearch("test8209") - .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.MONTH) + .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH) .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") .timeZone(ZoneId.of("CET")).minDocCount(0)) .get(); @@ -1338,18 +1336,7 @@ public void testIssue8209() throws InterruptedException, ExecutionException { internalCluster().wipeIndices("test8209"); } - /** - * see issue #9634, negative dateHistogramInterval in date_histogram should raise exception - */ - public void testExceptionOnNegativeInterval() { - try { - client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").interval(-TimeUnit.DAYS.toMillis(1)).minDocCount(0)).get(); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("[interval] must be 1 or greater for aggregation [date_histogram]")); - } - } + // TODO: add some tests for negative fixed and calendar intervals /** * https://github.com/elastic/elasticsearch/issues/31760 shows an edge case where an unmapped "date" field in two indices @@ -1365,7 +1352,7 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce SearchResponse response = client().prepareSearch(indexDateUnmapped) .addAggregation( - dateHistogram("histo").field("dateField").dateHistogramInterval(DateHistogramInterval.MONTH).format("yyyy-MM") + dateHistogram("histo").field("dateField").calendarInterval(DateHistogramInterval.MONTH).format("yyyy-MM") .minDocCount(0).extendedBounds(new LongBounds("2018-01", "2018-01"))) .get(); assertSearchResponse(response); @@ -1387,7 +1374,7 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, indexRandom(true, client().prepareIndex(index).setSource("d", "1477954800000")); ensureSearchable(index); SearchResponse response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin"))).get(); + .calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin"))).get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); @@ -1399,7 +1386,7 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")).format("yyyy-MM-dd")) + .calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")).format("yyyy-MM-dd")) .get(); assertSearchResponse(response); histo = response.getAggregations().get("histo"); @@ -1438,7 +1425,7 @@ public void testDSTEndTransition() throws Exception { response = client().prepareSearch("idx") .setQuery(new MatchNoneQueryBuilder()) .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) - .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( + 
.calendarInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) .get(); @@ -1477,7 +1464,7 @@ public void testScriptCaching() throws Exception { params.put("fieldname", "d"); SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateHistogram("histo").field("d") .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) - .dateHistogramInterval(DateHistogramInterval.MONTH)).get(); + .calendarInterval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(r); assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() @@ -1488,7 +1475,7 @@ public void testScriptCaching() throws Exception { // Test that a request using a deterministic script gets cached r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateHistogram("histo").field("d") .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .dateHistogramInterval(DateHistogramInterval.MONTH)).get(); + .calendarInterval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(r); assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() @@ -1498,7 +1485,7 @@ public void testScriptCaching() throws Exception { // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.MONTH)).get(); + .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)).get(); assertSearchResponse(r); assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() @@ -1548,7 +1535,7 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { SearchResponse response = client() .prepareSearch("sort_idx") .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).order(BucketOrder.compound(order)) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).get(); assertSearchResponse(response); @@ -1591,7 +1578,7 @@ public void testDateNanosHistogram() throws Exception { //Search interval 24 hours SearchResponse r = client().prepareSearch("nanos") .addAggregation(dateHistogram("histo").field("date"). 
- interval(1000 * 60 * 60 * 24).timeZone(ZoneId.of("Europe/Berlin"))) + fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("Europe/Berlin"))) .addDocValueField("date") .get(); assertSearchResponse(r); @@ -1606,7 +1593,7 @@ public void testDateNanosHistogram() throws Exception { r = client().prepareSearch("nanos") .addAggregation(dateHistogram("histo").field("date") - .interval(1000 * 60 * 60 * 24).timeZone(ZoneId.of("UTC"))) + .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC"))) .addDocValueField("date") .get(); assertSearchResponse(r); @@ -1624,7 +1611,7 @@ public void testDateKeyFormatting() { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.MONTH) + .calendarInterval(DateHistogramInterval.MONTH) .timeZone(ZoneId.of("America/Edmonton"))) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index f98b87689a493..aad4ef7963c4b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -77,7 +77,7 @@ public void testSingleValueWithPositiveOffset() throws Exception { .field("date") .offset("2h") .format(DATE_FORMAT) - .dateHistogramInterval(DateHistogramInterval.DAY)) + .fixedInterval(DateHistogramInterval.DAY)) .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -99,7 +99,7 @@ public void testSingleValueWithNegativeOffset() throws Exception { .field("date") .offset("-2h") .format(DATE_FORMAT) - .dateHistogramInterval(DateHistogramInterval.DAY)) + .fixedInterval(DateHistogramInterval.DAY)) .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -126,7 +126,7 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { .offset("6h") .minDocCount(0) .format(DATE_FORMAT) - .dateHistogramInterval(DateHistogramInterval.DAY)) + .fixedInterval(DateHistogramInterval.DAY)) .get(); assertThat(response.getHits().getTotalHits().value, equalTo(24L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 6ad8e262e37d9..4dd5b4ec6cefe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -391,7 +391,7 @@ private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception .addAggregation( dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.DAY) + .fixedInterval(DateHistogramInterval.DAY) .order(order) .minDocCount(0)) .get(); @@ -405,7 +405,7 @@ private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception .addAggregation( dateHistogram("histo") .field("date") - .dateHistogramInterval(DateHistogramInterval.DAY) + .fixedInterval(DateHistogramInterval.DAY) .order(order) .minDocCount(minDocCount)) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java index 55de09ce607de..0cb218750672c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java @@ -82,7 +82,7 @@ public void testGlobal() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(global("global") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -97,7 +97,7 @@ public void testFilter() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(filter("filter", QueryBuilders.matchAllQuery()) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -112,7 +112,7 @@ public void testMissing() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(missing("missing").field("foobar") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -130,7 +130,7 @@ public void testGlobalWithFilterWithMissing() throws Exception { .subAggregation(filter("filter", QueryBuilders.matchAllQuery()) .subAggregation(missing("missing").field("foobar") .subAggregation(dateHistogram("histo").field("date") - .dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0))))) + .fixedInterval(DateHistogramInterval.DAY).minDocCount(0))))) .get(); assertSearchResponse(response); @@ -146,7 +146,7 @@ public void testNested() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(nested("nested", "nested") - .subAggregation(dateHistogram("histo").field("nested.date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -162,7 +162,7 @@ public void testStringTerms() throws Exception { .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(terms("terms").field("term-s") .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -178,7 +178,7 @@ public void testLongTerms() throws Exception { .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(terms("terms").field("term-l") .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -194,7 +194,7 @@ public void testDoubleTerms() throws Exception { .setQuery(QueryBuilders.matchAllQuery()) 
.addAggregation(terms("terms").field("term-d") .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -209,7 +209,7 @@ public void testRange() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(range("range").field("value").addRange("r1", 0, 10) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -224,7 +224,7 @@ public void testDateRange() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(dateRange("range").field("date").addRange("r1", "2014-01-01", "2014-01-10") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -239,7 +239,7 @@ public void testIpRange() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(ipRange("range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -254,7 +254,7 @@ public void testHistogram() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(histogram("topHisto").field("value").interval(5) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -268,8 +268,8 @@ public void testHistogram() throws Exception { public void testDateHistogram() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(dateHistogram("topHisto").field("date").dateHistogramInterval(DateHistogramInterval.MONTH) - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .addAggregation(dateHistogram("topHisto").field("date").calendarInterval(DateHistogramInterval.MONTH) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -285,7 +285,7 @@ public void testGeoHashGrid() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(geohashGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); @@ -300,7 +300,7 @@ public void testGeoTileGrid() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(QueryBuilders.matchAllQuery()) .addAggregation(geotileGrid("grid").field("location") - 
.subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) .minDocCount(0))) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index a7974999913bd..a2e76892342ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.test.ESIntegTestCase; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -88,8 +89,7 @@ public void setupSuiteScopeCluster() throws Exception { .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) .endObject())); } - assertAcked(prepareCreate("idx_fixed_docs_0").setMapping(STRING_FIELD_NAME, "type=keyword") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))); + Map shard0DocsPerTerm = new HashMap<>(); shard0DocsPerTerm.put("A", 25); shard0DocsPerTerm.put("B", 18); @@ -101,16 +101,8 @@ public void setupSuiteScopeCluster() throws Exception { shard0DocsPerTerm.put("H", 2); shard0DocsPerTerm.put("I", 1); shard0DocsPerTerm.put("J", 1); - for (Map.Entry entry : shard0DocsPerTerm.entrySet()) { - for (int i = 0; i < entry.getValue(); i++) { - String term = entry.getKey(); - builders.add(client().prepareIndex("idx_fixed_docs_0").setId(term + "-" + i) - .setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, term).endObject())); - } - } + buildIndex(shard0DocsPerTerm, "idx_fixed_docs_0", 0, builders); - assertAcked(prepareCreate("idx_fixed_docs_1").setMapping(STRING_FIELD_NAME, "type=keyword") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))); Map shard1DocsPerTerm = new HashMap<>(); shard1DocsPerTerm.put("A", 30); shard1DocsPerTerm.put("B", 25); @@ -122,17 +114,8 @@ public void setupSuiteScopeCluster() throws Exception { shard1DocsPerTerm.put("Q", 6); shard1DocsPerTerm.put("J", 8); shard1DocsPerTerm.put("C", 4); - for (Map.Entry entry : shard1DocsPerTerm.entrySet()) { - for (int i = 0; i < entry.getValue(); i++) { - String term = entry.getKey(); - builders.add(client().prepareIndex("idx_fixed_docs_1").setId(term + "-" + i) - .setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, term).field("shard", 1).endObject())); - } - } + buildIndex(shard1DocsPerTerm, "idx_fixed_docs_1", 1, builders); - assertAcked(prepareCreate("idx_fixed_docs_2") - .setMapping(STRING_FIELD_NAME, "type=keyword") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))); Map shard2DocsPerTerm = new HashMap<>(); shard2DocsPerTerm.put("A", 45); shard2DocsPerTerm.put("C", 44); @@ -142,16 +125,49 @@ public void setupSuiteScopeCluster() throws Exception { shard2DocsPerTerm.put("H", 28); shard2DocsPerTerm.put("Q", 2); shard2DocsPerTerm.put("D", 1); - for (Map.Entry entry : shard2DocsPerTerm.entrySet()) { + buildIndex(shard2DocsPerTerm, "idx_fixed_docs_2", 2, builders); + + Map shard3DocsPerTerm = Map.of( + "A", 1, + "B", 1, + "C", 1 + ); + buildIndex(shard3DocsPerTerm, "idx_fixed_docs_3", 3, builders); + + Map 
shard4DocsPerTerm = Map.of( + "K", 1, + "L", 1, + "M", 1 + ); + buildIndex(shard4DocsPerTerm, "idx_fixed_docs_4", 4, builders); + + Map shard5DocsPerTerm = Map.of( + "X", 1, + "Y", 1, + "Z", 1 + ); + buildIndex(shard5DocsPerTerm, "idx_fixed_docs_5", 5, builders); + + indexRandom(true, builders); + ensureSearchable(); + } + + private void buildIndex(Map docsPerTerm, String index, int shard, List builders) + throws IOException { + assertAcked( + prepareCreate(index).setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)) + ); + for (Map.Entry entry : docsPerTerm.entrySet()) { for (int i = 0; i < entry.getValue(); i++) { String term = entry.getKey(); - builders.add(client().prepareIndex("idx_fixed_docs_2").setId(term + "-" + i) - .setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, term).field("shard", 2).endObject())); + builders.add( + client().prepareIndex(index) + .setId(term + "-" + i) + .setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, term).field("shard", shard).endObject()) + ); } } - - indexRandom(true, builders); - ensureSearchable(); } private void assertDocCountErrorWithinBounds(int size, SearchResponse accurateResponse, SearchResponse testResponse) { @@ -1014,4 +1030,21 @@ public void testFixedDocs() throws Exception { assertThat(bucket.getDocCountError(), equalTo(29L)); } + /** + * Tests the upper bounds are correct when performing incremental reductions + * See https://github.com/elastic/elasticsearch/issues/40005 for more details + */ + public void testIncrementalReduction() { + SearchResponse response = client().prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5") + .addAggregation(terms("terms") + .executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5).shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); + assertSearchResponse(response); + Terms terms = response.getAggregations().get("terms"); + assertThat(terms.getDocCountError(), equalTo(0L)); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java new file mode 100644 index 0000000000000..d76aa092d2685 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.terms; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matchers; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; + +/** + * Test that index enough data to trigger the creation of Cuckoo filters. + */ + +public class RareTermsIT extends ESSingleNodeTestCase { + + private static final String index = "idx"; + + private void indexDocs(int numDocs) { + final BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < numDocs; ++i) { + bulk.add(new IndexRequest(index).source("{\"str_value\" : \"s" + i + "\"}", XContentType.JSON)); + } + assertNoFailures(bulk.get()); + } + + public void testSingleValuedString() { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + createIndex(index, settings.build()); + // We want to trigger the usage of cuckoo filters that happen only when there are + // more than 10k distinct values in one shard. + final int numDocs = randomIntBetween(12000, 17000); + // Index every value 3 times + for (int i = 0; i < 3; i++) { + indexDocs(numDocs); + assertNoFailures(client().admin().indices().prepareRefresh(index).get()); + } + // There are no rare terms that only appear in one document + assertNumRareTerms(1, 0); + // All terms have a cardinality lower than 10 + assertNumRareTerms(10, numDocs); + } + + private void assertNumRareTerms(int maxDocs, int rareTerms) { + final SearchRequestBuilder requestBuilder = client().prepareSearch(index); + requestBuilder.addAggregation(new RareTermsAggregationBuilder("rareTerms").field("str_value.keyword").maxDocCount(maxDocs)); + final SearchResponse response = requestBuilder.get(); + assertNoFailures(response); + final RareTerms terms = response.getAggregations().get("rareTerms"); + assertThat(terms.getBuckets().size(), Matchers.equalTo(rareTerms)); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index b6090286b81ad..a8bf4d6a04175 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -86,10 +86,10 @@ protected Map, Object>> pluginScripts() { aggScript(vars, state -> { // Lazily populate state.list for tests without an init script if (state.containsKey("list") == false) { - state.put("list", new ArrayList()); + state.put("list", new ArrayList<>()); } - ((List) state.get("list")).add(1); + ((List) state.get("list")).add(1); })); scripts.put("state[param1] = param2", vars -> @@ -100,7 +100,7 @@ protected Map, Object>> pluginScripts() { ((Map) vars.get("vars")).put("multiplier", 3); Map state = (Map) vars.get("state"); - state.put("list", new ArrayList()); + state.put("list", new ArrayList<>()); return state; }); @@ -109,10 
+109,10 @@ protected Map, Object>> pluginScripts() { aggScript(vars, state -> { // Lazily populate state.list for tests without an init script if (state.containsKey("list") == false) { - state.put("list", new ArrayList()); + state.put("list", new ArrayList<>()); } - ((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars)); + ((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars)); })); // Equivalent to: @@ -128,12 +128,12 @@ protected Map, Object>> pluginScripts() { // return newaggregation" // scripts.put("sum state values as a new aggregation", vars -> { - List newAggregation = new ArrayList(); + List newAggregation = new ArrayList<>(); Map state = (Map) vars.get("state"); List list = (List) state.get("list"); if (list != null) { - Integer sum = 0; + int sum = 0; for (Object s : list) { sum += ((Number) s).intValue(); } @@ -142,13 +142,9 @@ protected Map, Object>> pluginScripts() { return newAggregation; }); - scripts.put("no-op aggregation", vars -> { - return (Map) vars.get("state"); - }); + scripts.put("no-op aggregation", vars -> vars.get("state")); - scripts.put("no-op list aggregation", vars -> { - return (List>) vars.get("states"); - }); + scripts.put("no-op list aggregation", vars -> vars.get("states")); // Equivalent to: // @@ -165,8 +161,8 @@ protected Map, Object>> pluginScripts() { // return newaggregation" // scripts.put("sum all states (lists) values as a new aggregation", vars -> { - List newAggregation = new ArrayList(); - Integer sum = 0; + List newAggregation = new ArrayList<>(); + int sum = 0; List> states = (List>) vars.get("states"); for (List list : states) { @@ -181,8 +177,8 @@ protected Map, Object>> pluginScripts() { }); scripts.put("sum all states' state.list values as a new aggregation", vars -> { - List newAggregation = new ArrayList(); - Integer sum = 0; + List newAggregation = new ArrayList<>(); + int sum = 0; List> states = (List>) vars.get("states"); @@ -218,8 +214,8 @@ protected Map, Object>> pluginScripts() { // scripts.put("multiplied sum all states (lists) values as a new aggregation", vars -> { Integer multiplier = (Integer) vars.get("multiplier"); - List newAggregation = new ArrayList(); - Integer sum = 0; + List newAggregation = new ArrayList<>(); + int sum = 0; List> states = (List>) vars.get("states"); for (List list : states) { @@ -253,8 +249,8 @@ protected Map, Object>> nonDeterministicPlu return scripts; } - @SuppressWarnings("unchecked") static Map aggScript(Map vars, Consumer> fn) { + @SuppressWarnings("unchecked") Map aggState = (Map) vars.get("state"); fn.accept(aggState); return aggState; @@ -377,7 +373,7 @@ public void testMap() { if (map.size() == 1) { assertThat(map.get("count"), notNullValue()); assertThat(map.get("count"), instanceOf(Number.class)); - assertThat(map.get("count"), equalTo((Number) 1)); + assertThat(map.get("count"), equalTo(1)); numShardsRun++; } } @@ -431,7 +427,7 @@ public void testMapWithParams() { String stringValue = (String) entry.getKey(); assertThat(stringValue, equalTo("12")); Number numberValue = (Number) entry.getValue(); - assertThat(numberValue, equalTo((Number) 1)); + assertThat(numberValue, equalTo(1)); numShardsRun++; } } @@ -476,6 +472,7 @@ public void testInitMutatesParams() { for (Object object : aggregationList) { assertThat(object, notNullValue()); assertThat(object, instanceOf(HashMap.class)); + @SuppressWarnings("unchecked") Map map = (Map) object; assertThat(map, hasKey("list")); assertThat(map.get("list"), instanceOf(List.class)); 
@@ -484,7 +481,7 @@ public void testInitMutatesParams() { assertThat(o, notNullValue()); assertThat(o, instanceOf(Number.class)); Number numberValue = (Number) o; - assertThat(numberValue, equalTo((Number) 3)); + assertThat(numberValue, equalTo(3)); totalCount += numberValue.longValue(); } } @@ -698,8 +695,8 @@ public void testInitMapCombineReduceGetProperty() throws Exception { assertThat(object, instanceOf(Number.class)); assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); assertThat(((InternalAggregation)global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); - assertThat((List) ((InternalAggregation)global).getProperty("scripted.value"), sameInstance((List) aggregationList)); - assertThat((List) ((InternalAggregation)scriptedMetricAggregation).getProperty("value"), sameInstance((List) aggregationList)); + assertThat((List) ((InternalAggregation)global).getProperty("scripted.value"), sameInstance(aggregationList)); + assertThat((List) ((InternalAggregation)scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); } public void testMapCombineReduceWithParams() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java index 71d8eea432c4d..b173c15805a9b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Avg; @@ -111,7 +112,7 @@ private XContentBuilder newDocBuilder(long timeMillis, String fooValue, Double v public void testEmptyBucketSort() { SearchResponse response = client().prepareSearch(INDEX) .setSize(0) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).interval(TimeValue.timeValueHours(1).millis())) + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR)) .get(); assertSearchResponse(response); @@ -129,7 +130,7 @@ public void testEmptyBucketSort() { // Now let's test using size response = client().prepareSearch(INDEX) .setSize(0) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).interval(TimeValue.timeValueHours(1).millis()) + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR) .subAggregation(bucketSort("bucketSort", Collections.emptyList()).size(3))) .get(); @@ -146,7 +147,7 @@ public void testEmptyBucketSort() { // Finally, let's test using size + from response = client().prepareSearch(INDEX) .setSize(0) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).interval(TimeValue.timeValueHours(1).millis()) + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR) .subAggregation(bucketSort("bucketSort", Collections.emptyList()).size(3).from(2))) .get(); @@ -294,7 +295,7 @@ public void testSortTermsOnCountWithSecondarySort() { public void testSortDateHistogramDescending() 
{ SearchResponse response = client().prepareSearch(INDEX) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).interval(TimeValue.timeValueHours(1).millis())) + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR)) .get(); assertSearchResponse(response); @@ -305,7 +306,7 @@ public void testSortDateHistogramDescending() { List ascendingTimeBuckets = histo.getBuckets(); response = client().prepareSearch(INDEX) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).interval(TimeValue.timeValueHours(1).millis()) + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR) .subAggregation(bucketSort("bucketSort", Arrays.asList( new FieldSortBuilder("_key").order(SortOrder.DESC))))) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java index 53fa8243223fc..7d01a18424467 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -97,7 +97,7 @@ public void testSingleValuedField() throws Exception { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv", "_count"))).get(); assertSearchResponse(response); @@ -139,7 +139,7 @@ public void testSingleValuedFieldNormalised() throws Exception { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.DAY))).get(); assertSearchResponse(response); @@ -199,7 +199,7 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep SearchResponse response = client() .prepareSearch(IDX_DST_START) - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY) .timeZone(timezone).minDocCount(0) .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR))) .get(); @@ -250,7 +250,7 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti SearchResponse response = client() .prepareSearch(IDX_DST_END) - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY) .timeZone(timezone).minDocCount(0) .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR))) .get(); @@ -303,7 +303,7 @@ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exce SearchResponse response = client() .prepareSearch(IDX_DST_KATHMANDU) - .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.HOUR) + 
.addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.HOUR) .timeZone(timezone).minDocCount(0) .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.MINUTE))) .get(); @@ -359,7 +359,7 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(sum("sum").field("value")).subAggregation(derivative("deriv", "sum"))) .get(); @@ -430,7 +430,7 @@ public void testMultiValuedField() throws Exception { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("dates").dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0) + dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv", "_count"))).get(); assertSearchResponse(response); @@ -485,7 +485,7 @@ public void testUnmapped() throws Exception { SearchResponse response = client() .prepareSearch("idx_unmapped") .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv", "_count"))).get(); assertSearchResponse(response); @@ -500,7 +500,7 @@ public void testPartiallyUnmapped() throws Exception { SearchResponse response = client() .prepareSearch("idx", "idx_unmapped") .addAggregation( - dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0) + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv", "_count"))).get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index 2232799e96071..f73b76054fc4b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -517,7 +517,7 @@ public void testFieldIsntWrittenOutTwice() throws Exception { PipelineAggregatorBuilders.maxBucket("peak", "licenses_per_day>total_licenses"); SumAggregationBuilder sumAggBuilder = AggregationBuilders.sum("total_licenses").field("license.count"); DateHistogramAggregationBuilder licensePerDayBuilder = - AggregationBuilders.dateHistogram("licenses_per_day").field("@timestamp").dateHistogramInterval(DateHistogramInterval.DAY); + AggregationBuilders.dateHistogram("licenses_per_day").field("@timestamp").fixedInterval(DateHistogramInterval.DAY); licensePerDayBuilder.subAggregation(sumAggBuilder); groupByLicenseAgg.subAggregation(licensePerDayBuilder); groupByLicenseAgg.subAggregation(peakPipelineAggBuilder); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java index 7f602cdb614a3..2fd8be334ff3e 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java @@ -81,7 +81,7 @@ public String toString(){ } } - private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { + private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { if (expectedBucketIter.hasNext() == false) { fail("`expectedBucketIter` iterator ended before `actual` iterator, size mismatch"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 2de490ba5b188..04d38578a87be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.basic; +import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -21,6 +22,7 @@ import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -65,10 +67,12 @@ public void run() { while (stop.get() == false) { SearchResponse sr = client().prepareSearch().setSize(numDocs).get(); if (sr.getHits().getTotalHits().value != numDocs) { - // if we did not search all shards but had no failures that is potentially fine + // if we did not search all shards but had no serious failures that is potentially fine // if only the hit-count is wrong. this can happen if the cluster-state is behind when the // request comes in. It's a small window but a known limitation. - if (sr.getTotalShards() != sr.getSuccessfulShards() && sr.getFailedShards() == 0) { + if (sr.getTotalShards() != sr.getSuccessfulShards() && + Stream.of(sr.getShardFailures()).allMatch( + ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { nonCriticalExceptions.add("Count is " + sr.getHits().getTotalHits().value + " but " + numDocs + " was expected. " + formatShardStatus(sr)); } else { @@ -84,8 +88,7 @@ public void run() { } } catch (SearchPhaseExecutionException ex) { // it's possible that all shards fail if we have a small number of shards. 
- // with replicas this should not happen - if (numberOfReplicas == 1 || ex.getMessage().contains("all shards failed") == false) { + if (ex.getMessage().contains("all shards failed") == false) { throw ex; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index f0ad85d3b672f..5e8785d60ab6f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -695,15 +695,60 @@ public void testPlainHighlighter() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); - logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource() .query(termQuery("field1", "test")) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); + .highlighter(highlight().highlighterType("plain").field("field1").order("score").preTags("").postTags("")); SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a test")); + } + + public void testPlainHighlighterOrder() throws Exception { + ensureGreen(); + + client().prepareIndex("test") + .setSource("field1", "The quick brown fox jumps over the lazy brown dog but to no suprise the dog doesn't care").get(); + refresh(); + + { + // fragments should be in order of appearance by default + SearchSourceBuilder source = searchSource().query(matchQuery("field1", "brown dog")) + .highlighter( + highlight().highlighterType("plain").field("field1").preTags("").postTags("").fragmentSize(25) + ); + + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox")); + assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + + // lets be explicit about the order + source = searchSource().query(matchQuery("field1", "brown dog")) + .highlighter( + highlight().highlighterType("plain").field("field1").order("none").preTags("").postTags("").fragmentSize(25) + ); + + searchResponse = client().prepareSearch("test").setSource(source).get(); + + assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox")); + assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + } + { + // order by score + SearchSourceBuilder source = searchSource().query(matchQuery("field1", "brown dog")) + .highlighter( + highlight().highlighterType("plain").order("score").field("field1").preTags("").postTags("").fragmentSize(25) + ); + + SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); + + assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog")); + assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo("The quick brown fox")); + assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care")); + } } public void testFastVectorHighlighter() throws Exception { 
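For context, a minimal sketch (separate from the diff itself) of how a caller requests score-ordered fragments from the plain highlighter, using the same HighlightBuilder calls that the new testPlainHighlighterOrder above exercises; the index name, field name, and tag strings are illustrative assumptions, not taken from the patch:

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;
    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

    public class PlainHighlighterOrderSketch {
        // Builds a search source that highlights "field1" with the plain highlighter,
        // returning fragments ordered by score rather than by order of appearance.
        public static SearchSourceBuilder scoreOrderedHighlight() {
            HighlightBuilder highlight = new HighlightBuilder()
                .highlighterType("plain")
                .order("score")          // omit, or use "none", to keep appearance order
                .preTags("<xxx>")
                .postTags("</xxx>")
                .fragmentSize(25)
                .field("field1");
            return new SearchSourceBuilder()
                .query(QueryBuilders.matchQuery("field1", "brown dog"))
                .highlighter(highlight);
        }
    }

The resulting source would then be passed to a search request in the same way the test does with client().prepareSearch("test").setSource(source).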
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 9022005b56c56..b477781930238 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -17,9 +17,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -409,7 +409,7 @@ protected TestMetadataMapper() { } @Override - protected void parseCreateField(ParseContext context) throws IOException {} + protected void parseCreateField(DocumentParserContext context) throws IOException {} @Override protected String contentType() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 09bce4213f6aa..1f911f5d59038 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -85,27 +85,28 @@ protected Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); scripts.put("doc['num1'].value", vars -> { - Map doc = (Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); ScriptDocValues.Doubles num1 = (ScriptDocValues.Doubles) doc.get("num1"); return num1.getValue(); }); scripts.put("doc['num1'].value * factor", vars -> { - Map doc = (Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); ScriptDocValues.Doubles num1 = (ScriptDocValues.Doubles) doc.get("num1"); + @SuppressWarnings("unchecked") Map params = (Map) vars.get("params"); Double factor = (Double) params.get("factor"); return num1.getValue() * factor; }); scripts.put("doc['date'].date.millis", vars -> { - Map doc = (Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); ScriptDocValues.Dates dates = (ScriptDocValues.Dates) doc.get("date"); return dates.getValue().toInstant().toEpochMilli(); }); scripts.put("doc['date'].date.nanos", vars -> { - Map doc = (Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); ScriptDocValues.Dates dates = (ScriptDocValues.Dates) doc.get("date"); return DateUtils.toLong(dates.getValue().toInstant()); }); @@ -135,19 +136,19 @@ protected Map, Object>> pluginScripts() { } static Object fieldsScript(Map vars, String fieldName) { - Map fields = (Map) vars.get("_fields"); + Map fields = (Map) vars.get("_fields"); FieldLookup fieldLookup = (FieldLookup) fields.get(fieldName); return fieldLookup.getValue(); } - @SuppressWarnings("unchecked") static Object sourceScript(Map vars, String path) { - Map source = (Map) vars.get("_source"); + @SuppressWarnings("unchecked") + Map source = (Map) vars.get("_source"); return XContentMapValues.extractValue(path, source); } static Object docScript(Map vars, String fieldName) { - Map doc = (Map) vars.get("doc"); + Map doc 
= (Map) vars.get("doc"); ScriptDocValues values = (ScriptDocValues) doc.get(fieldName); return values; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 2c8093c13fc3c..be264e8289184 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -18,6 +18,8 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.DocReader; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ExplainableScoreScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; @@ -79,8 +81,8 @@ public boolean needs_score() { } @Override - public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - return new MyScript(params1, lookup, ctx); + public ScoreScript newInstance(DocReader docReader) throws IOException { + return new MyScript(params1, lookup, ((DocValuesDocReader) docReader).getLeafReaderContext()); } }; return context.factoryClazz.cast(factory); @@ -97,7 +99,7 @@ public Set> getSupportedContexts() { static class MyScript extends ScoreScript implements ExplainableScoreScript { MyScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { - super(params, lookup, leafContext); + super(params, null, new DocValuesDocReader(lookup, leafContext)); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/AbstractGeoBoundingBoxQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/AbstractGeoBoundingBoxQueryIT.java index 0336b1bd8cd08..d0a57b45999d2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/AbstractGeoBoundingBoxQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/AbstractGeoBoundingBoxQueryIT.java @@ -100,7 +100,7 @@ public void testSimpleBoundingBoxTest() throws Exception { } searchResponse = client().prepareSearch() // from NY - .setQuery(geoBoundingBoxQuery("location").setCorners(40.73, -74.1, 40.717, -73.99).type("indexed")) + .setQuery(geoBoundingBoxQuery("location").setCorners(40.73, -74.1, 40.717, -73.99)) .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -150,8 +150,7 @@ public void testLimit2BoundingBox() throws Exception { searchResponse = client().prepareSearch() .setQuery( boolQuery().must(termQuery("userid", 880)).filter( - geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875) - .type("indexed")) + geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875)) ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -164,8 +163,7 @@ public void testLimit2BoundingBox() throws Exception { searchResponse = client().prepareSearch() .setQuery( boolQuery().must(termQuery("userid", 534)).filter( - geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875) - .type("indexed")) + geoBoundingBoxQuery("location").setCorners(74.579421999999994, 143.5, -66.668903999999998, 113.96875)) ).get(); 
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -216,7 +214,6 @@ public void testCompleteLonRange() throws Exception { searchResponse = client().prepareSearch() .setQuery( geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(50, -180, -50, 180) - .type("indexed") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); searchResponse = client().prepareSearch() @@ -227,7 +224,6 @@ public void testCompleteLonRange() throws Exception { searchResponse = client().prepareSearch() .setQuery( geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(90, -180, -90, 180) - .type("indexed") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); @@ -239,7 +235,6 @@ public void testCompleteLonRange() throws Exception { searchResponse = client().prepareSearch() .setQuery( geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(50, 0, -50, 360) - .type("indexed") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); searchResponse = client().prepareSearch() @@ -250,7 +245,6 @@ public void testCompleteLonRange() throws Exception { searchResponse = client().prepareSearch() .setQuery( geoBoundingBoxQuery("location").setValidationMethod(GeoValidationMethod.COERCE).setCorners(90, 0, -90, 360) - .type("indexed") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 5a83a881946cb..ddd66e0dc4ef1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.search.profile.aggregation; +import io.github.nik9000.mapmatcher.MapMatcher; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; @@ -32,6 +34,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static io.github.nik9000.mapmatcher.ListMatcher.matchesList; import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -44,10 +47,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.SuiteScopeTestCase @@ -74,6 +73,7 @@ public class AggregationProfilerIT extends ESIntegTestCase { ); private static final String TOTAL_BUCKETS = "total_buckets"; + private static final String BUILT_BUCKETS = "built_buckets"; private static final String DEFERRED = "deferred_aggregators"; private static final String COLLECTION_STRAT = "collection_strategy"; private static final String RESULT_STRAT = "result_strategy"; @@ -143,10 +143,10 @@ 
public void testSimpleProfile() { assertThat(breakdown.get(COLLECT), greaterThan(0L)); assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); assertThat(breakdown.get(REDUCE), equalTo(0L)); - Map debug = histoAggResult.getDebugInfo(); - assertThat(debug, notNullValue()); - assertThat(debug.keySet(), equalTo(Set.of(TOTAL_BUCKETS))); - assertThat(((Number) debug.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); } } @@ -187,11 +187,10 @@ public void testMultiLevelProfile() { assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - Map histoDebugInfo = histoAggResult.getDebugInfo(); - assertThat(histoDebugInfo, notNullValue()); - assertThat(histoDebugInfo.keySet(), equalTo(Set.of(TOTAL_BUCKETS))); - assertThat(((Number) histoDebugInfo.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L)); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); assertThat(termsAggResult, notNullValue()); @@ -213,23 +212,33 @@ public void testMultiLevelProfile() { assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator")); assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); assertThat(avgAggResult.getTime(), greaterThan(0L)); - Map avgBreakdown = termsAggResult.getTimeBreakdown(); + Map avgBreakdown = avgAggResult.getTimeBreakdown(); assertThat(avgBreakdown, notNullValue()); assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap( + avgAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) + ); assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); } } - private void assertRemapTermsDebugInfo(ProfileResult termsAggResult) { - assertThat(termsAggResult.getDebugInfo(), hasEntry(COLLECTION_STRAT, "remap using many bucket ords")); - assertThat(termsAggResult.getDebugInfo(), hasEntry(RESULT_STRAT, "terms")); - assertThat(termsAggResult.getDebugInfo(), hasEntry(HAS_FILTER, false)); - assertThat(termsAggResult.getDebugInfo().toString(), (int) termsAggResult.getDebugInfo().get(SEGMENTS_WITH_SINGLE), greaterThan(0)); + private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... 
deferredAggregators) { + MapMatcher matcher = matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)) + .entry(BUILT_BUCKETS, greaterThan(0)) + .entry(COLLECTION_STRAT, "remap using many bucket ords") + .entry(RESULT_STRAT, "terms") + .entry(HAS_FILTER, false) + .entry(SEGMENTS_WITH_SINGLE, greaterThan(0)) + .entry(SEGMENTS_WITH_MULTI, 0); + if (deferredAggregators.length > 0) { + matcher = matcher.entry(DEFERRED, List.of(deferredAggregators)); + } + assertMap(termsAggResult.getDebugInfo(), matcher); } public void testMultiLevelProfileBreadthFirst() { @@ -261,10 +270,10 @@ public void testMultiLevelProfileBreadthFirst() { assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - Map histoDebugInfo = histoAggResult.getDebugInfo(); - assertThat(histoDebugInfo, notNullValue()); - assertThat(histoDebugInfo.keySet(), equalTo(Set.of(TOTAL_BUCKETS))); - assertThat(((Number) histoDebugInfo.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); @@ -279,7 +288,7 @@ public void testMultiLevelProfileBreadthFirst() { assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(termsBreakdown.get(REDUCE), equalTo(0L)); - assertRemapTermsDebugInfo(termsAggResult); + assertRemapTermsDebugInfo(termsAggResult, "avg"); assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1)); ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); @@ -294,7 +303,10 @@ public void testMultiLevelProfileBreadthFirst() { assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap( + avgAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) + ); assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); } } @@ -330,8 +342,7 @@ public void testDiversifiedAggProfile() { assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L)); - assertThat(diversifyAggResult.getDebugInfo(), equalTo(Map.of(DEFERRED, List.of("max")))); - assertThat(diversifyAggResult.getProfiledChildren().size(), equalTo(1)); + assertMap(diversifyAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max"))); ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0); assertThat(maxAggResult, notNullValue()); @@ -347,7 +358,7 @@ public void testDiversifiedAggProfile() { assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); } } @@ -391,10 +402,10 @@ public void testComplexProfile() { 
assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(histoBreakdown.get(REDUCE), equalTo(0L)); - Map histoDebugInfo = histoAggResult.getDebugInfo(); - assertThat(histoDebugInfo, notNullValue()); - assertThat(histoDebugInfo.keySet(), equalTo(Set.of(TOTAL_BUCKETS))); - assertThat(((Number) histoDebugInfo.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0)) + ); assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2)); Map histoAggResultSubAggregations = histoAggResult.getProfiledChildren().stream() @@ -432,7 +443,7 @@ public void testComplexProfile() { assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max"); @@ -448,7 +459,7 @@ public void testComplexProfile() { assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings"); @@ -483,7 +494,7 @@ public void testComplexProfile() { assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); maxAggResult = stringsAggResultSubAggregations.get("max"); @@ -499,7 +510,7 @@ public void testComplexProfile() { assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); tagsAggResult = stringsAggResultSubAggregations.get("tags"); @@ -535,7 +546,7 @@ public void testComplexProfile() { assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); assertThat(avgBreakdown.get(REDUCE), equalTo(0L)); - assertThat(avgAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0)); maxAggResult = tagsAggResultSubAggregations.get("max"); @@ -551,7 +562,7 @@ public void testComplexProfile() { assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); 
assertThat(maxBreakdown.get(REDUCE), equalTo(0L)); - assertThat(maxAggResult.getDebugInfo(), equalTo(Map.of())); + assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0))); assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0)); } } @@ -623,30 +634,32 @@ public void testFilterByFilter() throws InterruptedException, IOException { assertThat(breakdown.get(COLLECT), equalTo(0L)); assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); assertThat(breakdown.get(REDUCE), equalTo(0L)); - Map debug = histoAggResult.getDebugInfo(); - assertThat(debug, notNullValue()); - assertThat(debug.keySet(), equalTo(Set.of("delegate", "delegate_debug"))); - assertThat(debug.get("delegate"), equalTo("RangeAggregator.FromFilters")); - Map delegate = (Map) debug.get("delegate_debug"); - assertThat(delegate.keySet(), equalTo(Set.of("average_docs_per_range", "ranges", "delegate", "delegate_debug"))); - assertThat( - ((Number) delegate.get("average_docs_per_range")).doubleValue(), - equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2) + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) + .entry("delegate", "RangeAggregator.FromFilters") + .entry( + "delegate_debug", + matchesMap().entry("average_docs_per_range", equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)) + .entry("ranges", 1) + .entry("delegate", "FilterByFilterAggregator") + .entry( + "delegate_debug", + matchesMap().entry("segments_with_deleted_docs", 0) + .entry("segments_with_doc_count_field", 0) + .entry("segments_counted", 0) + .entry("segments_collected", greaterThan(0)) + .entry( + "filters", + matchesList().item( + matchesMap().entry("query", "DocValuesFieldExistsQuery [field=date]") + .entry("specialized_for", "docvalues_field_exists") + .entry("results_from_metadata", 0) + ) + ) + ) + ) ); - assertThat(((Number) delegate.get("ranges")).longValue(), equalTo(1L)); - assertThat(delegate.get("delegate"), equalTo("FiltersAggregator.FilterByFilter")); - Map delegateDebug = (Map) delegate.get("delegate_debug"); - assertThat(delegateDebug, hasEntry("segments_with_deleted_docs", 0)); - assertThat(delegateDebug, hasEntry("segments_with_doc_count_field", 0)); - assertThat(delegateDebug, hasEntry("max_cost", (long) RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)); - assertThat(delegateDebug, hasEntry("estimated_cost", (long) RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)); - assertThat((long) delegateDebug.get("estimate_cost_time"), greaterThanOrEqualTo(0L)); // ~1,276,734 nanos is normal - List filtersDebug = (List) delegateDebug.get("filters"); - assertThat(filtersDebug, hasSize(1)); - Map queryDebug = (Map) filtersDebug.get(0); - assertThat(queryDebug, hasKey("scorers_prepared_while_estimating_cost")); - assertThat((int) queryDebug.get("scorers_prepared_while_estimating_cost"), greaterThan(0)); - assertThat(queryDebug, hasEntry("query", "DocValuesFieldExistsQuery [field=date]")); } } @@ -721,6 +734,7 @@ public void testDateHistogramFilterByFilterDisabled() throws InterruptedExceptio assertMap( debug, matchesMap().entry("delegate", "RangeAggregator.NoOverlap") + .entry("built_buckets", 1) .entry("delegate_debug", matchesMap().entry("ranges", 1).entry("average_docs_per_range", 10000.0)) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java index b9c72b53801c6..d89fa1abef555 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java @@ -79,7 +79,7 @@ public void testExists() throws Exception { Map barObject = new HashMap<>(); barObject.put("foo", "bar"); barObject.put("bar", singletonMap("bar", "foo")); - @SuppressWarnings("unchecked") + @SuppressWarnings({"rawtypes", "unchecked"}) final Map[] sources = new Map[] { // simple property singletonMap("foo", "bar"), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java index 4e91091207680..c410e727a8ef5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -143,7 +143,7 @@ private TestContext create(SearchType... searchTypes) throws Exception { } refresh(); - final SortBuilder sort; + final SortBuilder sort; if (randomBoolean()) { if (randomBoolean()) { sort = SortBuilders.fieldSort("field1").missing(1); @@ -175,10 +175,10 @@ class TestContext { final int numDocs; final int scrollRequestSize; - final SortBuilder sort; + final SortBuilder sort; final SearchType searchType; - TestContext(int numDocs, int scrollRequestSize, SortBuilder sort, SearchType searchType) { + TestContext(int numDocs, int scrollRequestSize, SortBuilder sort, SearchType searchType) { this.numDocs = numDocs; this.scrollRequestSize = scrollRequestSize; this.sort = sort; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index fd1514098ef2c..38e39acbc105f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -153,9 +153,9 @@ public void testWithSimpleTypes() throws Exception { for (int i = 0; i < numFields-1; i++) { types[i] = randomInt(6); } - List documents = new ArrayList<>(); + List> documents = new ArrayList<>(); for (int i = 0; i < NUM_DOCS; i++) { - List values = new ArrayList<>(); + List values = new ArrayList<>(); for (int type : types) { switch (type) { case 0: @@ -265,9 +265,9 @@ public void testWithCustomFormatSortValueOfDateField() throws Exception { containsString("failed to parse date field [23/04/2018] with format [epoch_millis]")); } - private static class ListComparator implements Comparator { + private static class ListComparator implements Comparator> { @Override - public int compare(List o1, List o2) { + public int compare(List o1, List o2) { if (o1.size() > o2.size()) { return 1; } @@ -282,6 +282,7 @@ public int compare(List o1, List o2) { } Object cmp1 = o1.get(i); Object cmp2 = o2.get(i); + @SuppressWarnings({"unchecked", "rawtypes"}) int cmp = ((Comparable)cmp1).compareTo(cmp2); if (cmp != 0) { return cmp; @@ -292,7 +293,7 @@ public int compare(List o1, List o2) { } private ListComparator LST_COMPARATOR = new ListComparator(); - private void assertSearchFromWithSortValues(String indexName, List documents, int reqSize) throws Exception { + private void assertSearchFromWithSortValues(String indexName, List> documents, int reqSize) throws Exception { int numFields = documents.get(0).size(); { createIndexMappingsFromObjectType(indexName, documents.get(0)); 
@@ -324,7 +325,7 @@ private void assertSearchFromWithSortValues(String indexName, List documen } SearchResponse searchResponse = req.get(); for (SearchHit hit : searchResponse.getHits()) { - List toCompare = convertSortValues(documents.get(offset++)); + List toCompare = convertSortValues(documents.get(offset++)); assertThat(LST_COMPARATOR.compare(toCompare, Arrays.asList(hit.getSortValues())), equalTo(0)); } sortValues = searchResponse.getHits().getHits()[searchResponse.getHits().getHits().length-1].getSortValues(); @@ -336,7 +337,7 @@ private void createIndexMappingsFromObjectType(String indexName, List ty List mappings = new ArrayList<> (); int numFields = types.size(); for (int i = 0; i < numFields; i++) { - Class type = types.get(i).getClass(); + Class type = types.get(i).getClass(); if (type == Integer.class) { mappings.add("field" + Integer.toString(i)); mappings.add("type=integer"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 576e2ec0dcb7e..3a6a4c087d13e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -10,17 +10,24 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.ClosePointInTimeAction; +import org.elasticsearch.action.search.ClosePointInTimeRequest; +import org.elasticsearch.action.search.OpenPointInTimeAction; +import org.elasticsearch.action.search.OpenPointInTimeRequest; +import org.elasticsearch.action.search.OpenPointInTimeResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchException; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -159,6 +166,99 @@ public void testWithPreferenceAndRoutings() throws Exception { } } + private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { + int totalResults = 0; + List keys = new ArrayList<>(); + for (int id = 0; id < numSlice; id++) { + SliceBuilder sliceBuilder = new SliceBuilder(field, id, numSlice); + SearchResponse searchResponse = request.slice(sliceBuilder).get(); + totalResults += searchResponse.getHits().getHits().length; + int expectedSliceResults = (int) searchResponse.getHits().getTotalHits().value; + int numSliceResults = searchResponse.getHits().getHits().length; + String scrollId = searchResponse.getScrollId(); + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + while (searchResponse.getHits().getHits().length > 0) { + searchResponse = client().prepareSearchScroll("test") + 
.setScrollId(scrollId) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .get(); + scrollId = searchResponse.getScrollId(); + totalResults += searchResponse.getHits().getHits().length; + numSliceResults += searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + } + assertThat(numSliceResults, equalTo(expectedSliceResults)); + clearScroll(scrollId); + } + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet<>(keys).size(), equalTo(numDocs)); + } + + public void testPointInTime() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int max = randomIntBetween(2, numShards * 3); + + // Test the default slicing strategy (null), as well as numeric doc values + for (String field : new String[]{null, "random_int", "static_int"}) { + // Open point-in-time reader + OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueSeconds(10)); + OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + String pointInTimeId = response.getPointInTimeId(); + + // Test sort on document IDs + assertSearchSlicesWithPointInTime(field, ShardDocSortField.NAME, pointInTimeId, max, numDocs); + // Test numeric sort + assertSearchSlicesWithPointInTime(field, "random_int", pointInTimeId, max, numDocs); + + // Close point-in-time reader + client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pointInTimeId)).actionGet(); + } + } + + private void assertSearchSlicesWithPointInTime(String sliceField, String sortField, String pointInTimeId, int numSlice, int numDocs) { + int totalResults = 0; + List keys = new ArrayList<>(); + for (int id = 0; id < numSlice; id++) { + int numSliceResults = 0; + + SearchRequestBuilder request = client().prepareSearch("test") + .slice(new SliceBuilder(sliceField, id, numSlice)) + .setPointInTime(new PointInTimeBuilder(pointInTimeId)) + .addSort(SortBuilders.fieldSort(sortField)) + .setSize(randomIntBetween(10, 100)); + + SearchResponse searchResponse = request.get(); + int expectedSliceResults = (int) searchResponse.getHits().getTotalHits().value; + + while (true) { + int numHits = searchResponse.getHits().getHits().length; + if (numHits == 0) { + break; + } + + totalResults += numHits; + numSliceResults += numHits; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + + Object[] sortValues = searchResponse.getHits().getHits()[numHits - 1].getSortValues(); + searchResponse = request.searchAfter(sortValues).get(); + } + assertThat(numSliceResults, equalTo(expectedSliceResults)); + } + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet<>(keys).size(), equalTo(numDocs)); + } + public void testInvalidFields() throws Exception { setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class, @@ -193,40 +293,7 @@ public void testInvalidQuery() throws Exception { Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(SearchException.class)); assertThat(rootCause.getMessage(), - equalTo("`slice` cannot be used outside of a scroll context")); - } - - private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { - int 
totalResults = 0; - List keys = new ArrayList<>(); - for (int id = 0; id < numSlice; id++) { - SliceBuilder sliceBuilder = new SliceBuilder(field, id, numSlice); - SearchResponse searchResponse = request.slice(sliceBuilder).get(); - totalResults += searchResponse.getHits().getHits().length; - int expectedSliceResults = (int) searchResponse.getHits().getTotalHits().value; - int numSliceResults = searchResponse.getHits().getHits().length; - String scrollId = searchResponse.getScrollId(); - for (SearchHit hit : searchResponse.getHits().getHits()) { - assertTrue(keys.add(hit.getId())); - } - while (searchResponse.getHits().getHits().length > 0) { - searchResponse = client().prepareSearchScroll("test") - .setScrollId(scrollId) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) - .get(); - scrollId = searchResponse.getScrollId(); - totalResults += searchResponse.getHits().getHits().length; - numSliceResults += searchResponse.getHits().getHits().length; - for (SearchHit hit : searchResponse.getHits().getHits()) { - assertTrue(keys.add(hit.getId())); - } - } - assertThat(numSliceResults, equalTo(expectedSliceResults)); - clearScroll(scrollId); - } - assertThat(totalResults, equalTo(numDocs)); - assertThat(keys.size(), equalTo(numDocs)); - assertThat(new HashSet(keys).size(), equalTo(numDocs)); + equalTo("[slice] can only be used with [scroll] or [point-in-time] requests")); } private Throwable findRootCause(Exception e) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 86134d7250b5e..5e1bd24ba3153 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -868,6 +868,146 @@ public void testSortMissingStrings() throws IOException { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); } + public void testSortMissingDates() throws IOException { + for (String type : List.of("date", "date_nanos")) { + String index = "test_" + type; + assertAcked( + prepareCreate(index).setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("mydate") + .field("type", type) + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + ensureGreen(); + client().prepareIndex(index).setId("1").setSource("mydate", "2021-01-01").get(); + client().prepareIndex(index).setId("2").setSource("mydate", "2021-02-01").get(); + client().prepareIndex(index).setId("3").setSource("other_field", "value").get(); + + refresh(); + + for (boolean withFormat : List.of(true, false)) { + String format = null; + if (withFormat) { + format = type.equals("date") ? 
"strict_date_optional_time" : "strict_date_optional_time_nanos"; + } + + SearchResponse searchResponse = client().prepareSearch(index) + .addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "1", "2", "3" }); + + searchResponse = client().prepareSearch(index) + .addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "3", "1", "2" }); + + searchResponse = client().prepareSearch(index) + .addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "2", "1", "3" }); + + searchResponse = client().prepareSearch(index) + .addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "3", "2", "1" }); + } + } + } + + /** + * Sort across two indices with both "date" and "date_nanos" type using "numeric_type" set to "date_nanos" + */ + public void testSortMissingDatesMixedTypes() throws IOException { + for (String type : List.of("date", "date_nanos")) { + String index = "test_" + type; + assertAcked( + prepareCreate(index).setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("mydate") + .field("type", type) + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + + } + ensureGreen(); + + client().prepareIndex("test_date").setId("1").setSource("mydate", "2021-01-01").get(); + client().prepareIndex("test_date").setId("2").setSource("mydate", "2021-02-01").get(); + client().prepareIndex("test_date").setId("3").setSource("other_field", 1).get(); + client().prepareIndex("test_date_nanos").setId("4").setSource("mydate", "2021-03-01").get(); + client().prepareIndex("test_date_nanos").setId("5").setSource("mydate", "2021-04-01").get(); + client().prepareIndex("test_date_nanos").setId("6").setSource("other_field", 2).get(); + refresh(); + + for (boolean withFormat : List.of(true, false)) { + String format = null; + if (withFormat) { + format = "strict_date_optional_time_nanos"; + } + + String index = "test*"; + SearchResponse searchResponse = client().prepareSearch(index) + .addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos")) + .addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "1", "2", "4", "5", "3", "6" }); + + searchResponse = client().prepareSearch(index) + .addSort( + SortBuilders.fieldSort("mydate") + .order(SortOrder.ASC) + .missing("_first") + .setFormat(format) + .setNumericType("date_nanos") + ) + .addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "3", "6", "1", "2", "4", "5" }); + + searchResponse = client().prepareSearch(index) + .addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos")) + .addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "5", "4", "2", "1", "3", "6" }); + + searchResponse = client().prepareSearch(index) + .addSort( + SortBuilders.fieldSort("mydate") + .order(SortOrder.DESC) + .missing("_first") + .setFormat(format) + .setNumericType("date_nanos") + ) + 
.addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)) + .get(); + assertHitsInOrder(searchResponse, new String[] { "3", "6", "5", "4", "2", "1" }); + } + } + + private void assertHitsInOrder(SearchResponse response, String[] expectedIds) { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(expectedIds.length, hits.length); + int i = 0; + for (String id : expectedIds) { + assertEquals(id, hits[i].getId()); + i++; + } + } + public void testIgnoreUnmapped() throws Exception { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java new file mode 100644 index 0000000000000..ff0d8d274d250 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.stats; + +import org.elasticsearch.action.admin.indices.stats.FieldUsageShardResponse; +import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction; +import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.search.stats.FieldUsageStats; +import org.elasticsearch.index.search.stats.FieldUsageStats.UsageContext; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.test.ESIntegTestCase; + +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +public class FieldUsageStatsIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put("search.aggs.rewrite_to_filter_by_filter", false) + .build(); + } + + private FieldUsageStats aggregated(List stats) { + assertFalse(stats.isEmpty()); + return stats.stream().map(FieldUsageShardResponse::getStats).reduce(FieldUsageStats::add).get(); + } + + public void testFieldUsageStats() throws ExecutionException, InterruptedException { + internalCluster().ensureAtLeastNumDataNodes(2); + int numShards = randomIntBetween(1, 2); + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, numShards) + .put(SETTING_NUMBER_OF_REPLICAS, 1))); + + DateTimeFormatter formatter = 
DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT); + LocalDate date = LocalDate.of(2015, 9, 1); + + for (int i = 0; i < 30; i++) { + client().prepareIndex("test").setId(Integer.toString(i)).setSource( + "field", "value", "field2", "value2", "date_field", formatter.format(date.plusDays(i))).get(); + } + client().admin().indices().prepareRefresh("test").get(); + + ensureGreen("test"); + + FieldUsageStats stats = + aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test")); + + assertFalse(stats.hasField("field")); + assertFalse(stats.hasField("field.keyword")); + assertFalse(stats.hasField("field2")); + assertFalse(stats.hasField("date_field")); + + SearchResponse searchResponse = client().prepareSearch() + .setSearchType(SearchType.DEFAULT) + .setQuery(QueryBuilders.termQuery("field", "value")) + .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword")) + .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2"))) + .setSize(between(5, 100)) + .setPreference("fixed") + .get(); + + assertHitCount(searchResponse, 30); + assertAllSuccessful(searchResponse); + + stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test")); + logger.info("Stats after first query: {}", stats); + + assertTrue(stats.hasField("_id")); + assertEquals(Set.of(UsageContext.STORED_FIELDS), stats.get("_id").keySet()); + assertTrue(stats.hasField("_source")); + assertEquals(Set.of(UsageContext.STORED_FIELDS), stats.get("_source").keySet()); + + assertTrue(stats.hasField("field")); + // we sort by _score + assertEquals(Set.of(UsageContext.TERMS, UsageContext.POSTINGS, UsageContext.FREQS, UsageContext.NORMS), + stats.get("field").keySet()); + assertEquals(1L * numShards, stats.get("field").getTerms()); + + assertTrue(stats.hasField("field2")); + // positions because of span query + assertEquals(Set.of(UsageContext.TERMS, UsageContext.POSTINGS, UsageContext.FREQS, UsageContext.POSITIONS), + stats.get("field2").keySet()); + assertEquals(1L * numShards, stats.get("field2").getTerms()); + + assertTrue(stats.hasField("field.keyword")); + // terms agg does not use search as we've set search.aggs.rewrite_to_filter_by_filter to false + assertEquals(Set.of(UsageContext.DOC_VALUES), stats.get("field.keyword").keySet()); + assertEquals(1L * numShards, stats.get("field.keyword").getDocValues()); + + client().prepareSearch() + .setSearchType(SearchType.DEFAULT) + .setQuery(QueryBuilders.termQuery("field", "value")) + .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword")) + .setSize(0) + .setPreference("fixed") + .get(); + + stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test")); + logger.info("Stats after second query: {}", stats); + + assertEquals(2L * numShards, stats.get("field").getTerms()); + assertEquals(1L * numShards, stats.get("field2").getTerms()); + assertEquals(2L * numShards, stats.get("field.keyword").getDocValues()); + + assertFalse(stats.hasField("date_field")); + + // show that we also track stats in can_match + assertEquals(2L * numShards, client().admin().indices().prepareStats("test").clear().setSearch(true).get() + .getIndex("test").getTotal().getSearch().getTotal().getQueryCount()); + client().prepareSearch() + .setSearchType(SearchType.DEFAULT) + .setPreFilterShardSize(1) + 
.setQuery(QueryBuilders.rangeQuery("date_field").from("2016/01/01")) + .setSize(100) + .setPreference("fixed") + .get(); + + stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test")); + logger.info("Stats after third query: {}", stats); + + assertTrue(stats.hasField("date_field")); + assertEquals(Set.of(UsageContext.POINTS), stats.get("date_field").keySet()); + // can_match does not enter search stats + // there is a special case though where we have no hit but we need to get at least one search response in order + // to produce a valid search result with all the aggs etc., so we hit one of the two shards + assertEquals((2 * numShards) + 1, client().admin().indices().prepareStats("test").clear().setSearch(true).get() + .getIndex("test").getTotal().getSearch().getTotal().getQueryCount()); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index fe8fca92977f6..82382b6782024 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -237,7 +237,7 @@ public void testOpenContexts() { protected int numAssignedShards(String... indices) { ClusterState state = client().admin().cluster().prepareState().get().getState(); - GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); + GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); return allAssignedShardsGrouped.size(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index f5b46919a4d06..35b6f767941cd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -599,7 +599,7 @@ public void testSkipDuplicatesWithContexts() throws Exception { assertSuggestions("suggestions", completionSuggestionBuilder, expectedModulo); } - public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBuilder, String... suggestions) { + public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBuilder, String... 
suggestions) { SearchResponse searchResponse = client().prepareSearch(INDEX).suggest( new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder) ).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index d994220b1d462..3ccb6d20d66f6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -28,6 +29,7 @@ import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryShardId; +import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.snapshots.mockstore.MockRepository; @@ -74,7 +76,7 @@ public void testShardClone() throws Exception { final SnapshotId targetSnapshotId = new SnapshotId("target-snapshot", UUIDs.randomBase64UUID(random())); - final String currentShardGen; + final ShardGeneration currentShardGen; if (useBwCFormat) { currentShardGen = null; } else { @@ -83,11 +85,10 @@ public void testShardClone() throws Exception { final ShardSnapshotResult shardSnapshotResult = PlainActionFuture.get( f -> repository.cloneShardSnapshot(sourceSnapshotInfo.snapshotId(), targetSnapshotId, repositoryShardId, currentShardGen, f) ); - final String newShardGeneration = shardSnapshotResult.getGeneration(); + final ShardGeneration newShardGeneration = shardSnapshotResult.getGeneration(); if (useBwCFormat) { - final long gen = Long.parseLong(newShardGeneration); - assertEquals(gen, 1L); // Initial snapshot brought it to 0, clone increments it to 1 + assertEquals(newShardGeneration, new ShardGeneration(1L)); // Initial snapshot brought it to 0, clone increments it to 1 } final BlobStoreIndexShardSnapshot targetShardSnapshot = readShardSnapshot(repository, repositoryShardId, targetSnapshotId); @@ -495,6 +496,75 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { ); } + public void testSnapshotQueuedAfterCloneFromBrokenSourceSnapshot() throws Exception { + internalCluster().startMasterOnlyNode(); + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "mock"); + final String testIndex = "index-test"; + createIndexWithContent(testIndex); + + final String sourceSnapshot = "source-snapshot"; + blockDataNode(repoName, dataNode); + final Client masterClient = internalCluster().masterClient(); + final ActionFuture sourceSnapshotFuture = masterClient.admin() + .cluster() + .prepareCreateSnapshot(repoName, sourceSnapshot) + .setWaitForCompletion(true) + .execute(); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(dataNode, repoName); + internalCluster().restartNode(dataNode); + ensureGreen(); + assertThat(sourceSnapshotFuture.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); + final String sourceSnapshotHealthy = 
"source-snapshot-healthy"; + createFullSnapshot(repoName, "source-snapshot-healthy"); + + final ActionFuture sn1 = startFullSnapshot(repoName, "concurrent-snapshot-1"); + final ActionFuture clone1 = startClone( + masterClient, + repoName, + sourceSnapshotHealthy, + "target-snapshot-1", + testIndex + ); + final ActionFuture sn2 = startFullSnapshot(repoName, "concurrent-snapshot-2"); + final ActionFuture clone2 = startClone( + masterClient, + repoName, + sourceSnapshotHealthy, + "target-snapshot-2", + testIndex + ); + final ActionFuture sn3 = startFullSnapshot(repoName, "concurrent-snapshot-3"); + final ActionFuture clone3 = startClone( + masterClient, + repoName, + sourceSnapshotHealthy, + "target-snapshot-3", + testIndex + ); + final SnapshotException sne = expectThrows( + SnapshotException.class, + () -> startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( + TimeValue.timeValueSeconds(30L) + ) + ); + assertThat( + sne.getMessage(), + containsString( + "Can't clone index [" + getRepositoryData(repoName).resolveIndexId(testIndex) + "] because its snapshot was not successful." + ) + ); + + assertSuccessful(sn1); + assertSuccessful(sn2); + assertSuccessful(sn3); + assertAcked(clone1.get()); + assertAcked(clone2.get()); + assertAcked(clone3.get()); + } + public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception { final String masterName = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String dataNode = internalCluster().startDataOnlyNode(); @@ -578,7 +648,7 @@ public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throw awaitClusterState(clusterState -> { final List entries = clusterState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY) .entries(); - return entries.size() == 2 && entries.get(1).clones().isEmpty() == false; + return entries.size() == 2 && entries.get(1).shardsByRepoShardId().isEmpty() == false; }); assertFalse(blockedSnapshot.isDone()); } finally { @@ -615,9 +685,9 @@ public void testStartCloneDuringRunningDelete() throws Exception { logger.info("--> waiting for snapshot clone to be fully initialized"); awaitClusterState(state -> { for (SnapshotsInProgress.Entry entry : state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries()) { - if (entry.clones().isEmpty() == false) { + if (entry.shardsByRepoShardId().isEmpty() == false) { assertEquals(sourceSnapshot, entry.source().getName()); - for (ObjectCursor value : entry.clones().values()) { + for (ObjectCursor value : entry.shardsByRepoShardId().values()) { assertSame(value.value, SnapshotsInProgress.ShardSnapshotStatus.UNASSIGNED_QUEUED); } return true; @@ -667,6 +737,66 @@ public void testManyConcurrentClonesStartOutOfOrder() throws Exception { assertAcked(clone2.get()); } + public void testRemoveFailedCloneFromCSWithoutIO() throws Exception { + final String masterNode = internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "mock"); + final String testIndex = "index-test"; + createIndexWithContent(testIndex); + + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repoName, sourceSnapshot); + + final String targetSnapshot = "target-snapshot"; + blockAndFailMasterOnShardClone(repoName); + final ActionFuture cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, testIndex); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(masterNode, repoName); + 
unblockNode(repoName, masterNode); + expectThrows(SnapshotException.class, cloneFuture::actionGet); + awaitNoMoreRunningOperations(); + assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); + assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); + } + + public void testRemoveFailedCloneFromCSWithQueuedSnapshotInProgress() throws Exception { + // single threaded master snapshot pool so we can selectively fail part of a clone by letting it run shard by shard + final String masterNode = internalCluster().startMasterOnlyNode( + Settings.builder().put("thread_pool.snapshot.core", 1).put("thread_pool.snapshot.max", 1).build() + ); + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "mock"); + final String testIndex = "index-test"; + final String testIndex2 = "index-test-2"; + createIndexWithContent(testIndex); + createIndexWithContent(testIndex2); + + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repoName, sourceSnapshot); + + final String targetSnapshot = "target-snapshot"; + blockAndFailMasterOnShardClone(repoName); + + createIndexWithContent("test-index-3"); + blockDataNode(repoName, dataNode); + final ActionFuture fullSnapshotFuture1 = startFullSnapshot(repoName, "full-snapshot-1"); + waitForBlock(dataNode, repoName); + final ActionFuture cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, testIndex, testIndex2); + awaitNumberOfSnapshotsInProgress(2); + waitForBlock(masterNode, repoName); + unblockNode(repoName, masterNode); + final ActionFuture fullSnapshotFuture2 = startFullSnapshot(repoName, "full-snapshot-2"); + expectThrows(SnapshotException.class, cloneFuture::actionGet); + unblockNode(repoName, dataNode); + awaitNoMoreRunningOperations(); + assertSuccessful(fullSnapshotFuture1); + assertSuccessful(fullSnapshotFuture2); + assertAllSnapshotsSuccessful(getRepositoryData(repoName), 3); + assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); + } + private ActionFuture startCloneFromDataNode( String repoName, String sourceSnapshot, @@ -703,6 +833,10 @@ private void blockMasterOnShardClone(String repoName) { AbstractSnapshotIntegTestCase.getRepositoryOnMaster(repoName).setBlockOnWriteShardLevelMeta(); } + private void blockAndFailMasterOnShardClone(String repoName) { + AbstractSnapshotIntegTestCase.getRepositoryOnMaster(repoName).setBlockAndFailOnWriteShardLevelMeta(); + } + /** * Assert that given {@link RepositoryData} contains exactly the given number of snapshots and all of them are successful. 
*/ @@ -717,7 +851,7 @@ private static void assertAllSnapshotsSuccessful(RepositoryData repositoryData, private static BlobStoreIndexShardSnapshots readShardGeneration( BlobStoreRepository repository, RepositoryShardId repositoryShardId, - String generation + ShardGeneration generation ) { return PlainActionFuture.get( f -> repository.threadPool() @@ -726,8 +860,9 @@ private static BlobStoreIndexShardSnapshots readShardGeneration( ActionRunnable.supply( f, () -> BlobStoreRepository.INDEX_SHARD_SNAPSHOTS_FORMAT.read( + repository.getMetadata().name(), repository.shardContainer(repositoryShardId.index(), repositoryShardId.shardId()), - generation, + generation.toBlobNamePart(), NamedXContentRegistry.EMPTY ) ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 53c36fe19f323..48231cd10c92d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -10,11 +10,13 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.support.GroupedActionListener; @@ -28,6 +30,7 @@ import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.discovery.AbstractDisruptionTestCase; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.ShardGenerations; @@ -45,6 +48,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -54,10 +59,12 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -343,7 +350,7 @@ public void testAbortOneOfMultipleSnapshots() throws Exception { logger.info("--> verify that the first snapshot is gone"); assertThat( - client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(repoName), + 
client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), containsInAnyOrder(secondSnapshotInfo, thirdSnapshotInfo) ); } @@ -407,7 +414,7 @@ public void testCascadedAborts() throws Exception { assertAcked(allDeletedResponse.get()); logger.info("--> verify that all snapshots are gone"); - assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(repoName), empty()); + assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); } public void testMasterFailOverWithQueuedDeletes() throws Exception { @@ -493,7 +500,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { logger.info("--> verify that all snapshots are gone and no more work is left in the cluster state"); assertBusy(() -> { - assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(repoName), empty()); + assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); final ClusterState state = clusterService().state(); final SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE); assertThat(snapshotsInProgress.entries(), empty()); @@ -557,7 +564,7 @@ public void testQueuedDeletesWithFailures() throws Exception { final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture::actionGet); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); - assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(repoName), empty()); + assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); } public void testQueuedDeletesWithOverlap() throws Exception { @@ -584,7 +591,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture::actionGet); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); - assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(repoName), empty()); + assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); } public void testQueuedOperationsOnMasterRestart() throws Exception { @@ -1366,6 +1373,376 @@ public void testStartWithSuccessfulShardSnapshotPendingFinalization() throws Exc assertSuccessful(otherSnapshot); } + public void testConcurrentRestoreDeleteAndClone() throws Exception { + final String repository = "test-repo"; + createRepository(logger, repository, "fs"); + + final int nbIndices = randomIntBetween(10, 20); + + for (int i = 0; i < nbIndices; i++) { + final String index = "index-" + i; + createIndexWithContent(index); + final String snapshot = "snapshot-" + i; + createSnapshot(repository, snapshot, List.of(index)); + } + + final List> cloneFutures = new ArrayList<>(); + final List> restoreFutures = new ArrayList<>(); + + for (int i = 0; i < nbIndices; i++) { + if (randomBoolean()) { + restoreFutures.add( + client().admin() + .cluster() + .prepareRestoreSnapshot(repository, "snapshot-" + i) + .setIndices("index-" + i) + .setRenamePattern("(.+)") + .setRenameReplacement("$1-restored-" + i) + .setWaitForCompletion(true) + .execute() + ); + } else { + cloneFutures.add( + client().admin() + .cluster() + .prepareCloneSnapshot(repository, "snapshot-" + i, "clone-" + i) + .setIndices("index-" + i) + .execute() + ); + } + } + + // make deletes and 
clones complete concurrently + final List> deleteFutures = new ArrayList<>(nbIndices); + for (int i = 0; i < nbIndices; i++) { + deleteFutures.add(startDeleteSnapshot(repository, "snapshot-" + i)); + } + + for (ActionFuture operation : restoreFutures) { + try { + final RestoreInfo restoreResponse = operation.get().getRestoreInfo(); + assertThat(restoreResponse.successfulShards(), greaterThanOrEqualTo(1)); + assertEquals(0, restoreResponse.failedShards()); + } catch (ExecutionException e) { + final Throwable csee = ExceptionsHelper.unwrap(e, ConcurrentSnapshotExecutionException.class); + assertThat(csee, instanceOf(ConcurrentSnapshotExecutionException.class)); + } + } + for (ActionFuture operation : cloneFutures) { + try { + assertAcked(operation.get()); + } catch (ExecutionException e) { + final Throwable csee = ExceptionsHelper.unwrap(e, SnapshotException.class); + assertThat( + csee, + either(instanceOf(ConcurrentSnapshotExecutionException.class)).or(instanceOf(SnapshotMissingException.class)) + ); + } + } + for (ActionFuture operation : deleteFutures) { + try { + assertAcked(operation.get()); + } catch (ExecutionException e) { + final Throwable csee = ExceptionsHelper.unwrap(e, ConcurrentSnapshotExecutionException.class); + assertThat(csee, instanceOf(ConcurrentSnapshotExecutionException.class)); + } + } + awaitNoMoreRunningOperations(); + } + + public void testOutOfOrderFinalization() throws Exception { + internalCluster().startMasterOnlyNode(); + final List dataNodes = internalCluster().startDataOnlyNodes(2); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1, dataNodes.get(0), dataNodes.get(1)); + createIndexWithContent(index2, dataNodes.get(1), dataNodes.get(0)); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + blockNodeWithIndex(repository, index2); + + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") + .setIndices(index1, index2) + .setWaitForCompletion(true) + .execute(); + awaitNumberOfSnapshotsInProgress(1); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") + .setIndices(index1) + .setWaitForCompletion(true) + .execute(); + assertSuccessful(snapshot2); + unblockAllDataNodes(repository); + final SnapshotInfo sn1 = assertSuccessful(snapshot1); + + assertAcked(startDeleteSnapshot(repository, sn1.snapshot().getSnapshotId().getName()).get()); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + hasSize(1) + ); + } + + public void testOutOfOrderAndConcurrentFinalization() throws Exception { + final String master = internalCluster().startMasterOnlyNode(); + final List dataNodes = internalCluster().startDataOnlyNodes(2); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1, dataNodes.get(0), dataNodes.get(1)); + createIndexWithContent(index2, dataNodes.get(1), dataNodes.get(0)); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + blockNodeWithIndex(repository, index2); + + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") + .setIndices(index1, index2) + .setWaitForCompletion(true) + .execute(); + awaitNumberOfSnapshotsInProgress(1); + + blockMasterOnWriteIndexFile(repository); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") + .setIndices(index1) + 
.setWaitForCompletion(true) + .execute(); + + awaitClusterState(state -> { + final SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE); + return snapshotsInProgress.entries().size() == 2 && snapshotsInProgress.entries().get(1).state().completed(); + }); + + unblockAllDataNodes(repository); + awaitClusterState(state -> state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().get(0).state().completed()); + + unblockNode(repository, master); + assertSuccessful(snapshot2); + + final SnapshotInfo sn1 = assertSuccessful(snapshot1); + assertAcked(startDeleteSnapshot(repository, sn1.snapshot().getSnapshotId().getName()).get()); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + hasSize(1) + ); + } + + public void testOutOfOrderFinalizationWithConcurrentClone() throws Exception { + internalCluster().startMasterOnlyNode(); + final List dataNodes = internalCluster().startDataOnlyNodes(2); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1, dataNodes.get(0), dataNodes.get(1)); + createIndexWithContent(index2, dataNodes.get(1), dataNodes.get(0)); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repository, sourceSnapshot); + indexDoc(index2, "doc_id", "foo", "bar"); + + blockNodeWithIndex(repository, index2); + + final String sn1 = "snapshot-1"; + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, sn1) + .setIndices(index1, index2) + .setWaitForCompletion(true) + .execute(); + awaitNumberOfSnapshotsInProgress(1); + + final String targetSnapshot = "target-snapshot"; + final ActionFuture clone = clusterAdmin().prepareCloneSnapshot(repository, sourceSnapshot, targetSnapshot) + .setIndices(index1) + .execute(); + assertAcked(clone.get()); + + unblockAllDataNodes(repository); + assertSuccessful(snapshot1); + + logger.info("--> deleting snapshots [{},{}] from repo [{}]", sn1, sourceSnapshot, repository); + assertAcked(clusterAdmin().prepareDeleteSnapshot(repository).setSnapshots(sn1, sourceSnapshot).get()); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots(targetSnapshot).setRepository(repository).get().getSnapshots(), + hasSize(1) + ); + } + + public void testOutOfOrderCloneFinalization() throws Exception { + final String master = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startDataOnlyNode(); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1); + createIndexWithContent(index2); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repository, sourceSnapshot); + + final IndexId index1Id = getRepositoryData(repository).resolveIndexId(index1); + blockMasterOnShardLevelSnapshotFile(repository, index1Id.getId()); + + final String cloneTarget = "target-snapshot"; + final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + repository, + sourceSnapshot, + cloneTarget + ).setIndices(index1, index2).execute(); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(master, repository); + + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") + .setIndices(index2) + .setWaitForCompletion(true) + .execute(); + 
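        // snapshot-2 only covers index-2, so it can complete and be finalized while the clone of
        // index-1/index-2 is still blocked on the master writing index-1's shard-level snapshot file.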
assertSuccessful(snapshot2); + + unblockNode(repository, master); + assertAcked(cloneSnapshot.get()); + assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + hasSize(1) + ); + } + + public void testCorrectlyFinalizeOutOfOrderPartialFailures() throws Exception { + internalCluster().startMasterOnlyNode(); + final String dataNode1 = internalCluster().startDataOnlyNode(); + final String dataNode2 = internalCluster().startDataOnlyNode(); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1, dataNode1, dataNode2); + createIndexWithContent(index2, dataNode2, dataNode1); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + createFullSnapshot(repository, "snapshot-1"); + index(index1, "some_doc", Map.of("foo", "bar")); + index(index2, "some_doc", Map.of("foo", "bar")); + blockAndFailDataNode(repository, dataNode1); + blockDataNode(repository, dataNode2); + final ActionFuture snapshotBlocked = startFullSnapshot(repository, "snapshot-2"); + waitForBlock(dataNode1, repository); + waitForBlock(dataNode2, repository); + + unblockNode(repository, dataNode1); + assertAcked(clusterAdmin().prepareCloneSnapshot(repository, "snapshot-1", "target-1").setIndices(index1).get()); + unblockNode(repository, dataNode2); + snapshotBlocked.get(); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("target-1").setRepository(repository).get().getSnapshots(), + hasSize(1) + ); + + createFullSnapshot(repository, "snapshot-3"); + } + + public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { + final String master = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startDataOnlyNode(); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1); + createIndexWithContent(index2); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repository, sourceSnapshot); + + final IndexId index1Id = getRepositoryData(repository).resolveIndexId(index1); + blockMasterOnShardLevelSnapshotFile(repository, index1Id.getId()); + + final String cloneTarget = "target-snapshot"; + final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + repository, + sourceSnapshot, + cloneTarget + ).setIndices(index1, index2).execute(); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(master, repository); + + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") + .setIndices(index1, index2) + .setWaitForCompletion(true) + .setPartial(true) + .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") + .setIndices(index2) + .setWaitForCompletion(true) + .execute(); + assertSuccessful(snapshot2); + awaitNumberOfSnapshotsInProgress(2); + assertFalse(snapshot3.isDone()); + assertAcked(admin().indices().prepareDelete(index1).get()); + assertSuccessful(snapshot3); + unblockNode(repository, master); + + assertAcked(cloneSnapshot.get()); + assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2", "snapshot-3").setRepository(repository).get().getSnapshots(), + hasSize(2) + ); + } + + public void 
testQueuedAfterFailedShardSnapshot() throws Exception { + internalCluster().startMasterOnlyNode(); + final String dataNode = internalCluster().startDataOnlyNode(); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + final String indexName = "test-idx"; + createIndexWithContent(indexName); + final String fullSnapshot = "full-snapshot"; + createFullSnapshot(repository, fullSnapshot); + + indexDoc(indexName, "some_id", "foo", "bar"); + blockAndFailDataNode(repository, dataNode); + final ActionFuture snapshotFutureFailure = startFullSnapshot(repository, "failing-snapshot"); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(dataNode, repository); + final ActionFuture snapshotFutureSuccess = startFullSnapshot(repository, "successful-snapshot"); + awaitNumberOfSnapshotsInProgress(2); + unblockNode(repository, dataNode); + + assertSuccessful(snapshotFutureSuccess); + final SnapshotInfo failedSnapshot = snapshotFutureFailure.get().getSnapshotInfo(); + assertEquals(SnapshotState.PARTIAL, failedSnapshot.state()); + + final SnapshotsStatusResponse snapshotsStatusResponse1 = clusterAdmin().prepareSnapshotStatus(repository) + .setSnapshots(fullSnapshot) + .get(); + + final String tmpSnapshot = "snapshot-tmp"; + createFullSnapshot(repository, tmpSnapshot); + assertAcked(startDeleteSnapshot(repository, tmpSnapshot).get()); + + final SnapshotsStatusResponse snapshotsStatusResponse2 = clusterAdmin().prepareSnapshotStatus(repository) + .setSnapshots(fullSnapshot) + .get(); + assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse2); + + assertAcked(startDeleteSnapshot(repository, "successful-snapshot").get()); + + final SnapshotsStatusResponse snapshotsStatusResponse3 = clusterAdmin().prepareSnapshotStatus(repository) + .setSnapshots(fullSnapshot) + .get(); + assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse3); + } + private static void assertSnapshotStatusCountOnRepo(String otherBlockedRepoName, int count) { final SnapshotsStatusResponse snapshotsStatusResponse = client().admin() .cluster() @@ -1435,7 +1812,7 @@ private static List currentSnapshots(String repoName) { .prepareGetSnapshots(repoName) .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) .get() - .getSnapshots(repoName); + .getSnapshots(); } private ActionFuture startAndBlockOnDeleteSnapshot(String repoName, String snapshotName) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index c3fa3a18906a6..219b72516493f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -132,7 +133,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { logger.info("--> make sure snapshot doesn't exist"); expectThrows( SnapshotMissingException.class, - () -> 
client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get().getSnapshots(repoName) + () -> client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get() ); } @@ -209,7 +210,7 @@ public void testFindDanglingLatestGeneration() throws Exception { logger.info("--> make sure snapshot doesn't exist"); expectThrows( SnapshotMissingException.class, - () -> client().admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get().getSnapshots(repoName) + () -> client().admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get() ); } @@ -430,7 +431,7 @@ public void testRepairBrokenShardGenerations() throws Exception { snapshotIds, snapshotIds.values().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData::getSnapshotDetails)), repositoryData.getIndices().values().stream().collect(Collectors.toMap(Function.identity(), repositoryData::getSnapshots)), - ShardGenerations.builder().putAll(repositoryData.shardGenerations()).put(indexId, 0, "0").build(), + ShardGenerations.builder().putAll(repositoryData.shardGenerations()).put(indexId, 0, new ShardGeneration(0L)).build(), repositoryData.indexMetaDataGenerations(), repositoryData.getClusterUUID() ); @@ -453,6 +454,8 @@ public void testRepairBrokenShardGenerations() throws Exception { * Tests that a shard snapshot with a corrupted shard index file can still be used for restore and incremental snapshots. */ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { + disableRepoConsistencyCheck("This test intentionally corrupts the repository contents"); + final Client client = client(); final Path repo = randomRepoPath(); final String indexName = "test-idx"; @@ -490,7 +493,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { } logger.info("--> verifying snapshot state for [{}]", snapshot1); - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshot1)); @@ -571,7 +574,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio expectThrows( SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get().getSnapshots("test-repo") + () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get() ); for (String index : indices) { @@ -615,7 +618,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { logger.info("--> make sure snapshot doesn't exist"); expectThrows( SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get().getSnapshots("test-repo") + () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get() ); } @@ -663,7 +666,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { logger.info("--> make sure snapshot doesn't exist"); expectThrows( SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get().getSnapshots("test-repo") + () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get().getSnapshots() ); 
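        // getSnapshots() no longer takes a repository name here: the get-snapshots response is scoped to
        // the one repository named in the request, so the snapshots come back as a single flat list.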
logger.info("--> make sure that we can create the snapshot again"); @@ -712,7 +715,7 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { } } - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); @@ -722,10 +725,7 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); assertAcked(startDeleteSnapshot("test-repo", "test-snap").get()); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get().getSnapshots("test-repo") - ); + expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get()); assertRequestBuilderThrows( clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), SnapshotMissingException.class diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java index c4720f9176931..01e92850bf191 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataSnapshotIT.java @@ -162,7 +162,7 @@ public TestCustomMetadataPlugin() { private void registerMetadataCustom( String name, Writeable.Reader reader, - Writeable.Reader diffReader, + Writeable.Reader> diffReader, CheckedFunction parser ) { namedWritables.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, name, reader)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 8c9306bd9276d..742ce2fa5130e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -11,6 +11,9 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -32,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; @@ -46,6 +50,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoryException; import 
org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestRequest; @@ -56,6 +61,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.disruption.BusyMasterServiceDisruption; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.rest.FakeRestRequest; @@ -88,6 +94,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -190,7 +197,7 @@ public void testSnapshotWithStuckNode() throws Exception { logger.info("--> making sure that snapshot no longer exists"); expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots("test-repo") + () -> client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() ); logger.info("--> Go through a loop of creating and deleting a snapshot to trigger repository cleanup"); @@ -889,8 +896,8 @@ public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); - assertEquals(1, snapshotsStatusResponse.getSnapshots("test-repo").size()); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); + assertEquals(1, snapshotsStatusResponse.getSnapshots().size()); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); assertTrue(snapshotInfo.state().toString(), snapshotInfo.state().completed()); }, 60L, TimeUnit.SECONDS); } @@ -943,8 +950,8 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); - assertEquals(1, snapshotsStatusResponse.getSnapshots("test-repo").size()); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); + assertEquals(1, snapshotsStatusResponse.getSnapshots().size()); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); assertTrue(snapshotInfo.state().toString(), snapshotInfo.state().completed()); assertThat(snapshotInfo.totalShards(), is(2)); assertThat(snapshotInfo.shardFailures(), hasSize(2)); @@ -1221,6 +1228,77 @@ public void testGetReposWithWildcard() { assertThat(repositoryMetadata, empty()); } + public void testConcurrentSnapshotAndRepoDelete() throws Exception { + internalCluster().startMasterOnlyNodes(1); + internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "fs"); + + // create a few snapshots so deletes will run for a while + final int snapshotCount = randomIntBetween(10, 25); + final List snapshotNames = createNSnapshots(repoName, snapshotCount); + + // concurrently trigger repository and snapshot deletes + final List> deleteFutures = new ArrayList<>(snapshotCount); + final ActionFuture deleteRepoFuture = clusterAdmin().prepareDeleteRepository(repoName).execute(); + for (String snapshotName : snapshotNames) { + 
deleteFutures.add(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute()); + } + + try { + assertAcked(deleteRepoFuture.actionGet()); + } catch (Exception e) { + assertThat( + e.getMessage(), + containsString( + "trying to modify or unregister repository [test-repo] that is currently used (snapshot deletion is in progress)" + ) + ); + } + for (ActionFuture deleteFuture : deleteFutures) { + try { + assertAcked(deleteFuture.actionGet()); + } catch (RepositoryException e) { + assertThat( + e.getMessage(), + either(containsString("[test-repo] repository is not in started state")).or(containsString("[test-repo] missing")) + ); + } + } + } + + public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessException { + internalCluster().startMasterOnlyNodes(1); + internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "fs"); + + final MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no warnings", BlobStoreRepository.class.getCanonicalName(), Level.WARN, "*") + ); + mockAppender.start(); + final Logger logger = LogManager.getLogger(BlobStoreRepository.class); + Loggers.addAppender(logger, mockAppender); + try { + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent("index-1"); + createIndexWithContent("index-2"); + createFullSnapshot(repoName, "full-snapshot"); + final String snapshot1 = "index-1-snapshot"; + final String snapshot2 = "index-2-snapshot"; + createSnapshot(repoName, snapshot1, List.of(index1)); + createSnapshot(repoName, snapshot2, List.of(index2)); + + clusterAdmin().prepareDeleteSnapshot(repoName, snapshot1, snapshot2).get(); + mockAppender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(logger, mockAppender); + mockAppender.stop(); + } + } + private long calculateTotalFilesSize(List files) { return files.stream().mapToLong(f -> { try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 2c12224472a73..06a00a6c7162c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.threadpool.ThreadPool; @@ -55,7 +56,7 @@ public void testSortBy() throws Exception { } private void doTestSortOrder(String repoName, Collection allSnapshotNames, SortOrder order) { - final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(repoName); + final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); assertSnapshotListSorted( allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.NAME, order), @@ -95,23 +96,28 @@ public void testResponseSizeLimit() throws Exception { private 
void doTestPagination(String repoName, List names, GetSnapshotsRequest.SortBy sort, SortOrder order) { final List allSnapshotsSorted = allSnapshotsSorted(names, repoName, sort, order); - final List batch1 = sortedWithLimit(repoName, sort, null, 2, order); - assertEquals(batch1, allSnapshotsSorted.subList(0, 2)); - final List batch2 = sortedWithLimit(repoName, sort, batch1.get(1), 2, order); - assertEquals(batch2, allSnapshotsSorted.subList(2, 4)); - final int lastBatch = names.size() - batch1.size() - batch2.size(); - final List batch3 = sortedWithLimit(repoName, sort, batch2.get(1), lastBatch, order); - assertEquals(batch3, allSnapshotsSorted.subList(batch1.size() + batch2.size(), names.size())); - final List batch3NoLimit = sortedWithLimit(repoName, sort, batch2.get(1), GetSnapshotsRequest.NO_LIMIT, order); - assertEquals(batch3, batch3NoLimit); - final List batch3LargeLimit = sortedWithLimit( + final GetSnapshotsResponse batch1 = sortedWithLimit(repoName, sort, null, 2, order); + assertEquals(allSnapshotsSorted.subList(0, 2), batch1.getSnapshots()); + final GetSnapshotsResponse batch2 = sortedWithLimit(repoName, sort, batch1.next(), 2, order); + assertEquals(allSnapshotsSorted.subList(2, 4), batch2.getSnapshots()); + final int lastBatch = names.size() - batch1.getSnapshots().size() - batch2.getSnapshots().size(); + final GetSnapshotsResponse batch3 = sortedWithLimit(repoName, sort, batch2.next(), lastBatch, order); + assertEquals( + batch3.getSnapshots(), + allSnapshotsSorted.subList(batch1.getSnapshots().size() + batch2.getSnapshots().size(), names.size()) + ); + final GetSnapshotsResponse batch3NoLimit = sortedWithLimit(repoName, sort, batch2.next(), GetSnapshotsRequest.NO_LIMIT, order); + assertNull(batch3NoLimit.next()); + assertEquals(batch3.getSnapshots(), batch3NoLimit.getSnapshots()); + final GetSnapshotsResponse batch3LargeLimit = sortedWithLimit( repoName, sort, - batch2.get(1), + batch2.next(), lastBatch + randomIntBetween(1, 100), order ); - assertEquals(batch3, batch3LargeLimit); + assertEquals(batch3.getSnapshots(), batch3LargeLimit.getSnapshots()); + assertNull(batch3LargeLimit.next()); } public void testSortAndPaginateWithInProgress() throws Exception { @@ -177,15 +183,29 @@ private static void assertStablePagination(String repoName, Collection a final List allSorted = allSnapshotsSorted(allSnapshotNames, repoName, sort, order); for (int i = 1; i <= allSnapshotNames.size(); i++) { - final List subsetSorted = sortedWithLimit(repoName, sort, null, i, order); - assertEquals(subsetSorted, allSorted.subList(0, i)); + final GetSnapshotsResponse subsetSorted = sortedWithLimit(repoName, sort, null, i, order); + assertEquals(allSorted.subList(0, i), subsetSorted.getSnapshots()); } for (int j = 0; j < allSnapshotNames.size(); j++) { final SnapshotInfo after = allSorted.get(j); for (int i = 1; i < allSnapshotNames.size() - j; i++) { - final List subsetSorted = sortedWithLimit(repoName, sort, after, i, order); + final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit( + repoName, + sort, + GetSnapshotsRequest.After.from(after, sort).asQueryParam(), + i, + order + ); + final GetSnapshotsResponse getSnapshotsResponseNumeric = sortedWithLimit(repoName, sort, j + 1, i, order); + final List subsetSorted = getSnapshotsResponse.getSnapshots(); + assertEquals(subsetSorted, getSnapshotsResponseNumeric.getSnapshots()); + assertEquals(subsetSorted, allSorted.subList(j + 1, j + i + 1)); + assertEquals(allSnapshotNames.size(), getSnapshotsResponse.totalCount()); + 
assertEquals(allSnapshotNames.size() - (j + i + 1), getSnapshotsResponse.remaining()); assertEquals(subsetSorted, allSorted.subList(j + 1, j + i + 1)); + assertEquals(getSnapshotsResponseNumeric.totalCount(), getSnapshotsResponse.totalCount()); + assertEquals(getSnapshotsResponseNumeric.remaining(), getSnapshotsResponse.remaining()); } } } @@ -196,22 +216,35 @@ private static List allSnapshotsSorted( GetSnapshotsRequest.SortBy sortBy, SortOrder order ) { - final List snapshotInfos = sortedWithLimit(repoName, sortBy, null, GetSnapshotsRequest.NO_LIMIT, order); + final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit(repoName, sortBy, null, GetSnapshotsRequest.NO_LIMIT, order); + final List snapshotInfos = getSnapshotsResponse.getSnapshots(); assertEquals(snapshotInfos.size(), allSnapshotNames.size()); + assertEquals(getSnapshotsResponse.totalCount(), allSnapshotNames.size()); + assertEquals(0, getSnapshotsResponse.remaining()); for (SnapshotInfo snapshotInfo : snapshotInfos) { assertThat(snapshotInfo.snapshotId().getName(), is(in(allSnapshotNames))); } return snapshotInfos; } - private static List sortedWithLimit( + private static GetSnapshotsResponse sortedWithLimit( + String repoName, + GetSnapshotsRequest.SortBy sortBy, + String after, + int size, + SortOrder order + ) { + return baseGetSnapshotsRequest(repoName).setAfter(after).setSort(sortBy).setSize(size).setOrder(order).get(); + } + + private static GetSnapshotsResponse sortedWithLimit( String repoName, GetSnapshotsRequest.SortBy sortBy, - SnapshotInfo after, + int offset, int size, SortOrder order ) { - return baseGetSnapshotsRequest(repoName).setAfter(after, sortBy).setSize(size).setOrder(order).get().getSnapshots(repoName); + return baseGetSnapshotsRequest(repoName).setOffset(offset).setSort(sortBy).setSize(size).setOrder(order).get(); } private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String repoName) { @@ -222,7 +255,7 @@ private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String repoNam .setSnapshots(AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*") .setIgnoreUnavailable(true) .get() - .getSnapshots(repoName) + .getSnapshots() .isEmpty() == false) { builder.setSnapshots(RANDOM_SNAPSHOT_NAME_PREFIX + "*"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index c333ad2cf843a..ec0090e2d1a90 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -81,7 +81,7 @@ public void testWhenMetadataAreLoaded() throws Exception { .addSnapshots("snap") .setVerbose(randomBoolean()) .get(); - assertThat(getSnapshotsResponse.getSnapshots("repository"), hasSize(1)); + assertThat(getSnapshotsResponse.getSnapshots(), hasSize(1)); assertGlobalMetadataLoads("snap", 0); assertIndexMetadataLoads("snap", "docs", 0); assertIndexMetadataLoads("snap", "others", 0); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java index f15f6a1149f98..6f7fcd6abf6f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -7,10 +7,6 @@ */ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -19,9 +15,8 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.FinalizeSnapshotContext; import org.elasticsearch.repositories.Repository; -import org.elasticsearch.repositories.RepositoryData; -import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.test.ESIntegTestCase; @@ -29,7 +24,6 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; @@ -89,24 +83,8 @@ public Map getRepositories( private final String initialMetaValue = metadata.settings().get(MASTER_SETTING_VALUE); @Override - public void finalizeSnapshot( - ShardGenerations shardGenerations, - long repositoryStateId, - Metadata clusterMetadata, - SnapshotInfo snapshotInfo, - Version repositoryMetaVersion, - Function stateTransformer, - ActionListener listener - ) { - super.finalizeSnapshot( - shardGenerations, - repositoryStateId, - clusterMetadata, - snapshotInfo, - repositoryMetaVersion, - stateTransformer, - listener - ); + public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { + super.finalizeSnapshot(finalizeSnapshotContext); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 99a2290784ad0..906be39192d75 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -8,6 +8,9 @@ package org.elasticsearch.snapshots; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -18,14 +21,17 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.blobstore.FileRestoreContext; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.MockLogAppender; 
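Stepping back from the individual hunks: the GetSnapshotsIT changes above show that the response no longer takes a repository argument and that paging is driven entirely by the sort, size and after options on the request. A rough sketch of a paging loop built on those options, written as it would sit inside a test like the ones above (the repository name and page size are illustrative, and the assumption that next() returns null once the final page has been served is inferred from the assertions above rather than taken verbatim from this change):

    GetSnapshotsResponse page = clusterAdmin().prepareGetSnapshots("test-repo")
        .setSort(GetSnapshotsRequest.SortBy.NAME)
        .setOrder(SortOrder.ASC)
        .setSize(2)
        .get();
    List<SnapshotInfo> collected = new ArrayList<>(page.getSnapshots());
    while (page.next() != null) {
        // next() carries an opaque "after" marker identifying the last snapshot of the current page
        page = clusterAdmin().prepareGetSnapshots("test-repo")
            .setSort(GetSnapshotsRequest.SortBy.NAME)
            .setOrder(SortOrder.ASC)
            .setSize(2)
            .setAfter(page.next())
            .get();
        collected.addAll(page.getSnapshots());
    }
    // collected.size() should now match page.totalCount(), and the final page reports remaining() == 0
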
import java.nio.file.Path; import java.util.Arrays; @@ -876,4 +882,34 @@ public void testFailOnAncientVersion() throws Exception { ) ); } + + public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessException { + final String repoName = "test-repo"; + createRepository(repoName, FsRepository.TYPE); + final String indexName = "test-idx"; + createIndexWithContent(indexName); + final String snapshotName = "test-snapshot"; + createSnapshot(repoName, snapshotName, List.of(indexName)); + index(indexName, "some_id", Map.of("foo", "bar")); + assertAcked(admin().indices().prepareClose(indexName).get()); + final MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") + ); + mockAppender.start(); + final Logger logger = LogManager.getLogger(FileRestoreContext.class); + Loggers.addAppender(logger, mockAppender); + try { + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) + .setIndices(indexName) + .setRestoreGlobalState(false) + .setWaitForCompletion(true) + .get(); + assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); + mockAppender.assertAllExpectationsMatched(); + } finally { + Loggers.removeAppender(logger, mockAppender); + mockAppender.stop(); + } + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 7b0b372e7881a..20ec6259e2180 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -153,7 +153,7 @@ public void testBasicWorkFlow() throws Exception { List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo") .setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")) .get() - .getSnapshots("test-repo"); + .getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); @@ -1052,8 +1052,7 @@ public void testReadonlyRepository() throws Exception { logger.info("--> list available shapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("readonly-repo").get(); - assertThat(getSnapshotsResponse.getSnapshots("readonly-repo"), notNullValue()); - assertThat(getSnapshotsResponse.getSnapshots("readonly-repo").size(), equalTo(1)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); logger.info("--> try deleting snapshot"); assertRequestBuilderThrows( @@ -1205,8 +1204,8 @@ public void testSnapshotStatus() throws Exception { .setCurrentSnapshot() .execute() .actionGet(); - assertThat(getResponse.getSnapshots("test-repo").size(), equalTo(1)); - SnapshotInfo snapshotInfo = getResponse.getSnapshots("test-repo").get(0); + assertThat(getResponse.getSnapshots().size(), equalTo(1)); + SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS)); logger.info("--> unblocking blocked node"); @@ -1241,7 +1240,7 @@ public void testSnapshotStatus() throws Exception { .addSnapshots("_current") .execute() .actionGet() - .getSnapshots("test-repo") + .getSnapshots() .isEmpty(), 
equalTo(true) ); @@ -1555,7 +1554,7 @@ public void testSnapshotName() throws Exception { expectThrows(InvalidSnapshotNameException.class, () -> client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo").get()); expectThrows( SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get().getSnapshots("test-repo") + () -> client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get() ); expectThrows(SnapshotMissingException.class, () -> client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo").get()); expectThrows( @@ -1599,7 +1598,7 @@ public void testListCorruptedSnapshot() throws Exception { .prepareGetSnapshots("test-repo") .setIgnoreUnavailable(true) .get() - .getSnapshots("test-repo"); + .getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); @@ -1607,7 +1606,7 @@ public void testListCorruptedSnapshot() throws Exception { final SnapshotException ex = expectThrows( SnapshotException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get().getSnapshots("test-repo") + () -> client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get() ); assertThat(ex.getRepositoryName(), equalTo("test-repo")); assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); @@ -1636,7 +1635,7 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(repoName); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshotName)); @@ -1713,7 +1712,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); @@ -1942,18 +1941,18 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { logger.info("--> verify _all returns snapshot info"); GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("_all").setVerbose(false).get(); - assertEquals(indicesPerSnapshot.size(), response.getSnapshots("test-repo").size()); + assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify wildcard returns snapshot info"); response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap-*").setVerbose(false).get(); - assertEquals(indicesPerSnapshot.size(), response.getSnapshots("test-repo").size()); + assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify individual requests return snapshot info"); for (int i = 0; i < numSnapshots; i++) { response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap-" + 
i).setVerbose(false).get(); - assertEquals(1, response.getSnapshots("test-repo").size()); + assertEquals(1, response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); } } @@ -2116,7 +2115,7 @@ public void testBulkDeleteWithOverlappingPatterns() { logger.info("--> deleting all snapshots"); clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snap-*", "*").get(); final GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots("test-repo").get(); - assertThat(getSnapshotsResponse.getSnapshots("test-repo"), empty()); + assertThat(getSnapshotsResponse.getSnapshots(), empty()); } public void testHiddenIndicesIncludedInSnapshot() throws Exception { @@ -2150,7 +2149,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { .prepareGetSnapshots(repoName) .setSnapshots(randomFrom(snapName, "_all", "*", "*-snap", "test*")) .get() - .getSnapshots(repoName); + .getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); @@ -2272,7 +2271,7 @@ public void testIndexLatestFailuresIgnored() throws Exception { } private void verifySnapshotInfo(final GetSnapshotsResponse response, final Map> indicesPerSnapshot) { - for (SnapshotInfo snapshotInfo : response.getSnapshots("test-repo")) { + for (SnapshotInfo snapshotInfo : response.getSnapshots()) { final List expected = snapshotInfo.indices(); assertEquals(expected, indicesPerSnapshot.get(snapshotInfo.snapshotId().getName())); assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index e4c4267b45ed8..860d5544dd803 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStage; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; @@ -77,7 +76,7 @@ public void testStatusApiConsistency() throws Exception { createFullSnapshot("test-repo", "test-snap"); - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots("test-repo"); + List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); @@ -131,11 +130,10 @@ public void testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat")); - GetSnapshotsResponse snapshotsResponse = client().admin() - .cluster() - .getSnapshots(new GetSnapshotsRequest(new String[] { "test-repo" }, new String[] { "test-snap" })) - .actionGet(); - 
assertThat(snapshotsResponse.getFailedResponses().get("test-repo"), instanceOf(SnapshotMissingException.class)); + expectThrows( + SnapshotMissingException.class, + () -> client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() + ); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -185,7 +183,7 @@ public void testGetSnapshotsWithoutIndices() throws Exception { .prepareGetSnapshots("test-repo") .setVerbose(false) .get() - .getSnapshots("test-repo"); + .getSnapshots(); assertThat(snapshotInfos, hasSize(1)); final SnapshotInfo found = snapshotInfos.get(0); assertThat(found.snapshotId(), is(snapshotInfo.snapshotId())); @@ -301,9 +299,8 @@ public void testGetSnapshotsNoRepos() { .setSnapshots(randomFrom("_all", "*")) .get(); - assertTrue(getSnapshotsResponse.getRepositories().isEmpty()); - assertTrue(getSnapshotsResponse.getFailedResponses().isEmpty()); - assertTrue(getSnapshotsResponse.getSuccessfulResponses().isEmpty()); + assertTrue(getSnapshotsResponse.getSnapshots().isEmpty()); + assertTrue(getSnapshotsResponse.getFailures().isEmpty()); } public void testGetSnapshotsMultipleRepos() throws Exception { @@ -371,8 +368,11 @@ public void testGetSnapshotsMultipleRepos() throws Exception { for (Map.Entry> repo2Names : repo2SnapshotNames.entrySet()) { String repo = repo2Names.getKey(); List snapshotNames = repo2Names.getValue(); - List snapshots = getSnapshotsResponse.getSnapshots(repo); - assertEquals(snapshotNames, snapshots.stream().map(s -> s.snapshotId().getName()).collect(Collectors.toList())); + List snapshots = getSnapshotsResponse.getSnapshots(); + assertEquals( + snapshotNames, + snapshots.stream().filter(s -> s.repository().equals(repo)).map(s -> s.snapshotId().getName()).collect(Collectors.toList()) + ); } logger.info("--> specify all snapshot names with ignoreUnavailable=false"); @@ -384,7 +384,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { .get(); for (String repo : repoList) { - expectThrows(SnapshotMissingException.class, () -> getSnapshotsResponse2.getSnapshots(repo)); + assertThat(getSnapshotsResponse2.getFailures().get(repo), instanceOf(SnapshotMissingException.class)); } logger.info("--> specify all snapshot names with ignoreUnavailable=true"); @@ -398,8 +398,11 @@ public void testGetSnapshotsMultipleRepos() throws Exception { for (Map.Entry> repo2Names : repo2SnapshotNames.entrySet()) { String repo = repo2Names.getKey(); List snapshotNames = repo2Names.getValue(); - List snapshots = getSnapshotsResponse3.getSnapshots(repo); - assertEquals(snapshotNames, snapshots.stream().map(s -> s.snapshotId().getName()).collect(Collectors.toList())); + List snapshots = getSnapshotsResponse3.getSnapshots(); + assertEquals( + snapshotNames, + snapshots.stream().filter(s -> s.repository().equals(repo)).map(s -> s.snapshotId().getName()).collect(Collectors.toList()) + ); } } @@ -421,7 +424,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); - List snapshotInfoList = response1.getSnapshots("test-repo"); + List snapshotInfoList = response1.getSnapshots(); assertEquals(1, snapshotInfoList.size()); assertEquals(SnapshotState.IN_PROGRESS, snapshotInfoList.get(0).state()); @@ -432,15 +435,18 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { .setSnapshots(notExistedSnapshotName) .setIgnoreUnavailable(true) .get(); - assertEquals(0, response2.getSnapshots("test-repo").size()); + 
assertEquals(0, response2.getSnapshots().size()); - GetSnapshotsResponse response3 = client().admin() - .cluster() - .prepareGetSnapshots("test-repo") - .setSnapshots(notExistedSnapshotName) - .setIgnoreUnavailable(false) - .get(); - expectThrows(SnapshotMissingException.class, () -> response3.getSnapshots("test-repo")); + expectThrows( + SnapshotMissingException.class, + () -> client().admin() + .cluster() + .prepareGetSnapshots("test-repo") + .setSnapshots(notExistedSnapshotName) + .setIgnoreUnavailable(false) + .execute() + .actionGet() + ); logger.info("--> unblock all data nodes"); unblockAllDataNodes("test-repo"); @@ -482,12 +488,7 @@ public void testGetSnapshotsRequest() throws Exception { logger.info("--> get snapshots on an empty repository"); expectThrows( SnapshotMissingException.class, - () -> client.admin() - .cluster() - .prepareGetSnapshots(repositoryName) - .addSnapshots("non-existent-snapshot") - .get() - .getSnapshots(repositoryName) + () -> client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot").get() ); // with ignore unavailable set to true, should not throw an exception GetSnapshotsResponse getSnapshotsResponse = client.admin() @@ -496,7 +497,7 @@ public void testGetSnapshotsRequest() throws Exception { .setIgnoreUnavailable(true) .addSnapshots("non-existent-snapshot") .get(); - assertThat(getSnapshotsResponse.getSnapshots(repositoryName).size(), equalTo(0)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0)); logger.info("--> creating an index and indexing documents"); // Create index on 2 nodes and make sure each node has a primary by setting no replicas @@ -520,8 +521,8 @@ public void testGetSnapshotsRequest() throws Exception { .prepareGetSnapshots("test-repo") .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo")) .get(); - assertEquals(1, getSnapshotsResponse.getSnapshots("test-repo").size()); - assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots("test-repo").get(0).snapshotId().getName()); + assertEquals(1, getSnapshotsResponse.getSnapshots().size()); + assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName()); unblockNode(repositoryName, initialBlockedNode); // unblock node startDeleteSnapshot(repositoryName, "snap-on-empty-repo").get(); @@ -577,26 +578,18 @@ public void testGetSnapshotsRequest() throws Exception { .get(); List sortedNames = Arrays.asList(snapshotNames); Collections.sort(sortedNames); - assertThat(getSnapshotsResponse.getSnapshots(repositoryName).size(), equalTo(numSnapshots)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); assertThat( - getSnapshotsResponse.getSnapshots(repositoryName) - .stream() - .map(s -> s.snapshotId().getName()) - .sorted() - .collect(Collectors.toList()), + getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()), equalTo(sortedNames) ); getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).get(); sortedNames = Arrays.asList(snapshotNames); Collections.sort(sortedNames); - assertThat(getSnapshotsResponse.getSnapshots(repositoryName).size(), equalTo(numSnapshots)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); assertThat( - getSnapshotsResponse.getSnapshots(repositoryName) - .stream() - .map(s -> s.snapshotId().getName()) - .sorted() - .collect(Collectors.toList()), + 
getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()), equalTo(sortedNames) ); @@ -611,13 +604,9 @@ public void testGetSnapshotsRequest() throws Exception { .addSnapshots(snapshotNames) .addSnapshots(firstRegex, secondRegex) .get(); - assertThat(getSnapshotsResponse.getSnapshots(repositoryName).size(), equalTo(numSnapshots)); + assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); assertThat( - getSnapshotsResponse.getSnapshots(repositoryName) - .stream() - .map(s -> s.snapshotId().getName()) - .sorted() - .collect(Collectors.toList()), + getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()), equalTo(sortedNames) ); @@ -649,7 +638,7 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { } } for (ActionFuture get : gets) { - final List snapshotInfos = get.get().getSnapshots(repoName); + final List snapshotInfos = get.get().getSnapshots(); assertThat(snapshotInfos, hasSize(snapshots)); for (SnapshotInfo snapshotInfo : snapshotInfos) { assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index 7b6abf0ec2b84..fa73d9ef54b33 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -116,7 +116,7 @@ public void testSnapshotWithoutGlobalState() { clusterAdmin().prepareGetRepositories(REPO_NAME).get(); Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) .get() - .getSnapshots(REPO_NAME) + .getSnapshots() .stream() .map(SnapshotInfo::indices) .flatMap(Collection::stream) @@ -274,7 +274,7 @@ public void testSnapshotAndRestoreAssociatedIndices() { // verify the correctness of the snapshot Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) .get() - .getSnapshots(REPO_NAME) + .getSnapshots() .stream() .map(SnapshotInfo::indices) .flatMap(Collection::stream) @@ -719,7 +719,7 @@ public void testNoneFeatureStateOnCreation() { // Verify that the system index was not included Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) .get() - .getSnapshots(REPO_NAME) + .getSnapshots() .stream() .map(SnapshotInfo::indices) .flatMap(Collection::stream) @@ -843,7 +843,7 @@ public void testPartialSnapshotsOfSystemIndexRemovesFeatureState() throws Except .prepareGetSnapshots(REPO_NAME) .setSnapshots(partialSnapName) .get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots(REPO_NAME).get(0); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); assertNotNull(snapshotInfo); assertThat(snapshotInfo.failedShards(), lessThan(snapshotInfo.totalShards())); List statesInSnapshot = snapshotInfo.featureStates() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java index c982e948f21cd..417367c320865 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java @@ -65,6 +65,11 @@ public class UpdateIT extends ESIntegTestCase { private static final String UPSERT_SCRIPT = "scripted_upsert"; private 
static final String EXTRACT_CTX_SCRIPT = "extract_ctx"; + @SuppressWarnings("unchecked") + private static Map get(Map source, String key) { + return (Map) source.get(key); + } + public static class UpdateScriptsPlugin extends MockScriptPlugin { @Override public String pluginScriptLang() { @@ -74,38 +79,39 @@ public String pluginScriptLang() { protected Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); scripts.put(PUT_VALUES_SCRIPT, vars -> { - Map ctx = (Map) vars.get("ctx"); + Map ctx = get(vars, "ctx"); assertNotNull(ctx); - Map params = new HashMap<>((Map) vars.get("params")); + Map params = new HashMap<>(get(vars, "params")); + @SuppressWarnings("unchecked") Map newCtx = (Map) params.remove("_ctx"); if (newCtx != null) { assertFalse(newCtx.containsKey("_source")); ctx.putAll(newCtx); } - Map source = (Map) ctx.get("_source"); + Map source = get(ctx, "_source"); params.remove("ctx"); source.putAll(params); return ctx; }); scripts.put(FIELD_INC_SCRIPT, vars -> { - Map params = (Map) vars.get("params"); + Map params = get(vars, "params"); String fieldname = (String) vars.get("field"); - Map ctx = (Map) vars.get("ctx"); + Map ctx = get(vars, "ctx"); assertNotNull(ctx); - Map source = (Map) ctx.get("_source"); + Map source = get(ctx, "_source"); Number currentValue = (Number) source.get(fieldname); Number inc = (Number) params.getOrDefault("inc", 1); source.put(fieldname, currentValue.longValue() + inc.longValue()); return ctx; }); scripts.put(UPSERT_SCRIPT, vars -> { - Map ctx = (Map) vars.get("ctx"); + Map ctx = get(vars, "ctx"); assertNotNull(ctx); - Map source = (Map) ctx.get("_source"); + Map source = get(ctx, "_source"); Number payment = (Number) vars.get("payment"); Number oldBalance = (Number) source.get("balance"); int deduction = "create".equals(ctx.get("op")) ? 
payment.intValue() / 2 : payment.intValue(); @@ -113,10 +119,10 @@ protected Map, Object>> pluginScripts() { return ctx; }); scripts.put(EXTRACT_CTX_SCRIPT, vars -> { - Map ctx = (Map) vars.get("ctx"); + Map ctx = get(vars, "ctx"); assertNotNull(ctx); - Map source = (Map) ctx.get("_source"); + Map source = get(ctx, "_source"); Map ctxWithoutSource = new HashMap<>(ctx); ctxWithoutSource.remove("_source"); source.put("update_context", ctxWithoutSource); @@ -392,12 +398,12 @@ public void testUpdate() throws Exception { .setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet(); for (int i = 0; i < 5; i++) { GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); - Map map1 = (Map) getResponse.getSourceAsMap().get("map"); + Map map1 = get(getResponse.getSourceAsMap(), "map"); assertThat(map1.size(), equalTo(3)); assertThat(map1.containsKey("map1"), equalTo(true)); assertThat(map1.containsKey("map3"), equalTo(true)); assertThat(map1.containsKey("commonkey"), equalTo(true)); - Map map2 = (Map) map1.get("commonkey"); + Map map2 = get(map1, "commonkey"); assertThat(map2.size(), equalTo(3)); assertThat(map2.containsKey("map1"), equalTo(true)); assertThat(map2.containsKey("map2"), equalTo(true)); @@ -508,7 +514,7 @@ public void testContextVariables() throws Exception { assertEquals(2, updateResponse.getVersion()); GetResponse getResponse = client().prepareGet("test", "id1").setRouting("routing1").execute().actionGet(); - Map updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); + Map updateContext = get(getResponse.getSourceAsMap(), "update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("id1", updateContext.get("_id")); assertEquals(1, updateContext.get("_version")); @@ -522,7 +528,7 @@ public void testContextVariables() throws Exception { assertEquals(2, updateResponse.getVersion()); getResponse = client().prepareGet("test", "id2").execute().actionGet(); - updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); + updateContext = get(getResponse.getSourceAsMap(), "update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("id2", updateContext.get("_id")); assertEquals(1, updateContext.get("_version")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java index 23a65f7b2db2f..bb54662ef9445 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java @@ -54,7 +54,7 @@ public void onFailure(Exception e) { client().admin().indices().prepareRefresh().execute().actionGet(); logger.info("done indexing, check all have the same field value"); - Map masterSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); + Map masterSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); for (int i = 0; i < (cluster().size() * 5); i++) { assertThat(client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource)); } diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index 0303c5cd7157a..3cb62e8428559 100644 --- 
a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -116,7 +116,7 @@ public String toString(String field) { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (sameClassAs(o) == false) return false; BinaryDocValuesRangeQuery that = (BinaryDocValuesRangeQuery) o; return Objects.equals(fieldName, that.fieldName) && queryType == that.queryType && @@ -127,7 +127,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(getClass(), fieldName, queryType, lengthType, from, to); + return Objects.hash(classHash(), fieldName, queryType, lengthType, from, to); } public enum QueryType { diff --git a/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java b/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java index cd8248229972e..d9e5a10b2343f 100644 --- a/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java @@ -46,7 +46,7 @@ public SearchAfterSortedDocQuery(Sort sort, FieldDoc after) { this.sort = Objects.requireNonNull(sort); this.after = after; int numFields = sort.getSort().length; - this.fieldComparators = new FieldComparator[numFields]; + this.fieldComparators = new FieldComparator[numFields]; this.reverseMuls = new int[numFields]; for (int i = 0; i < numFields; i++) { SortField sortField = sort.getSort()[i]; diff --git a/server/src/main/java/org/apache/lucene/search/XCombinedFieldQuery.java b/server/src/main/java/org/apache/lucene/search/XCombinedFieldQuery.java new file mode 100644 index 0000000000000..76c1ffbeb1024 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/search/XCombinedFieldQuery.java @@ -0,0 +1,508 @@ +/* @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2020 Elasticsearch B.V. 
+ */ +package org.apache.lucene.search; + +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermState; +import org.apache.lucene.index.TermStates; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.DFRSimilarity; +import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.search.similarities.SimilarityBase; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.SmallFloat; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; + +/** + * + * Copy of {@link CombinedFieldQuery} that contains a fix for LUCENE-9999. + * TODO: remove once LUCENE-9999 is fixed and integrated + * + * A {@link Query} that treats multiple fields as a single stream and scores terms as if you had + * indexed them as a single term in a single field. + * + *
The query works as follows:
+ *
+ * 1. Given a list of fields and weights, it pretends there is a synthetic combined field where
+ * all terms have been indexed. It computes new term and collection statistics for this
+ * combined field.
+ * 2. It uses a disjunction iterator and {@link IndexSearcher#getSimilarity} to score documents.
+ *
+ * In order for a similarity to be compatible, {@link Similarity#computeNorm} must be additive:
+ * the norm of the combined field is the sum of norms for each individual field. The norms must also
+ * be encoded using {@link SmallFloat#intToByte4}. These requirements hold for all similarities that
+ * compute norms the same way as {@link SimilarityBase#computeNorm}, which includes {@link
+ * BM25Similarity} and {@link DFRSimilarity}. Per-field similarities are not supported.
+ *
+ * The query also requires that either all fields or no fields have norms enabled. Having only
+ * some fields with norms enabled can result in errors.
+ *
+ *
The scoring is based on BM25F's simple formula described in: + * http://www.staff.city.ac.uk/~sb317/papers/foundations_bm25_review.pdf. This query implements the + * same approach but allows other similarities besides {@link + * org.apache.lucene.search.similarities.BM25Similarity}. + * + */ +public final class XCombinedFieldQuery extends Query implements Accountable { + private static final long BASE_RAM_BYTES = + RamUsageEstimator.shallowSizeOfInstance(XCombinedFieldQuery.class); + + /** A builder for {@link XCombinedFieldQuery}. */ + public static class Builder { + private final Map fieldAndWeights = new HashMap<>(); + private final Set termsSet = new HashSet<>(); + + /** + * Adds a field to this builder. + * + * @param field The field name. + */ + public Builder addField(String field) { + return addField(field, 1f); + } + + /** + * Adds a field to this builder. + * + * @param field The field name. + * @param weight The weight associated to this field. + */ + public Builder addField(String field, float weight) { + if (weight < 1) { + throw new IllegalArgumentException("weight must be greater or equal to 1"); + } + fieldAndWeights.put(field, new FieldAndWeight(field, weight)); + return this; + } + + /** Adds a term to this builder. */ + public Builder addTerm(BytesRef term) { + if (termsSet.size() > BooleanQuery.getMaxClauseCount()) { + throw new BooleanQuery.TooManyClauses(); + } + termsSet.add(term); + return this; + } + + /** Builds the {@link XCombinedFieldQuery}. */ + public XCombinedFieldQuery build() { + int size = fieldAndWeights.size() * termsSet.size(); + if (size > BooleanQuery.getMaxClauseCount()) { + throw new BooleanQuery.TooManyClauses(); + } + BytesRef[] terms = termsSet.toArray(new BytesRef[0]); + return new XCombinedFieldQuery(new TreeMap<>(fieldAndWeights), terms); + } + } + + static class FieldAndWeight { + final String field; + final float weight; + + FieldAndWeight(String field, float weight) { + this.field = field; + this.weight = weight; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FieldAndWeight that = (FieldAndWeight) o; + return Float.compare(that.weight, weight) == 0 && Objects.equals(field, that.field); + } + + @Override + public int hashCode() { + return Objects.hash(field, weight); + } + } + + // sorted map for fields. + private final TreeMap fieldAndWeights; + // array of terms, sorted. 
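The Builder in the hunk above is the only public way to assemble the query, so a minimal, hypothetical usage would look roughly like this (the field names, weight and search term are invented for illustration, and an existing IndexSearcher is assumed):

    // Combine "title" and "body" into one synthetic scored field, weighting title twice as heavily.
    Query query = new XCombinedFieldQuery.Builder()
        .addField("title", 2f)
        .addField("body")                           // weight defaults to 1
        .addTerm(new BytesRef("elasticsearch"))
        .build();
    TopDocs hits = indexSearcher.search(query, 10); // scored via IndexSearcher#getSimilarity, e.g. BM25

Both fields must either index norms or omit them; createWeight() below rejects mixed configurations via validateConsistentNorms().
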
+ private final BytesRef terms[]; + // array of terms per field, sorted + private final Term fieldTerms[]; + + private final long ramBytesUsed; + + private XCombinedFieldQuery(TreeMap fieldAndWeights, BytesRef[] terms) { + this.fieldAndWeights = fieldAndWeights; + this.terms = terms; + int numFieldTerms = fieldAndWeights.size() * terms.length; + if (numFieldTerms > BooleanQuery.getMaxClauseCount()) { + throw new BooleanQuery.TooManyClauses(); + } + this.fieldTerms = new Term[numFieldTerms]; + Arrays.sort(terms); + int pos = 0; + for (String field : fieldAndWeights.keySet()) { + for (BytesRef term : terms) { + fieldTerms[pos++] = new Term(field, term); + } + } + + this.ramBytesUsed = + BASE_RAM_BYTES + + RamUsageEstimator.sizeOfObject(fieldAndWeights) + + RamUsageEstimator.sizeOfObject(fieldTerms) + + RamUsageEstimator.sizeOfObject(terms); + } + + public List getTerms() { + return Collections.unmodifiableList(Arrays.asList(fieldTerms)); + } + + @Override + public String toString(String field) { + StringBuilder builder = new StringBuilder("CombinedFieldQuery(("); + int pos = 0; + for (FieldAndWeight fieldWeight : fieldAndWeights.values()) { + if (pos++ != 0) { + builder.append(" "); + } + builder.append(fieldWeight.field); + if (fieldWeight.weight != 1f) { + builder.append("^"); + builder.append(fieldWeight.weight); + } + } + builder.append(")("); + pos = 0; + for (BytesRef term : terms) { + if (pos++ != 0) { + builder.append(" "); + } + builder.append(term.utf8ToString()); + } + builder.append("))"); + return builder.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (sameClassAs(o) == false) return false; + XCombinedFieldQuery that = (XCombinedFieldQuery) o; + return Objects.equals(fieldAndWeights, that.fieldAndWeights) && Arrays.equals(terms, that.terms); + } + + @Override + public int hashCode() { + int result = classHash(); + result = 31 * result + Objects.hash(fieldAndWeights); + result = 31 * result + Arrays.hashCode(terms); + return result; + } + + @Override + public long ramBytesUsed() { + return ramBytesUsed; + } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + // optimize zero and single field cases + if (terms.length == 0) { + return new BooleanQuery.Builder().build(); + } + return this; + } + + @Override + public void visit(QueryVisitor visitor) { + Term[] selectedTerms = + Arrays.stream(fieldTerms).filter(t -> visitor.acceptField(t.field())).toArray(Term[]::new); + if (selectedTerms.length > 0) { + QueryVisitor v = visitor.getSubVisitor(BooleanClause.Occur.SHOULD, this); + v.consumeTerms(this, selectedTerms); + } + } + + private BooleanQuery rewriteToBoolean() { + // rewrite to a simple disjunction if the score is not needed. + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + for (Term term : fieldTerms) { + bq.add(new TermQuery(term), BooleanClause.Occur.SHOULD); + } + return bq.build(); + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { + validateConsistentNorms(searcher.getIndexReader()); + if (scoreMode.needsScores()) { + return new CombinedFieldWeight(this, searcher, scoreMode, boost); + } else { + // rewrite to a simple disjunction if the score is not needed. 
+ Query bq = rewriteToBoolean(); + return searcher.rewrite(bq).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); + } + } + + private void validateConsistentNorms(IndexReader reader) { + boolean allFieldsHaveNorms = true; + boolean noFieldsHaveNorms = true; + + for (LeafReaderContext context : reader.leaves()) { + FieldInfos fieldInfos = context.reader().getFieldInfos(); + for (String field : fieldAndWeights.keySet()) { + FieldInfo fieldInfo = fieldInfos.fieldInfo(field); + if (fieldInfo != null) { + allFieldsHaveNorms &= fieldInfo.hasNorms(); + noFieldsHaveNorms &= fieldInfo.omitsNorms(); + } + } + } + + if (allFieldsHaveNorms == false && noFieldsHaveNorms == false) { + throw new IllegalArgumentException( + getClass().getSimpleName() + + " requires norms to be consistent across fields: some fields cannot " + + " have norms enabled, while others have norms disabled"); + } + } + + class CombinedFieldWeight extends Weight { + private final IndexSearcher searcher; + private final TermStates termStates[]; + private final Similarity.SimScorer simWeight; + + CombinedFieldWeight(Query query, IndexSearcher searcher, ScoreMode scoreMode, float boost) + throws IOException { + super(query); + assert scoreMode.needsScores(); + this.searcher = searcher; + long docFreq = 0; + long totalTermFreq = 0; + termStates = new TermStates[fieldTerms.length]; + for (int i = 0; i < termStates.length; i++) { + FieldAndWeight field = fieldAndWeights.get(fieldTerms[i].field()); + TermStates ts = TermStates.build(searcher.getTopReaderContext(), fieldTerms[i], true); + termStates[i] = ts; + if (ts.docFreq() > 0) { + TermStatistics termStats = + searcher.termStatistics(fieldTerms[i], ts.docFreq(), ts.totalTermFreq()); + docFreq = Math.max(termStats.docFreq(), docFreq); + totalTermFreq += (double) field.weight * termStats.totalTermFreq(); + } + } + if (docFreq > 0) { + CollectionStatistics pseudoCollectionStats = mergeCollectionStatistics(searcher); + TermStatistics pseudoTermStatistics = + new TermStatistics(new BytesRef("pseudo_term"), docFreq, Math.max(1, totalTermFreq)); + this.simWeight = + searcher.getSimilarity().scorer(boost, pseudoCollectionStats, pseudoTermStatistics); + } else { + this.simWeight = null; + } + } + + private CollectionStatistics mergeCollectionStatistics(IndexSearcher searcher) + throws IOException { + long maxDoc = searcher.getIndexReader().maxDoc(); + long docCount = 0; + long sumTotalTermFreq = 0; + long sumDocFreq = 0; + for (FieldAndWeight fieldWeight : fieldAndWeights.values()) { + CollectionStatistics collectionStats = searcher.collectionStatistics(fieldWeight.field); + if (collectionStats != null) { + docCount = Math.max(collectionStats.docCount(), docCount); + sumDocFreq = Math.max(collectionStats.sumDocFreq(), sumDocFreq); + sumTotalTermFreq += (double) fieldWeight.weight * collectionStats.sumTotalTermFreq(); + } + } + + return new CollectionStatistics( + "pseudo_field", maxDoc, docCount, sumTotalTermFreq, sumDocFreq); + } + + @Override + public void extractTerms(Set termSet) { + termSet.addAll(Arrays.asList(fieldTerms)); + } + + @Override + public Matches matches(LeafReaderContext context, int doc) throws IOException { + Weight weight = + searcher.rewrite(rewriteToBoolean()).createWeight(searcher, ScoreMode.COMPLETE, 1f); + return weight.matches(context, doc); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + Scorer scorer = scorer(context); + if (scorer != null) { + int newDoc = scorer.iterator().advance(doc); + if 
(newDoc == doc) { + final float freq; + if (scorer instanceof CombinedFieldScorer) { + freq = ((CombinedFieldScorer) scorer).freq(); + } else { + assert scorer instanceof TermScorer; + freq = ((TermScorer) scorer).freq(); + } + final XMultiNormsLeafSimScorer docScorer = + new XMultiNormsLeafSimScorer( + simWeight, context.reader(), fieldAndWeights.values(), true); + Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq); + Explanation scoreExplanation = docScorer.explain(doc, freqExplanation); + return Explanation.match( + scoreExplanation.getValue(), + "weight(" + getQuery() + " in " + doc + "), result of:", + scoreExplanation); + } + } + return Explanation.noMatch("no matching term"); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + List iterators = new ArrayList<>(); + List fields = new ArrayList<>(); + for (int i = 0; i < fieldTerms.length; i++) { + TermState state = termStates[i].get(context); + if (state != null) { + TermsEnum termsEnum = context.reader().terms(fieldTerms[i].field()).iterator(); + termsEnum.seekExact(fieldTerms[i].bytes(), state); + PostingsEnum postingsEnum = termsEnum.postings(null, PostingsEnum.FREQS); + iterators.add(postingsEnum); + fields.add(fieldAndWeights.get(fieldTerms[i].field())); + } + } + + if (iterators.isEmpty()) { + return null; + } + + // we must optimize this case (term not in segment), disjunctions require >= 2 subs + if (iterators.size() == 1) { + final LeafSimScorer scoringSimScorer = + new LeafSimScorer(simWeight, context.reader(), fields.get(0).field, true); + return new TermScorer(this, iterators.get(0), scoringSimScorer); + } + final XMultiNormsLeafSimScorer scoringSimScorer = + new XMultiNormsLeafSimScorer(simWeight, context.reader(), fields, true); + LeafSimScorer nonScoringSimScorer = + new LeafSimScorer(simWeight, context.reader(), "pseudo_field", false); + // we use termscorers + disjunction as an impl detail + DisiPriorityQueue queue = new DisiPriorityQueue(iterators.size()); + for (int i = 0; i < iterators.size(); i++) { + float weight = fields.get(i).weight; + queue.add( + new WeightedDisiWrapper( + new TermScorer(this, iterators.get(i), nonScoringSimScorer), weight)); + } + // Even though it is called approximation, it is accurate since none of + // the sub iterators are two-phase iterators. 
+ DocIdSetIterator iterator = new DisjunctionDISIApproximation(queue); + return new CombinedFieldScorer(this, queue, iterator, scoringSimScorer); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return false; + } + } + + private static class WeightedDisiWrapper extends DisiWrapper { + final float weight; + + WeightedDisiWrapper(Scorer scorer, float weight) { + super(scorer); + this.weight = weight; + } + + float freq() throws IOException { + return weight * ((PostingsEnum) iterator).freq(); + } + } + + private static class CombinedFieldScorer extends Scorer { + private final DisiPriorityQueue queue; + private final DocIdSetIterator iterator; + private final XMultiNormsLeafSimScorer simScorer; + + CombinedFieldScorer( + Weight weight, + DisiPriorityQueue queue, + DocIdSetIterator iterator, + XMultiNormsLeafSimScorer simScorer) { + super(weight); + this.queue = queue; + this.iterator = iterator; + this.simScorer = simScorer; + } + + @Override + public int docID() { + return iterator.docID(); + } + + float freq() throws IOException { + DisiWrapper w = queue.topList(); + float freq = ((WeightedDisiWrapper) w).freq(); + for (w = w.next; w != null; w = w.next) { + freq += ((WeightedDisiWrapper) w).freq(); + if (freq < 0) { // overflow + return Integer.MAX_VALUE; + } + } + return freq; + } + + @Override + public float score() throws IOException { + return simScorer.score(iterator.docID(), freq()); + } + + @Override + public DocIdSetIterator iterator() { + return iterator; + } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.POSITIVE_INFINITY; + } + } +} diff --git a/server/src/main/java/org/apache/lucene/search/XMultiNormsLeafSimScorer.java b/server/src/main/java/org/apache/lucene/search/XMultiNormsLeafSimScorer.java new file mode 100644 index 0000000000000..0a4fdf0ff4a14 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/search/XMultiNormsLeafSimScorer.java @@ -0,0 +1,171 @@ +/* @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2020 Elasticsearch B.V. + */ +package org.apache.lucene.search; + +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.search.XCombinedFieldQuery.FieldAndWeight; +import org.apache.lucene.search.similarities.Similarity.SimScorer; +import org.apache.lucene.util.SmallFloat; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; + +/** + * Copy of {@link MultiNormsLeafSimScorer} that contains a fix for LUCENE-9999. + * TODO: remove once LUCENE-9999 is fixed and integrated + * + *
+ * <p>
For all fields, norms must be encoded using {@link SmallFloat#intToByte4}. This scorer also + * requires that either all fields or no fields have norms enabled. Having only some fields with + * norms enabled can result in errors or undefined behavior. + */ +final class XMultiNormsLeafSimScorer { + /** Cache of decoded norms. */ + private static final float[] LENGTH_TABLE = new float[256]; + + static { + for (int i = 0; i < 256; i++) { + LENGTH_TABLE[i] = SmallFloat.byte4ToInt((byte) i); + } + } + + private final SimScorer scorer; + private final NumericDocValues norms; + + /** Sole constructor: Score documents of {@code reader} with {@code scorer}. */ + XMultiNormsLeafSimScorer( + SimScorer scorer, + LeafReader reader, + Collection normFields, + boolean needsScores) + throws IOException { + this.scorer = Objects.requireNonNull(scorer); + if (needsScores) { + final List normsList = new ArrayList<>(); + final List weightList = new ArrayList<>(); + for (FieldAndWeight field : normFields) { + NumericDocValues norms = reader.getNormValues(field.field); + if (norms != null) { + normsList.add(norms); + weightList.add(field.weight); + } + } + + if (normsList.isEmpty()) { + norms = null; + } else if (normsList.size() == 1) { + norms = normsList.get(0); + } else { + final NumericDocValues[] normsArr = normsList.toArray(new NumericDocValues[0]); + final float[] weightArr = new float[normsList.size()]; + for (int i = 0; i < weightList.size(); i++) { + weightArr[i] = weightList.get(i); + } + norms = new MultiFieldNormValues(normsArr, weightArr); + } + } else { + norms = null; + } + } + + private long getNormValue(int doc) throws IOException { + if (norms != null) { + boolean found = norms.advanceExact(doc); + assert found; + return norms.longValue(); + } else { + return 1L; // default norm + } + } + + /** + * Score the provided document assuming the given term document frequency. This method must be + * called on non-decreasing sequences of doc ids. + * + * @see SimScorer#score(float, long) + */ + public float score(int doc, float freq) throws IOException { + return scorer.score(freq, getNormValue(doc)); + } + + /** + * Explain the score for the provided document assuming the given term document frequency. This + * method must be called on non-decreasing sequences of doc ids. 
+ * + * @see SimScorer#explain(Explanation, long) + */ + public Explanation explain(int doc, Explanation freqExpl) throws IOException { + return scorer.explain(freqExpl, getNormValue(doc)); + } + + private static class MultiFieldNormValues extends NumericDocValues { + private final NumericDocValues[] normsArr; + private final float[] weightArr; + private long current; + private int docID = -1; + + MultiFieldNormValues(NumericDocValues[] normsArr, float[] weightArr) { + this.normsArr = normsArr; + this.weightArr = weightArr; + } + + @Override + public long longValue() { + return current; + } + + @Override + public boolean advanceExact(int target) throws IOException { + float normValue = 0; + boolean found = false; + for (int i = 0; i < normsArr.length; i++) { + if (normsArr[i].advanceExact(target)) { + normValue += + weightArr[i] * LENGTH_TABLE[Byte.toUnsignedInt((byte) normsArr[i].longValue())]; + found = true; + } + } + current = SmallFloat.intToByte4(Math.round(normValue)); + return found; + } + + @Override + public int docID() { + return docID; + } + + @Override + public int nextDoc() { + throw new UnsupportedOperationException(); + } + + @Override + public int advance(int target) { + throw new UnsupportedOperationException(); + } + + @Override + public long cost() { + throw new UnsupportedOperationException(); + } + } +} diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index e80158a97b83a..dd12837fce8c0 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -12,13 +12,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BoostQuery; -import org.apache.lucene.search.CombinedFieldQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.XCombinedFieldQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; @@ -75,8 +75,8 @@ protected void flatten(Query sourceQuery, IndexReader reader, Collection for (Term term : synQuery.getTerms()) { flatten(new TermQuery(term), reader, flatQueries, boost); } - } else if (sourceQuery instanceof CombinedFieldQuery) { - CombinedFieldQuery combinedFieldQuery = (CombinedFieldQuery) sourceQuery; + } else if (sourceQuery instanceof XCombinedFieldQuery) { + XCombinedFieldQuery combinedFieldQuery = (XCombinedFieldQuery) sourceQuery; for (Term term : combinedFieldQuery.getTerms()) { flatten(new TermQuery(term), reader, flatQueries, boost); } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 2a5463ac90b57..40722a09b08bb 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -84,7 +84,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_13_1 = new Version(7130199, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version 
V_7_13_2 = new Version(7130299, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version V_7_13_3 = new Version(7130399, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_4 = new Version(7130499, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version V_7_14_0 = new Version(7140099, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_1 = new Version(7140199, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version CURRENT = V_8_0_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 066334cc94423..1ea04c1825ef7 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -64,6 +64,8 @@ import org.elasticsearch.action.admin.cluster.snapshots.features.TransportSnapshottableFeaturesAction; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; import org.elasticsearch.action.admin.cluster.snapshots.get.TransportGetSnapshotsAction; +import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.get.shard.TransportGetShardSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.restore.TransportRestoreSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; @@ -110,6 +112,8 @@ import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; +import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction; +import org.elasticsearch.action.admin.indices.diskusage.TransportAnalyzeIndexDiskUsageAction; import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; @@ -150,7 +154,9 @@ import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; +import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.TransportFieldUsageAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; @@ -185,7 +191,6 @@ import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; -import 
org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesIndexAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.get.TransportGetAction; @@ -300,6 +305,7 @@ import org.elasticsearch.rest.action.admin.cluster.dangling.RestListDanglingIndicesAction; import org.elasticsearch.rest.action.admin.indices.RestAddIndexBlockAction; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; +import org.elasticsearch.rest.action.admin.indices.RestAnalyzeIndexDiskUsageAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; import org.elasticsearch.rest.action.admin.indices.RestCreateIndexAction; @@ -307,6 +313,7 @@ import org.elasticsearch.rest.action.admin.indices.RestDeleteComposableIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestDeleteIndexAction; import org.elasticsearch.rest.action.admin.indices.RestDeleteIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.RestFieldUsageStatsAction; import org.elasticsearch.rest.action.admin.indices.RestFlushAction; import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; @@ -337,6 +344,7 @@ import org.elasticsearch.rest.action.admin.indices.RestSimulateTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.indices.RestUpgradeActionDeprecated; import org.elasticsearch.rest.action.admin.indices.RestValidateQueryAction; import org.elasticsearch.rest.action.cat.AbstractCatAction; import org.elasticsearch.rest.action.cat.RestAliasAction; @@ -434,7 +442,10 @@ public ActionModule(Settings settings, IndexNameExpressionResolver indexNameExpr destructiveOperations = new DestructiveOperations(settings, clusterSettings); Set headers = Stream.concat( actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()), - Stream.of(new RestHeaderDefinition(Task.X_OPAQUE_ID, false)) + Stream.of( + new RestHeaderDefinition(Task.X_OPAQUE_ID, false), + new RestHeaderDefinition(Task.TRACE_PARENT, false) + ) ).collect(Collectors.toSet()); UnaryOperator restWrapper = null; for (ActionPlugin plugin : actionPlugins) { @@ -516,6 +527,7 @@ public void reg actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); actions.register(SnapshottableFeaturesAction.INSTANCE, TransportSnapshottableFeaturesAction.class); actions.register(ResetFeatureStateAction.INSTANCE, TransportResetFeatureStateAction.class); + actions.register(GetShardSnapshotAction.INSTANCE, TransportGetShardSnapshotAction.class); actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); @@ -577,6 +589,8 @@ public void reg actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); actions.register(AutoCreateAction.INSTANCE, AutoCreateAction.TransportAction.class); actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class); + actions.register(AnalyzeIndexDiskUsageAction.INSTANCE, TransportAnalyzeIndexDiskUsageAction.class); + actions.register(FieldUsageStatsAction.INSTANCE, TransportFieldUsageAction.class); //Indexed scripts 
actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); @@ -586,7 +600,6 @@ public void reg actions.register(GetScriptLanguageAction.INSTANCE, TransportGetScriptLanguageAction.class); actions.register(FieldCapabilitiesAction.INSTANCE, TransportFieldCapabilitiesAction.class); - actions.register(TransportFieldCapabilitiesIndexAction.TYPE, TransportFieldCapabilitiesIndexAction.class); actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); @@ -789,6 +802,11 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + registerHandler.accept(new RestAnalyzeIndexDiskUsageAction()); + registerHandler.accept(new RestFieldUsageStatsAction()); + + registerHandler.accept(new RestUpgradeActionDeprecated()); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, indexNameExpressionResolver, nodesInCluster)) { diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 36bb4aadc3f9b..4c5189a50a896 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -249,7 +249,7 @@ static void writeDocumentRequestThin(StreamOutput out, DocWriteRequest reques } static ActionRequestValidationException validateSeqNoBasedCASParams( - DocWriteRequest request, ActionRequestValidationException validationException) { + DocWriteRequest request, ActionRequestValidationException validationException) { final long version = request.version(); final VersionType versionType = request.versionType(); if (versionType.validateVersionForWrites(version) == false) { diff --git a/server/src/main/java/org/elasticsearch/action/IndicesRequest.java b/server/src/main/java/org/elasticsearch/action/IndicesRequest.java index ef856826057f3..ef79ac16e4b55 100644 --- a/server/src/main/java/org/elasticsearch/action/IndicesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/IndicesRequest.java @@ -29,6 +29,13 @@ public interface IndicesRequest { */ IndicesOptions indicesOptions(); + /** + * Determines whether the request can contain indices on a remote cluster. + */ + default boolean allowsRemoteIndices() { + return false; + } + /** * Determines whether the request should be applied to data streams. When {@code false}, none of the names or * wildcard expressions in {@link #indices} should be applied to or expanded to any data streams. All layers diff --git a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java index 95dffc11c4897..e3691c8d606e0 100644 --- a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java @@ -19,7 +19,7 @@ * An exception indicating that a failure occurred performing an operation on the shard. 
* */ -public abstract class ShardOperationFailedException implements Writeable, ToXContentObject { +public abstract class ShardOperationFailedException extends Exception implements Writeable, ToXContentObject { protected String index; protected int shardId = -1; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index 9ecc361cafef8..534f8c964f602 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.cluster.allocation; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -36,15 +37,27 @@ */ public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { + static final String NO_SHARD_SPECIFIED_MESSAGE = "No shard was specified in the explain API request, so this response " + + "explains a randomly chosen unassigned shard. There may be other unassigned shards in this cluster which cannot be assigned for " + + "different reasons. It may not be possible to assign this shard until one of the other shards is assigned correctly. To explain " + + "the allocation of other shards (whether assigned or unassigned) you must specify the target shard in the request to this API."; + + private final boolean specificShard; private final ShardRouting shardRouting; private final DiscoveryNode currentNode; private final DiscoveryNode relocationTargetNode; private final ClusterInfo clusterInfo; private final ShardAllocationDecision shardAllocationDecision; - public ClusterAllocationExplanation(ShardRouting shardRouting, @Nullable DiscoveryNode currentNode, - @Nullable DiscoveryNode relocationTargetNode, @Nullable ClusterInfo clusterInfo, - ShardAllocationDecision shardAllocationDecision) { + public ClusterAllocationExplanation( + boolean specificShard, + ShardRouting shardRouting, + @Nullable DiscoveryNode currentNode, + @Nullable DiscoveryNode relocationTargetNode, + @Nullable ClusterInfo clusterInfo, + ShardAllocationDecision shardAllocationDecision) { + + this.specificShard = specificShard; this.shardRouting = shardRouting; this.currentNode = currentNode; this.relocationTargetNode = relocationTargetNode; @@ -53,6 +66,11 @@ public ClusterAllocationExplanation(ShardRouting shardRouting, @Nullable Discove } public ClusterAllocationExplanation(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_7_15_0)) { + this.specificShard = in.readBoolean(); + } else { + this.specificShard = true; // suppress "this is a random shard" warning in BwC situations + } this.shardRouting = new ShardRouting(in); this.currentNode = in.readOptionalWriteable(DiscoveryNode::new); this.relocationTargetNode = in.readOptionalWriteable(DiscoveryNode::new); @@ -62,6 +80,9 @@ public ClusterAllocationExplanation(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_7_15_0)) { + out.writeBoolean(specificShard); + } // else suppress "this is a random shard" warning in BwC situations shardRouting.writeTo(out); out.writeOptionalWriteable(currentNode); 
out.writeOptionalWriteable(relocationTargetNode); @@ -69,6 +90,10 @@ public void writeTo(StreamOutput out) throws IOException { shardAllocationDecision.writeTo(out); } + public boolean isSpecificShard() { + return specificShard; + } + /** * Returns the shard that the explanation is about. */ @@ -131,6 +156,9 @@ public ShardAllocationDecision getShardAllocationDecision() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); { + if (isSpecificShard() == false) { + builder.field("note", NO_SHARD_SPECIFIED_MESSAGE); + } builder.field("index", shardRouting.getIndexName()); builder.field("shard", shardRouting.getId()); builder.field("primary", shardRouting.primary()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index a795818ac2a63..d6a1e5f2d9727 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -81,15 +81,25 @@ protected void masterOperation(Task task, final ClusterAllocationExplainRequest ShardRouting shardRouting = findShardToExplain(request, allocation); logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting); - ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, - request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), allocationService); + ClusterAllocationExplanation cae = explainShard( + shardRouting, + allocation, + request.includeDiskInfo() ? clusterInfo : null, + request.includeYesDecisions(), + request.useAnyUnassignedShard() == false, + allocationService); listener.onResponse(new ClusterAllocationExplainResponse(cae)); } // public for testing - public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation, - ClusterInfo clusterInfo, boolean includeYesDecisions, - AllocationService allocationService) { + public static ClusterAllocationExplanation explainShard( + ShardRouting shardRouting, + RoutingAllocation allocation, + ClusterInfo clusterInfo, + boolean includeYesDecisions, + boolean isSpecificShard, + AllocationService allocationService) { + allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS); ShardAllocationDecision shardDecision; @@ -99,23 +109,32 @@ public static ClusterAllocationExplanation explainShard(ShardRouting shardRoutin shardDecision = allocationService.explainShardAllocation(shardRouting, allocation); } - return new ClusterAllocationExplanation(shardRouting, + return new ClusterAllocationExplanation( + isSpecificShard, + shardRouting, shardRouting.currentNodeId() != null ? allocation.nodes().get(shardRouting.currentNodeId()) : null, shardRouting.relocatingNodeId() != null ? 
allocation.nodes().get(shardRouting.relocatingNodeId()) : null, - clusterInfo, shardDecision); + clusterInfo, + shardDecision); } // public for testing public static ShardRouting findShardToExplain(ClusterAllocationExplainRequest request, RoutingAllocation allocation) { ShardRouting foundShard = null; if (request.useAnyUnassignedShard()) { - // If we can use any shard, just pick the first unassigned one (if there are any) - RoutingNodes.UnassignedShards.UnassignedIterator ui = allocation.routingNodes().unassigned().iterator(); - if (ui.hasNext()) { - foundShard = ui.next(); + // If we can use any shard, return the first unassigned primary (if there is one) or the first unassigned replica (if not) + for (ShardRouting unassigned : allocation.routingNodes().unassigned()) { + if (foundShard == null || unassigned.primary()) { + foundShard = unassigned; + } + if (foundShard.primary()) { + break; + } } if (foundShard == null) { - throw new IllegalArgumentException("unable to find any unassigned shards to explain [" + request + "]"); + throw new IllegalArgumentException("No shard was specified in the request which means the response should explain a " + + "randomly-chosen unassigned shard, but there are no unassigned shards in this cluster. To explain the allocation of " + + "an assigned shard you must specify the target shard in the request."); } } else { String index = request.getIndex(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 824390cda289f..20e49fad5f372 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -173,6 +173,7 @@ private void cleanupRepo(String repositoryName, ActionListener userMetadata; public CreateSnapshotRequest() {} @@ -338,11 +340,19 @@ public boolean includeGlobalState() { return includeGlobalState; } + /** + * @return user metadata map that should be stored with the snapshot or {@code null} if there is no user metadata to be associated with + * this snapshot + */ + @Nullable public Map userMetadata() { return userMetadata; } - public CreateSnapshotRequest userMetadata(Map userMetadata) { + /** + * @param userMetadata user metadata map that should be stored with the snapshot + */ + public CreateSnapshotRequest userMetadata(@Nullable Map userMetadata) { this.userMetadata = userMetadata; return this; } @@ -379,29 +389,35 @@ public CreateSnapshotRequest featureStates(List featureStates) { public CreateSnapshotRequest source(Map source) { for (Map.Entry entry : source.entrySet()) { String name = entry.getKey(); - if (name.equals("indices")) { - if (entry.getValue() instanceof String) { - indices(Strings.splitStringByCommaToArray((String) entry.getValue())); - } else if (entry.getValue() instanceof List) { - indices((List) entry.getValue()); - } else { - throw new IllegalArgumentException("malformed indices section, should be an array of strings"); - } - } else if (name.equals("feature_states")) { - if (entry.getValue() instanceof List) { - featureStates((List) entry.getValue()); - } else { - throw new IllegalArgumentException("malformed feature_states section, should be an array of strings"); - } - } else if (name.equals("partial")) { - 
partial(nodeBooleanValue(entry.getValue(), "partial")); - } else if (name.equals("include_global_state")) { - includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state"); - } else if (name.equals("metadata")) { - if (entry.getValue() != null && (entry.getValue() instanceof Map == false)) { - throw new IllegalArgumentException("malformed metadata, should be an object"); - } - userMetadata((Map) entry.getValue()); + switch (name) { + case "indices": + if (entry.getValue() instanceof String) { + indices(Strings.splitStringByCommaToArray((String) entry.getValue())); + } else if (entry.getValue() instanceof List) { + indices((List) entry.getValue()); + } else { + throw new IllegalArgumentException("malformed indices section, should be an array of strings"); + } + break; + case "feature_states": + if (entry.getValue() instanceof List) { + featureStates((List) entry.getValue()); + } else { + throw new IllegalArgumentException("malformed feature_states section, should be an array of strings"); + } + break; + case "partial": + partial(nodeBooleanValue(entry.getValue(), "partial")); + break; + case "include_global_state": + includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state"); + break; + case "metadata": + if (entry.getValue() != null && (entry.getValue() instanceof Map == false)) { + throw new IllegalArgumentException("malformed metadata, should be an object"); + } + userMetadata((Map) entry.getValue()); + break; } } indicesOptions(IndicesOptions.fromMap(source, indicesOptions)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 33fd896294ecf..b663b2c11dbf7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -11,6 +11,9 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.core.Nullable; + +import java.util.Map; /** * Create snapshot request builder @@ -124,4 +127,15 @@ public CreateSnapshotRequestBuilder setFeatureStates(String... featureStates) { request.featureStates(featureStates); return this; } + + /** + * Provide a map of user metadata that should be included in the snapshot metadata. 
+ * + * @param metadata user metadata map + * @return this builder + */ + public CreateSnapshotRequestBuilder setUserMetadata(@Nullable Map metadata) { + request.userMetadata(metadata); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index c3c5595930f96..e7a57abebc712 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -95,7 +95,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); if (snapshotInfo != null) { builder.field("snapshot"); - snapshotInfo.toXContent(builder, params); + snapshotInfo.toXContentExternal(builder, params); } else { builder.field("accepted", true); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java index e2145f62994ba..db2df62af7e6b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java @@ -8,24 +8,41 @@ package org.elasticsearch.action.admin.cluster.snapshots.features; -import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; /** Request for resetting feature state */ -public class ResetFeatureStateRequest extends ActionRequest { +public class ResetFeatureStateRequest extends MasterNodeRequest { + + private static final Version FEATURE_RESET_ON_MASTER = Version.V_7_14_0; + + public static ResetFeatureStateRequest fromStream(StreamInput in) throws IOException { + if (in.getVersion().before(FEATURE_RESET_ON_MASTER)) { + throw new IllegalStateException( + "feature reset is not available in a cluster that have nodes with version before " + FEATURE_RESET_ON_MASTER + ); + } + return new ResetFeatureStateRequest(in); + } public ResetFeatureStateRequest() {} - public ResetFeatureStateRequest(StreamInput in) throws IOException { + private ResetFeatureStateRequest(StreamInput in) throws IOException { super(in); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().before(FEATURE_RESET_ON_MASTER)) { + throw new IllegalStateException( + "feature reset is not available in a cluster that have nodes with version before " + FEATURE_RESET_ON_MASTER + ); + } super.writeTo(out); } @@ -33,4 +50,5 @@ public void writeTo(StreamOutput out) throws IOException { public ActionRequestValidationException validate() { return null; } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java index de717942dabf2..89208c60f9104 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java @@ -11,12 +11,17 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; @@ -25,29 +30,43 @@ /** * Transport action for cleaning up feature index state. */ -public class TransportResetFeatureStateAction extends HandledTransportAction { +public class TransportResetFeatureStateAction extends TransportMasterNodeAction { private final SystemIndices systemIndices; private final NodeClient client; - private final ClusterService clusterService; @Inject public TransportResetFeatureStateAction( TransportService transportService, + ThreadPool threadPool, ActionFilters actionFilters, SystemIndices systemIndices, NodeClient client, - ClusterService clusterService + ClusterService clusterService, + IndexNameExpressionResolver indexNameExpressionResolver ) { - super(ResetFeatureStateAction.NAME, transportService, actionFilters, ResetFeatureStateRequest::new); + super( + ResetFeatureStateAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ResetFeatureStateRequest::fromStream, + indexNameExpressionResolver, + ResetFeatureStateResponse::new, + ThreadPool.Names.SAME + ); this.systemIndices = systemIndices; this.client = client; - this.clusterService = clusterService; } @Override - protected void doExecute(Task task, ResetFeatureStateRequest request, ActionListener listener) { - + protected void masterOperation( + Task task, + ResetFeatureStateRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { if (systemIndices.getFeatures().size() == 0) { listener.onResponse(new ResetFeatureStateResponse(Collections.emptyList())); } @@ -65,4 +84,9 @@ protected void doExecute(Task task, ResetFeatureStateRequest request, ActionList feature.getCleanUpFunction().apply(clusterService, client, groupedActionListener); } } + + @Override + protected ClusterBlockException checkBlock(ResetFeatureStateRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 4f1abfe5b2b7c..64531064c1f9f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ 
-15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.SnapshotInfo; @@ -23,6 +24,8 @@ import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -36,9 +39,11 @@ public class GetSnapshotsRequest extends MasterNodeRequest public static final String CURRENT_SNAPSHOT = "_current"; public static final boolean DEFAULT_VERBOSE_MODE = true; - public static final Version MULTIPLE_REPOSITORIES_SUPPORT_ADDED = Version.V_8_0_0; + public static final Version MULTIPLE_REPOSITORIES_SUPPORT_ADDED = Version.V_7_14_0; - public static final Version PAGINATED_GET_SNAPSHOTS_VERSION = Version.V_8_0_0; + public static final Version PAGINATED_GET_SNAPSHOTS_VERSION = Version.V_7_14_0; + + public static final Version NUMERIC_PAGINATION_VERSION = Version.V_7_15_0; public static final int NO_LIMIT = -1; @@ -47,6 +52,11 @@ public class GetSnapshotsRequest extends MasterNodeRequest */ private int size = NO_LIMIT; + /** + * Numeric offset at which to start fetching snapshots. Mutually exclusive with {@link After} if not equal to {@code 0}. + */ + private int offset = 0; + @Nullable private After after; @@ -99,6 +109,9 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { sort = in.readEnum(SortBy.class); size = in.readVInt(); order = SortOrder.readFromStream(in); + if (in.getVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { + offset = in.readVInt(); + } } } @@ -125,6 +138,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(sort); out.writeVInt(size); order.writeTo(out); + if (out.getVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { + out.writeVInt(offset); + } else if (offset != 0) { + throw new IllegalArgumentException( + "can't use numeric offset in get snapshots request with node version [" + out.getVersion() + "]" + ); + } } else if (sort != SortBy.START_TIME || size != NO_LIMIT || after != null || order != SortOrder.ASC) { throw new IllegalArgumentException("can't use paginated get snapshots request with node version [" + out.getVersion() + "]"); } @@ -146,12 +166,17 @@ public ActionRequestValidationException validate() { if (size > 0) { validationException = addValidationError("can't use size limit with verbose=false", validationException); } + if (offset > 0) { + validationException = addValidationError("can't use offset with verbose=false", validationException); + } if (after != null) { validationException = addValidationError("can't use after with verbose=false", validationException); } if (order != SortOrder.ASC) { validationException = addValidationError("can't use non-default sort order with verbose=false", validationException); } + } else if (after != null && offset > 0) { + validationException = addValidationError("can't use after and offset simultaneously", validationException); } return validationException; } @@ -176,6 +201,13 @@ public String[] repositories() { return this.repositories; } + public boolean isSingleRepositoryRequest() { + return repositories.length == 1 + && repositories[0] != null + && "_all".equals(repositories[0]) == false + && Regex.isSimpleMatchPattern(repositories[0]) == false; + 
} + /** * Returns the names of the snapshots. * @@ -253,6 +285,15 @@ public int size() { return size; } + public int offset() { + return offset; + } + + public GetSnapshotsRequest offset(int offset) { + this.offset = offset; + return this; + } + public SortOrder order() { return order; } @@ -311,10 +352,20 @@ public static final class After implements Writeable { private final String value; + private final String repoName; + private final String snapshotName; After(StreamInput in) throws IOException { - this(in.readString(), in.readString()); + this(in.readString(), in.readString(), in.readString()); + } + + public static After fromQueryParam(String param) { + final String[] parts = new String(Base64.getUrlDecoder().decode(param), StandardCharsets.UTF_8).split(","); + if (parts.length != 3) { + throw new IllegalArgumentException("invalid ?after parameter [" + param + "]"); + } + return new After(parts[0], parts[1], parts[2]); } @Nullable @@ -339,11 +390,12 @@ public static After from(@Nullable SnapshotInfo snapshotInfo, SortBy sortBy) { default: throw new AssertionError("unknown sort column [" + sortBy + "]"); } - return new After(afterValue, snapshotInfo.snapshotId().getName()); + return new After(afterValue, snapshotInfo.repository(), snapshotInfo.snapshotId().getName()); } - public After(String value, String snapshotName) { + public After(String value, String repoName, String snapshotName) { this.value = value; + this.repoName = repoName; this.snapshotName = snapshotName; } @@ -355,9 +407,18 @@ public String snapshotName() { return snapshotName; } + public String repoName() { + return repoName; + } + + public String asQueryParam() { + return Base64.getUrlEncoder().encodeToString((value + "," + repoName + "," + snapshotName).getBytes(StandardCharsets.UTF_8)); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(value); + out.writeString(repoName); out.writeString(snapshotName); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 2a4c9ee61bec1..1a0ae82cc3929 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.snapshots.SnapshotInfo; /** * Get snapshots request builder @@ -98,8 +97,8 @@ public GetSnapshotsRequestBuilder setVerbose(boolean verbose) { return this; } - public GetSnapshotsRequestBuilder setAfter(@Nullable SnapshotInfo after, GetSnapshotsRequest.SortBy sortBy) { - return setAfter(GetSnapshotsRequest.After.from(after, sortBy)).setSort(sortBy); + public GetSnapshotsRequestBuilder setAfter(String after) { + return setAfter(after == null ? 
null : GetSnapshotsRequest.After.fromQueryParam(after)); } public GetSnapshotsRequestBuilder setAfter(@Nullable GetSnapshotsRequest.After after) { @@ -117,6 +116,11 @@ public GetSnapshotsRequestBuilder setSize(int size) { return this; } + public GetSnapshotsRequestBuilder setOffset(int offset) { + request.offset(offset); + return this; + } + public GetSnapshotsRequestBuilder setOrder(SortOrder order) { request.order(order); return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index df7755acc9082..fc323f114246b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -13,217 +13,204 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ParseField; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.core.Nullable; import org.elasticsearch.snapshots.SnapshotInfo; import java.io.IOException; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.Objects; /** * Get snapshots response */ public class GetSnapshotsResponse extends ActionResponse implements ToXContentObject { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + private static final int UNKNOWN_COUNT = -1; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser GET_SNAPSHOT_PARSER = new ConstructingObjectParser<>( GetSnapshotsResponse.class.getName(), true, - (args) -> new GetSnapshotsResponse((List) args[0]) + (args) -> new GetSnapshotsResponse( + (List) args[0], + (Map) args[1], + (String) args[2], + args[3] == null ? UNKNOWN_COUNT : (int) args[3], + args[4] == null ? 
UNKNOWN_COUNT : (int) args[4] + ) ); static { - PARSER.declareObjectArray( + GET_SNAPSHOT_PARSER.declareObjectArray( ConstructingObjectParser.constructorArg(), - (p, c) -> Response.fromXContent(p), - new ParseField("responses") + (p, c) -> SnapshotInfo.SNAPSHOT_INFO_PARSER.apply(p, c).build(), + new ParseField("snapshots") ); + GET_SNAPSHOT_PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> p.map(HashMap::new, ElasticsearchException::fromXContent), + new ParseField("failures") + ); + GET_SNAPSHOT_PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField("next")); + GET_SNAPSHOT_PARSER.declareIntOrNull(ConstructingObjectParser.optionalConstructorArg(), UNKNOWN_COUNT, new ParseField("total")); + GET_SNAPSHOT_PARSER.declareIntOrNull(ConstructingObjectParser.optionalConstructorArg(), UNKNOWN_COUNT, new ParseField("remaining")); } - public GetSnapshotsResponse(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - Map> successfulResponses = in.readMapOfLists(StreamInput::readString, SnapshotInfo::readFrom); - Map failedResponses = in.readMap(StreamInput::readString, StreamInput::readException); - this.successfulResponses = Collections.unmodifiableMap(successfulResponses); - this.failedResponses = Collections.unmodifiableMap(failedResponses); - } else { - this.successfulResponses = Collections.singletonMap("unknown", in.readList(SnapshotInfo::readFrom)); - this.failedResponses = Collections.emptyMap(); - } - } - - public static class Response { - private final String repository; - private final List snapshots; - private final ElasticsearchException error; + private final List snapshots; - private static final ConstructingObjectParser RESPONSE_PARSER = new ConstructingObjectParser<>( - Response.class.getName(), - true, - (args) -> new Response((String) args[0], (List) args[1], (ElasticsearchException) args[2]) - ); - - static { - RESPONSE_PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("repository")); - RESPONSE_PARSER.declareObjectArray( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> SnapshotInfo.SNAPSHOT_INFO_PARSER.apply(p, c).build(), - new ParseField("snapshots") - ); - RESPONSE_PARSER.declareObject( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - new ParseField("error") - ); - } + private final Map failures; - private Response(String repository, List snapshots, ElasticsearchException error) { - this.repository = repository; - this.snapshots = snapshots; - this.error = error; - } + @Nullable + private final String next; - public static Response snapshots(String repository, List snapshots) { - return new Response(repository, snapshots, null); - } + private final int total; - public static Response error(String repository, ElasticsearchException error) { - return new Response(repository, null, error); - } + private final int remaining; - private static Response fromXContent(XContentParser parser) throws IOException { - return RESPONSE_PARSER.parse(parser, null); - } + public GetSnapshotsResponse( + List snapshots, + Map failures, + @Nullable String next, + final int total, + final int remaining + ) { + this.snapshots = List.copyOf(snapshots); + this.failures = failures == null ? 
Map.of() : Map.copyOf(failures); + this.next = next; + this.total = total; + this.remaining = remaining; } - private final Map> successfulResponses; - private final Map failedResponses; - - public GetSnapshotsResponse(Collection responses) { - Map> successfulResponses = new HashMap<>(); - Map failedResponses = new HashMap<>(); - for (Response response : responses) { - if (response.snapshots != null) { - assert response.error == null; - successfulResponses.put(response.repository, response.snapshots); - } else { - assert response.snapshots == null; - failedResponses.put(response.repository, response.error); - } - } - this.successfulResponses = Collections.unmodifiableMap(successfulResponses); - this.failedResponses = Collections.unmodifiableMap(failedResponses); - } - - /** - * Returns list of snapshots for the specified repository. - * @param repo - repository name. - * @return list of snapshots. - * @throws IllegalArgumentException if there is no such repository in the response. - * @throws ElasticsearchException if an exception occurred when retrieving snapshots from the repository. - */ - public List getSnapshots(String repo) { - List snapshots = successfulResponses.get(repo); - if (snapshots != null) { - return snapshots; + public GetSnapshotsResponse(StreamInput in) throws IOException { + this.snapshots = in.readList(SnapshotInfo::readFrom); + if (in.getVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { + final Map failedResponses = in.readMap(StreamInput::readString, StreamInput::readException); + this.failures = Collections.unmodifiableMap(failedResponses); + this.next = in.readOptionalString(); + } else { + this.failures = Collections.emptyMap(); + this.next = null; } - ElasticsearchException error = failedResponses.get(repo); - if (error == null) { - throw new IllegalArgumentException("No such repository"); + if (in.getVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { + this.total = in.readVInt(); + this.remaining = in.readVInt(); + } else { + this.total = UNKNOWN_COUNT; + this.remaining = UNKNOWN_COUNT; } - throw error; } /** - * Returns list of repositories for both successful and unsuccessful responses. + * Returns the list of snapshots + * + * @return the list of snapshots */ - public Set getRepositories() { - return Sets.union(successfulResponses.keySet(), failedResponses.keySet()); + public List getSnapshots() { + return snapshots; } /** - * Returns a map of repository name to the list of {@link SnapshotInfo} for each successful response. + * Returns a map of repository name to {@link ElasticsearchException} for each unsuccessful response. */ - public Map> getSuccessfulResponses() { - return successfulResponses; + public Map getFailures() { + return failures; } - /** - * Returns a map of repository name to {@link ElasticsearchException} for each unsuccessful response. - */ - public Map getFailedResponses() { - return failedResponses; + @Nullable + public String next() { + return next; } /** * Returns true if there is a least one failed response. 
*/ public boolean isFailed() { - return failedResponses.isEmpty() == false; + return failures.isEmpty() == false; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startArray("responses"); - - for (Map.Entry> snapshots : successfulResponses.entrySet()) { - builder.startObject(); - builder.field("repository", snapshots.getKey()); - builder.startArray("snapshots"); - for (SnapshotInfo snapshot : snapshots.getValue()) { - snapshot.toXContent(builder, params); - } - builder.endArray(); - builder.endObject(); - } - - for (Map.Entry error : failedResponses.entrySet()) { - builder.startObject(); - builder.field("repository", error.getKey()); - ElasticsearchException.generateFailureXContent(builder, params, error.getValue(), true); - builder.endObject(); - } + public int totalCount() { + return total; + } - builder.endArray(); - builder.endObject(); - return builder; + public int remaining() { + return remaining; } @Override public void writeTo(StreamOutput out) throws IOException { + out.writeList(snapshots); if (out.getVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - out.writeMapOfLists(successfulResponses, StreamOutput::writeString, (o, s) -> s.writeTo(o)); - out.writeMap(failedResponses, StreamOutput::writeString, StreamOutput::writeException); + out.writeMap(failures, StreamOutput::writeString, StreamOutput::writeException); + out.writeOptionalString(next); } else { - if (successfulResponses.size() + failedResponses.size() != 1) { - throw new IllegalArgumentException( - "Requesting snapshots from multiple repositories is not supported in versions prior " - + "to " - + GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED.toString() - ); - } - - if (successfulResponses.size() == 1) { - out.writeList(successfulResponses.values().iterator().next()); + if (failures.isEmpty() == false) { + assert false : "transport action should have thrown directly for old version but saw " + failures; + throw failures.values().iterator().next(); } + } + if (out.getVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { + out.writeVInt(total); + out.writeVInt(remaining); + } + } - if (failedResponses.isEmpty() == false) { - throw failedResponses.values().iterator().next(); + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("snapshots"); + for (SnapshotInfo snapshotInfo : snapshots) { + snapshotInfo.toXContentExternal(builder, params); + } + builder.endArray(); + if (failures.isEmpty() == false) { + builder.startObject("failures"); + for (Map.Entry error : failures.entrySet()) { + builder.field(error.getKey(), (b, pa) -> { + b.startObject(); + error.getValue().toXContent(b, pa); + b.endObject(); + return b; + }); } + builder.endObject(); } + if (next != null) { + builder.field("next", next); + } + if (total >= 0) { + builder.field("total", total); + } + if (remaining >= 0) { + builder.field("remaining", remaining); + } + builder.endObject(); + return builder; } public static GetSnapshotsResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); + return GET_SNAPSHOT_PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetSnapshotsResponse that = (GetSnapshotsResponse) o; + return 
Objects.equals(snapshots, that.snapshots) && Objects.equals(failures, that.failures) && Objects.equals(next, that.next); + } + + @Override + public int hashCode() { + return Objects.hash(snapshots, failures, next); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 7a605f21776bb..2da538f628e76 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; @@ -35,13 +36,13 @@ import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -53,6 +54,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.function.Predicate; import java.util.function.ToLongFunction; @@ -106,6 +108,7 @@ protected void masterOperation( assert task instanceof CancellableTask : task + " not cancellable"; getMultipleReposSnapshotInfo( + request.isSingleRepositoryRequest() == false, state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY), TransportGetRepositoriesAction.getRepositories(state, request.repositories()), request.snapshots(), @@ -114,6 +117,7 @@ protected void masterOperation( (CancellableTask) task, request.sort(), request.after(), + request.offset(), request.size(), request.order(), listener @@ -121,6 +125,7 @@ protected void masterOperation( } private void getMultipleReposSnapshotInfo( + boolean isMultiRepoRequest, SnapshotsInProgress snapshotsInProgress, List repos, String[] snapshots, @@ -129,22 +134,45 @@ private void getMultipleReposSnapshotInfo( CancellableTask cancellableTask, GetSnapshotsRequest.SortBy sortBy, @Nullable GetSnapshotsRequest.After after, + int offset, int size, SortOrder order, ActionListener listener ) { // short-circuit if there are no repos, because we can not create GroupedActionListener of size 0 if (repos.isEmpty()) { - listener.onResponse(new GetSnapshotsResponse(Collections.emptyList())); + listener.onResponse(new GetSnapshotsResponse(Collections.emptyList(), Collections.emptyMap(), null, 0, 0)); return; } - final GroupedActionListener groupedActionListener = new GroupedActionListener<>( - listener.map(responses -> { + final GroupedActionListener, SnapshotsInRepo>> groupedActionListener = + new GroupedActionListener<>(listener.map(responses -> { assert repos.size() == 
responses.size(); - return new GetSnapshotsResponse(responses); - }), - repos.size() - ); + final List allSnapshots = responses.stream() + .map(Tuple::v2) + .filter(Objects::nonNull) + .flatMap(snapshotsInRepo -> snapshotsInRepo.snapshotInfos.stream()) + .collect(Collectors.toUnmodifiableList()); + final Map failures = responses.stream() + .map(Tuple::v1) + .filter(Objects::nonNull) + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + final SnapshotsInRepo snInfos = sortSnapshots(allSnapshots, sortBy, after, offset, size, order); + final List snapshotInfos = snInfos.snapshotInfos; + final int remaining = snInfos.remaining + responses.stream() + .map(Tuple::v2) + .filter(Objects::nonNull) + .mapToInt(s -> s.remaining) + .sum(); + return new GetSnapshotsResponse( + snapshotInfos, + failures, + remaining > 0 + ? GetSnapshotsRequest.After.from(snapshotInfos.get(snapshotInfos.size() - 1), sortBy).asQueryParam() + : null, + responses.stream().map(Tuple::v2).filter(Objects::nonNull).mapToInt(s -> s.totalCount).sum(), + remaining + ); + }), repos.size()); for (final RepositoryMetadata repo : repos) { final String repoName = repo.name(); @@ -157,15 +185,14 @@ private void getMultipleReposSnapshotInfo( cancellableTask, sortBy, after, - size, order, groupedActionListener.delegateResponse((groupedListener, e) -> { - if (e instanceof ElasticsearchException) { - groupedListener.onResponse(GetSnapshotsResponse.Response.error(repoName, (ElasticsearchException) e)); + if (isMultiRepoRequest && e instanceof ElasticsearchException) { + groupedListener.onResponse(Tuple.tuple(Tuple.tuple(repoName, (ElasticsearchException) e), null)); } else { groupedListener.onFailure(e); } - }).map(snInfos -> GetSnapshotsResponse.Response.snapshots(repoName, snInfos)) + }).map(snInfos -> Tuple.tuple(null, snInfos)) ); } } @@ -179,15 +206,14 @@ private void getSingleRepoSnapshotInfo( CancellableTask task, GetSnapshotsRequest.SortBy sortBy, @Nullable final GetSnapshotsRequest.After after, - int size, SortOrder order, - ActionListener> listener + ActionListener listener ) { - final Map allSnapshotIds = new HashMap<>(); + final Map allSnapshotIds = new HashMap<>(); final List currentSnapshots = new ArrayList<>(); - for (SnapshotInfo snapshotInfo : sortedCurrentSnapshots(snapshotsInProgress, repo, sortBy, after, size, order)) { - SnapshotId snapshotId = snapshotInfo.snapshotId(); - allSnapshotIds.put(snapshotId.getName(), snapshotId); + for (SnapshotInfo snapshotInfo : currentSnapshots(snapshotsInProgress, repo)) { + Snapshot snapshot = snapshotInfo.snapshot(); + allSnapshotIds.put(snapshot.getSnapshotId().getName(), snapshot); currentSnapshots.add(snapshotInfo); } @@ -211,7 +237,6 @@ private void getSingleRepoSnapshotInfo( task, sortBy, after, - size, order, listener ), @@ -226,14 +251,7 @@ private void getSingleRepoSnapshotInfo( * @param repositoryName repository name * @return list of snapshots */ - private static List sortedCurrentSnapshots( - SnapshotsInProgress snapshotsInProgress, - String repositoryName, - GetSnapshotsRequest.SortBy sortBy, - @Nullable final GetSnapshotsRequest.After after, - int size, - SortOrder order - ) { + private static List currentSnapshots(SnapshotsInProgress snapshotsInProgress, String repositoryName) { List snapshotList = new ArrayList<>(); List entries = SnapshotsService.currentSnapshots( snapshotsInProgress, @@ -243,7 +261,7 @@ private static List sortedCurrentSnapshots( for (SnapshotsInProgress.Entry entry : entries) { snapshotList.add(new SnapshotInfo(entry)); } - return 
sortSnapshots(snapshotList, sortBy, after, size, order); + return snapshotList; } private void loadSnapshotInfos( @@ -252,34 +270,32 @@ private void loadSnapshotInfos( String[] snapshots, boolean ignoreUnavailable, boolean verbose, - Map allSnapshotIds, + Map allSnapshotIds, List currentSnapshots, @Nullable RepositoryData repositoryData, CancellableTask task, GetSnapshotsRequest.SortBy sortBy, @Nullable final GetSnapshotsRequest.After after, - int size, SortOrder order, - ActionListener> listener + ActionListener listener ) { - if (task.isCancelled()) { - listener.onFailure(new TaskCancelledException("task cancelled")); + if (task.notifyIfCancelled(listener)) { return; } if (repositoryData != null) { for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - allSnapshotIds.put(snapshotId.getName(), snapshotId); + allSnapshotIds.put(snapshotId.getName(), new Snapshot(repo, snapshotId)); } } - final Set toResolve = new HashSet<>(); + final Set toResolve = new HashSet<>(); if (isAllSnapshots(snapshots)) { toResolve.addAll(allSnapshotIds.values()); } else { for (String snapshotOrPattern : snapshots) { if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { - toResolve.addAll(currentSnapshots.stream().map(SnapshotInfo::snapshotId).collect(Collectors.toList())); + toResolve.addAll(currentSnapshots.stream().map(SnapshotInfo::snapshot).collect(Collectors.toList())); } else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { if (allSnapshotIds.containsKey(snapshotOrPattern)) { toResolve.add(allSnapshotIds.get(snapshotOrPattern)); @@ -287,7 +303,7 @@ private void loadSnapshotInfos( throw new SnapshotMissingException(repo, snapshotOrPattern); } } else { - for (Map.Entry entry : allSnapshotIds.entrySet()) { + for (Map.Entry entry : allSnapshotIds.entrySet()) { if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) { toResolve.add(entry.getValue()); } @@ -301,19 +317,30 @@ private void loadSnapshotInfos( } if (verbose) { - snapshots(snapshotsInProgress, repo, toResolve, ignoreUnavailable, task, sortBy, after, size, order, listener); + snapshots( + snapshotsInProgress, + repo, + toResolve.stream().map(Snapshot::getSnapshotId).collect(Collectors.toUnmodifiableList()), + ignoreUnavailable, + task, + sortBy, + after, + order, + listener + ); } else { - final List snapshotInfos; + final SnapshotsInRepo snapshotInfos; if (repositoryData != null) { // want non-current snapshots as well, which are found in the repository data - snapshotInfos = buildSimpleSnapshotInfos(toResolve, repositoryData, currentSnapshots, sortBy, after, size, order); + snapshotInfos = buildSimpleSnapshotInfos(toResolve, repo, repositoryData, currentSnapshots, sortBy, after, order); } else { // only want current snapshots snapshotInfos = sortSnapshots( currentSnapshots.stream().map(SnapshotInfo::basic).collect(Collectors.toList()), sortBy, after, - size, + 0, + GetSnapshotsRequest.NO_LIMIT, order ); } @@ -336,12 +363,10 @@ private void snapshots( CancellableTask task, GetSnapshotsRequest.SortBy sortBy, @Nullable GetSnapshotsRequest.After after, - int size, SortOrder order, - ActionListener> listener + ActionListener listener ) { - if (task.isCancelled()) { - listener.onFailure(new TaskCancelledException("task cancelled")); + if (task.notifyIfCancelled(listener)) { return; } final Set snapshotSet = new HashSet<>(); @@ -367,7 +392,7 @@ private void snapshots( final ActionListener allDoneListener = listener.delegateFailure((l, v) -> { final ArrayList snapshotList = new ArrayList<>(snapshotInfos); 
snapshotList.addAll(snapshotSet); - listener.onResponse(sortSnapshots(snapshotList, sortBy, after, size, order)); + listener.onResponse(sortSnapshots(snapshotList, sortBy, after, 0, GetSnapshotsRequest.NO_LIMIT, order)); }); if (snapshotIdsToIterate.isEmpty()) { allDoneListener.onResponse(null); @@ -410,43 +435,43 @@ private boolean isCurrentSnapshotsOnly(String[] snapshots) { return (snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0])); } - private static List buildSimpleSnapshotInfos( - final Set toResolve, + private static SnapshotsInRepo buildSimpleSnapshotInfos( + final Set toResolve, + final String repoName, final RepositoryData repositoryData, final List currentSnapshots, final GetSnapshotsRequest.SortBy sortBy, @Nullable final GetSnapshotsRequest.After after, - final int size, final SortOrder order ) { List snapshotInfos = new ArrayList<>(); for (SnapshotInfo snapshotInfo : currentSnapshots) { - if (toResolve.remove(snapshotInfo.snapshotId())) { + if (toResolve.remove(snapshotInfo.snapshot())) { snapshotInfos.add(snapshotInfo.basic()); } } Map> snapshotsToIndices = new HashMap<>(); for (IndexId indexId : repositoryData.getIndices().values()) { for (SnapshotId snapshotId : repositoryData.getSnapshots(indexId)) { - if (toResolve.contains(snapshotId)) { + if (toResolve.contains(new Snapshot(repoName, snapshotId))) { snapshotsToIndices.computeIfAbsent(snapshotId, (k) -> new ArrayList<>()).add(indexId.getName()); } } } - for (SnapshotId snapshotId : toResolve) { - final List indices = snapshotsToIndices.getOrDefault(snapshotId, Collections.emptyList()); + for (Snapshot snapshot : toResolve) { + final List indices = snapshotsToIndices.getOrDefault(snapshot.getSnapshotId(), Collections.emptyList()); CollectionUtil.timSort(indices); snapshotInfos.add( new SnapshotInfo( - snapshotId, + snapshot, indices, Collections.emptyList(), Collections.emptyList(), - repositoryData.getSnapshotState(snapshotId) + repositoryData.getSnapshotState(snapshot.getSnapshotId()) ) ); } - return sortSnapshots(snapshotInfos, sortBy, after, size, order); + return sortSnapshots(snapshotInfos, sortBy, after, 0, GetSnapshotsRequest.NO_LIMIT, order); } private static final Comparator BY_START_TIME = Comparator.comparingLong(SnapshotInfo::startTime) @@ -461,12 +486,13 @@ private static List buildSimpleSnapshotInfos( private static final Comparator BY_NAME = Comparator.comparing(sni -> sni.snapshotId().getName()); - private static List sortSnapshots( - List snapshotInfos, - GetSnapshotsRequest.SortBy sortBy, - @Nullable GetSnapshotsRequest.After after, - int size, - SortOrder order + private static SnapshotsInRepo sortSnapshots( + final List snapshotInfos, + final GetSnapshotsRequest.SortBy sortBy, + final @Nullable GetSnapshotsRequest.After after, + final int offset, + final int size, + final SortOrder order ) { final Comparator comparator; switch (sortBy) { @@ -489,49 +515,93 @@ private static List sortSnapshots( Stream infos = snapshotInfos.stream(); if (after != null) { + assert offset == 0 : "can't combine after and offset but saw [" + after + "] and offset [" + offset + "]"; final Predicate isAfter; - final String name = after.snapshotName(); + final String snapshotName = after.snapshotName(); + final String repoName = after.repoName(); switch (sortBy) { case START_TIME: - isAfter = filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(after.value()), name, order); + isAfter = filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(after.value()), snapshotName, 
repoName, order); break; case NAME: - isAfter = order == SortOrder.ASC ? (info -> compareName(name, info) < 0) : (info -> compareName(name, info) > 0); + isAfter = order == SortOrder.ASC + ? (info -> compareName(snapshotName, repoName, info) < 0) + : (info -> compareName(snapshotName, repoName, info) > 0); break; case DURATION: - isAfter = filterByLongOffset(info -> info.endTime() - info.startTime(), Long.parseLong(after.value()), name, order); + isAfter = filterByLongOffset( + info -> info.endTime() - info.startTime(), + Long.parseLong(after.value()), + snapshotName, + repoName, + order + ); break; case INDICES: - isAfter = filterByLongOffset(info -> info.indices().size(), Integer.parseInt(after.value()), name, order); + isAfter = filterByLongOffset( + info -> info.indices().size(), + Integer.parseInt(after.value()), + snapshotName, + repoName, + order + ); break; default: throw new AssertionError("unexpected sort column [" + sortBy + "]"); } infos = infos.filter(isAfter); } - infos = infos.sorted(order == SortOrder.DESC ? comparator.reversed() : comparator); + infos = infos.sorted(order == SortOrder.DESC ? comparator.reversed() : comparator).skip(offset); + final List allSnapshots = infos.collect(Collectors.toUnmodifiableList()); + final List snapshots; if (size != GetSnapshotsRequest.NO_LIMIT) { - infos = infos.limit(size); + snapshots = allSnapshots.stream().limit(size + 1).collect(Collectors.toUnmodifiableList()); + } else { + snapshots = allSnapshots; } - return infos.collect(Collectors.toUnmodifiableList()); + final List resultSet = size != GetSnapshotsRequest.NO_LIMIT && size < snapshots.size() + ? snapshots.subList(0, size) + : snapshots; + return new SnapshotsInRepo(resultSet, snapshotInfos.size(), allSnapshots.size() - resultSet.size()); } private static Predicate filterByLongOffset( ToLongFunction extractor, long after, - String name, + String snapshotName, + String repoName, SortOrder order ) { return order == SortOrder.ASC ? 
info -> { final long val = extractor.applyAsLong(info); - return after < val || (after == val && compareName(name, info) < 0); + + return after < val || (after == val && compareName(snapshotName, repoName, info) < 0); } : info -> { final long val = extractor.applyAsLong(info); - return after > val || (after == val && compareName(name, info) > 0); + return after > val || (after == val && compareName(snapshotName, repoName, info) > 0); }; } - private static int compareName(String name, SnapshotInfo info) { - return name.compareTo(info.snapshotId().getName()); + private static int compareName(String name, String repoName, SnapshotInfo info) { + final int res = name.compareTo(info.snapshotId().getName()); + if (res != 0) { + return res; + } + return repoName.compareTo(info.repository()); + } + + private static final class SnapshotsInRepo { + + private final List snapshotInfos; + + private final int totalCount; + + private final int remaining; + + SnapshotsInRepo(List snapshotInfos, int totalCount, int remaining) { + this.snapshotInfos = snapshotInfos; + this.totalCount = totalCount; + this.remaining = remaining; + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotAction.java new file mode 100644 index 0000000000000..bdf822d79f4c5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotAction.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.snapshots.get.shard; + +import org.elasticsearch.action.ActionType; + +public class GetShardSnapshotAction extends ActionType { + + public static final GetShardSnapshotAction INSTANCE = new GetShardSnapshotAction(); + public static final String NAME = "internal:admin/snapshot/get_shard"; + + public GetShardSnapshotAction() { + super(NAME, GetShardSnapshotResponse::new); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java new file mode 100644 index 0000000000000..aa471d9d0fdfb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
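The "after" cursor handling above filters on the primary sort value first and falls back to the snapshot name and then the repository name as tie-breakers, so pagination stays stable even when several repositories hold snapshots with identical sort values. Below is a minimal standalone sketch of that seek-style filter; the Snap record and every name in it are illustrative stand-ins, not the real SnapshotInfo or GetSnapshotsRequest.After types.

import java.util.Comparator;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Illustrative stand-in for SnapshotInfo: only the fields the cursor needs.
record Snap(String repo, String name, long startTime) {}

class AfterCursorSketch {

    // Keep only entries strictly "after" the cursor: a later start time, or the same
    // start time with a (name, repo) pair that sorts after the cursor's pair.
    static Predicate<Snap> after(long afterTime, String afterName, String afterRepo) {
        return s -> {
            if (s.startTime() != afterTime) {
                return s.startTime() > afterTime;
            }
            int byName = s.name().compareTo(afterName);
            int tieBreak = byName != 0 ? byName : s.repo().compareTo(afterRepo);
            return tieBreak > 0;
        };
    }

    // Filter by the cursor, sort by (startTime, name, repo), then apply the page size.
    static List<Snap> page(List<Snap> all, long afterTime, String afterName, String afterRepo, int size) {
        return all.stream()
            .filter(after(afterTime, afterName, afterRepo))
            .sorted(Comparator.comparingLong(Snap::startTime)
                .thenComparing(Snap::name)
                .thenComparing(Snap::repo))
            .limit(size)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<Snap> snapshots = List.of(
            new Snap("repo-a", "snap-1", 100),
            new Snap("repo-b", "snap-1", 100),
            new Snap("repo-a", "snap-2", 200));
        // Resuming after (startTime=100, name=snap-1, repo=repo-a) yields the repo-b copy first, then snap-2.
        System.out.println(page(snapshots, 100, "snap-1", "repo-a", 10));
    }
}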
+ */ + +package org.elasticsearch.action.admin.cluster.snapshots.get.shard; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class GetShardSnapshotRequest extends MasterNodeRequest { + private static final String ALL_REPOSITORIES = "_all"; + + private final List repositories; + private final ShardId shardId; + + GetShardSnapshotRequest(List repositories, ShardId shardId) { + assert repositories.isEmpty() == false; + assert repositories.stream().noneMatch(Objects::isNull); + assert repositories.size() == 1 || repositories.stream().noneMatch(repo -> repo.equals(ALL_REPOSITORIES)); + this.repositories = Objects.requireNonNull(repositories); + this.shardId = Objects.requireNonNull(shardId); + } + + public GetShardSnapshotRequest(StreamInput in) throws IOException { + super(in); + this.repositories = in.readStringList(); + this.shardId = new ShardId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringCollection(repositories); + shardId.writeTo(out); + } + + public static GetShardSnapshotRequest latestSnapshotInAllRepositories(ShardId shardId) { + return new GetShardSnapshotRequest(Collections.singletonList(ALL_REPOSITORIES), shardId); + } + + public static GetShardSnapshotRequest latestSnapshotInRepositories(ShardId shardId, List repositories) { + if (repositories.isEmpty()) { + throw new IllegalArgumentException("Expected at least 1 repository but got none"); + } + + if (repositories.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("null values are not allowed in the repository list"); + } + return new GetShardSnapshotRequest(repositories, shardId); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (repositories.size() == 0) { + validationException = addValidationError("repositories are missing", validationException); + } + + return validationException; + } + + public boolean getFromAllRepositories() { + return repositories.size() == 1 && ALL_REPOSITORIES.equalsIgnoreCase(repositories.get(0)); + } + + public boolean isSingleRepositoryRequest() { + return repositories.size() == 1 && ALL_REPOSITORIES.equalsIgnoreCase(repositories.get(0)) == false; + } + + public ShardId getShardId() { + return shardId; + } + + public List getRepositories() { + return repositories; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetShardSnapshotRequest request = (GetShardSnapshotRequest) o; + return Objects.equals(repositories, request.repositories) && Objects.equals(shardId, request.shardId); + } + + @Override + public int hashCode() { + return Objects.hash(repositories, shardId); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponse.java new file mode 100644 index 0000000000000..08870248f2b8d --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponse.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.snapshots.get.shard; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.ShardSnapshotInfo; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; + +public class GetShardSnapshotResponse extends ActionResponse { + public static GetShardSnapshotResponse EMPTY = new GetShardSnapshotResponse(null, Collections.emptyMap()); + + private final ShardSnapshotInfo latestShardSnapshot; + private final Map repositoryFailures; + + GetShardSnapshotResponse(@Nullable ShardSnapshotInfo latestShardSnapshot, Map repositoryFailures) { + this.latestShardSnapshot = latestShardSnapshot; + this.repositoryFailures = repositoryFailures; + } + + GetShardSnapshotResponse(StreamInput in) throws IOException { + super(in); + this.latestShardSnapshot = in.readOptionalWriteable(ShardSnapshotInfo::new); + this.repositoryFailures = in.readMap(StreamInput::readString, RepositoryException::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(latestShardSnapshot); + out.writeMap(repositoryFailures, StreamOutput::writeString, (o, err) -> err.writeTo(o)); + } + + public Optional getFailureForRepository(String repository) { + return Optional.ofNullable(repositoryFailures.get(repository)); + } + + public Optional getLatestShardSnapshot() { + return Optional.ofNullable(latestShardSnapshot); + } + + public Map getRepositoryFailures() { + return repositoryFailures; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java new file mode 100644 index 0000000000000..3e1faee7aa7d1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
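GetShardSnapshotRequest and GetShardSnapshotResponse above make up the new internal:admin/snapshot/get_shard action, which resolves the latest successful snapshot containing a given shard and reports per-repository failures without failing the whole request. A hypothetical caller sketch follows; only the factory methods and accessors shown in this patch are real, while the surrounding class, the Client wiring, and the repository name are assumptions for illustration.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotAction;
import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.shard.ShardId;

class LatestShardSnapshotLookup {

    // Ask every registered repository for the most recent successful snapshot of one shard,
    // assuming the action is reachable through the node's Client.
    static void lookup(Client client, ShardId shardId) {
        GetShardSnapshotRequest request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId);
        // Or restrict the search to named repositories:
        // GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, List.of("my-repo"));
        client.execute(GetShardSnapshotAction.INSTANCE, request, ActionListener.wrap(
            (GetShardSnapshotResponse response) -> {
                response.getLatestShardSnapshot().ifPresentOrElse(
                    info -> System.out.println("latest shard snapshot: " + info.getSnapshot()),
                    () -> System.out.println("no snapshot contains this shard"));
                // Per-repository failures are surfaced separately instead of failing the whole request.
                response.getRepositoryFailures()
                    .forEach((repo, err) -> System.out.println(repo + " failed: " + err.getMessage()));
            },
            e -> System.out.println("request failed: " + e.getMessage())));
    }
}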
+ */ + +package org.elasticsearch.action.admin.cluster.snapshots.get.shard; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.RepositoriesMetadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexSnapshotsService; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.ShardSnapshotInfo; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Collection; +import java.util.Comparator; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class TransportGetShardSnapshotAction extends TransportMasterNodeAction { + private static final Comparator LATEST_SNAPSHOT_COMPARATOR = Comparator.comparing(ShardSnapshotInfo::getStartedAt) + .thenComparing(snapshotInfo -> snapshotInfo.getSnapshot().getSnapshotId()); + private final IndexSnapshotsService indexSnapshotsService; + + @Inject + public TransportGetShardSnapshotAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + RepositoriesService repositoriesService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetShardSnapshotAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetShardSnapshotRequest::new, + indexNameExpressionResolver, + GetShardSnapshotResponse::new, + ThreadPool.Names.SAME + ); + this.indexSnapshotsService = new IndexSnapshotsService(repositoriesService); + } + + @Override + protected void masterOperation( + Task task, + GetShardSnapshotRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + final Set repositories = getRequestedRepositories(request, state); + final ShardId shardId = request.getShardId(); + + if (repositories.isEmpty()) { + listener.onResponse(GetShardSnapshotResponse.EMPTY); + return; + } + + GroupedActionListener, RepositoryException>> groupedActionListener = new GroupedActionListener<>( + listener.map(this::transformToResponse), + repositories.size() + ); + + BlockingQueue repositoriesQueue = new LinkedBlockingQueue<>(repositories); + getShardSnapshots(repositoriesQueue, shardId, new ActionListener<>() { + @Override + public void onResponse(Optional shardSnapshotInfo) { + groupedActionListener.onResponse(Tuple.tuple(shardSnapshotInfo, null)); + } + + @Override + public void onFailure(Exception err) { + if (request.isSingleRepositoryRequest() == false && err instanceof RepositoryException) { + 
groupedActionListener.onResponse(Tuple.tuple(Optional.empty(), (RepositoryException) err)); + } else { + groupedActionListener.onFailure(err); + } + } + }); + } + + private void getShardSnapshots( + BlockingQueue repositories, + ShardId shardId, + ActionListener> listener + ) { + final String repository = repositories.poll(); + if (repository == null) { + return; + } + + indexSnapshotsService.getLatestSuccessfulSnapshotForShard( + repository, + shardId, + ActionListener.runAfter(listener, () -> getShardSnapshots(repositories, shardId, listener)) + ); + } + + private GetShardSnapshotResponse transformToResponse( + Collection, RepositoryException>> shardSnapshots + ) { + final Optional latestSnapshot = shardSnapshots.stream() + .map(Tuple::v1) + .filter(Objects::nonNull) + .filter(Optional::isPresent) + .map(Optional::get) + .max(LATEST_SNAPSHOT_COMPARATOR); + + final Map failures = shardSnapshots.stream() + .map(Tuple::v2) + .filter(Objects::nonNull) + .collect(Collectors.toMap(RepositoryException::repository, Function.identity())); + + return new GetShardSnapshotResponse(latestSnapshot.orElse(null), failures); + } + + private Set getRequestedRepositories(GetShardSnapshotRequest request, ClusterState state) { + RepositoriesMetadata repositories = state.metadata().custom(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY); + if (request.getFromAllRepositories()) { + return repositories.repositories().stream().map(RepositoryMetadata::name).collect(Collectors.toUnmodifiableSet()); + } + + return request.getRepositories().stream().filter(Objects::nonNull).collect(Collectors.toUnmodifiableSet()); + } + + @Override + protected ClusterBlockException checkBlock(GetShardSnapshotRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 3f76c07c5fe55..4edcaed358dc0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -36,6 +36,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryShardId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -46,7 +47,6 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -127,7 +127,7 @@ protected void masterOperation( Set nodesIds = new HashSet<>(); for (SnapshotsInProgress.Entry entry : currentSnapshots) { - for (ObjectCursor status : entry.shards().values()) { + for (ObjectCursor status : entry.shardsByRepoShardId().values()) { if (status.value.nodeId() != null) { nodesIds.add(status.value.nodeId()); } @@ -185,7 +185,8 @@ private void buildResponse( for (SnapshotsInProgress.Entry entry : currentSnapshotEntries) { 
currentSnapshotNames.add(entry.snapshot().getSnapshotId().getName()); List shardStatusBuilder = new ArrayList<>(); - for (ObjectObjectCursor shardEntry : entry.shards()) { + for (ObjectObjectCursor shardEntry : entry + .shardsByRepoShardId()) { SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.value; if (status.nodeId() != null) { // We should have information about this shard from the shard: @@ -193,7 +194,8 @@ private void buildResponse( if (nodeStatus != null) { Map shardStatues = nodeStatus.status().get(entry.snapshot()); if (shardStatues != null) { - SnapshotIndexShardStatus shardStatus = shardStatues.get(shardEntry.key); + final ShardId sid = entry.shardId(shardEntry.key); + SnapshotIndexShardStatus shardStatus = shardStatues.get(sid); if (shardStatus != null) { // We have full information about this shard if (shardStatus.getStage() == SnapshotIndexShardStage.DONE && shardEntry.value.state() != SUCCESS) { @@ -203,7 +205,7 @@ private void buildResponse( // technically if the data node failed before successfully reporting DONE state to master, then // this shards state would jump to a failed state. shardStatus = new SnapshotIndexShardStatus( - shardEntry.key, + sid, SnapshotIndexShardStage.FINALIZE, shardStatus.getStats(), shardStatus.getNodeId(), @@ -242,7 +244,7 @@ private void buildResponse( if (stage == SnapshotIndexShardStage.DONE) { // Shard snapshot completed successfully so we should be able to load the exact statistics for this // shard from the repository already. - final ShardId shardId = shardEntry.key; + final ShardId shardId = entry.shardId(shardEntry.key); shardStatus = new SnapshotIndexShardStatus( shardId, repositoriesService.repository(entry.repository()) @@ -254,7 +256,7 @@ private void buildResponse( .asCopy() ); } else { - shardStatus = new SnapshotIndexShardStatus(shardEntry.key, stage); + shardStatus = new SnapshotIndexShardStatus(entry.shardId(shardEntry.key), stage); } shardStatusBuilder.add(shardStatus); } @@ -293,7 +295,7 @@ private void loadRepositoryData( repositoriesService.getRepositoryData(repositoryName, repositoryDataListener); final Collection snapshotIdsToLoad = new ArrayList<>(); repositoryDataListener.whenComplete(repositoryData -> { - ensureNotCancelled(task); + task.ensureNotCancelled(); final Map matchedSnapshotIds = repositoryData.getSnapshotIds() .stream() .filter(s -> requestedSnapshotNames.contains(s.getName())) @@ -398,7 +400,7 @@ private Map snapshotShards( final Map shardStatus = new HashMap<>(); for (String index : snapshotInfo.indices()) { IndexId indexId = repositoryData.resolveIndexId(index); - ensureNotCancelled(task); + task.ensureNotCancelled(); IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repositoryData, snapshotInfo.snapshotId(), indexId); if (indexMetadata != null) { int numberOfShards = indexMetadata.getNumberOfShards(); @@ -419,7 +421,7 @@ private Map snapshotShards( // could not be taken due to partial being set to false. 
shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped"); } else { - ensureNotCancelled(task); + task.ensureNotCancelled(); shardSnapshotStatus = repository.getShardSnapshotStatus(snapshotInfo.snapshotId(), indexId, shardId); } shardStatus.put(shardId, shardSnapshotStatus); @@ -430,12 +432,6 @@ private Map snapshotShards( return unmodifiableMap(shardStatus); } - private static void ensureNotCancelled(CancellableTask task) { - if (task.isCancelled()) { - throw new TaskCancelledException("task cancelled"); - } - } - private static SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index ecb6717c311e3..df3f26bd8eb96 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -84,9 +83,11 @@ protected void masterOperation(Task task, final ClusterStateRequest request, fin @Override public void onNewClusterState(ClusterState newState) { - if (cancellableTask.isCancelled()) { - listener.onFailure(new TaskCancelledException("task cancelled")); - } else if (acceptableClusterStatePredicate.test(newState)) { + if (cancellableTask.notifyIfCancelled(listener)) { + return; + } + + if (acceptableClusterStatePredicate.test(newState)) { ActionListener.completeWith(listener, () -> buildResponse(request, newState)); } else { listener.onFailure(new NotMasterException( @@ -102,9 +103,7 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { try { - if (cancellableTask.isCancelled()) { - listener.onFailure(new TaskCancelledException("task cancelled")); - } else { + if (cancellableTask.notifyIfCancelled(listener) == false) { listener.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); } } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 21d4747fa9f4e..0a4550b624847 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.node.NodeService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; @@ -134,9 +133,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq List shardsStats = new ArrayList<>(); for (IndexService indexService : indicesService) { 
for (IndexShard indexShard : indexService) { - if (cancellableTask.isCancelled()) { - throw new TaskCancelledException("task cancelled"); - } + cancellableTask.ensureNotCancelled(); if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { // only report on fully started shards CommitStats commitStats; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 42644a9260056..7d2646e25ec69 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.AliasesRequest; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.cluster.metadata.AliasAction; @@ -48,7 +49,7 @@ /** * A request to add/remove aliases for one or more indices. */ -public class IndicesAliasesRequest extends AcknowledgedRequest implements ToXContentObject { +public class IndicesAliasesRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { private List allAliasActions = new ArrayList<>(); private String origin = ""; @@ -482,6 +483,11 @@ public String[] indices() { return indices; } + @Override + public boolean includeDataStreams() { + return true; + } + @Override public IndicesOptions indicesOptions() { return INDICES_OPTIONS; @@ -615,6 +621,18 @@ public IndicesOptions indicesOptions() { return INDICES_OPTIONS; } + @Override + public String[] indices() { + return allAliasActions.stream() + .flatMap(aliasActions -> Arrays.stream(aliasActions.indices())) + .toArray(String[]::new); + } + + @Override + public boolean includeDataStreams() { + return true; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 2b1ca606698a7..87c595690a543 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -21,14 +21,18 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasAction; +import org.elasticsearch.cluster.metadata.AliasAction.AddDataStreamAlias; import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexAliasesService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; 
+import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.Index; import org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException; import org.elasticsearch.tasks.Task; @@ -44,6 +48,7 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableList; @@ -94,23 +99,6 @@ protected void masterOperation(Task task, final IndicesAliasesRequest request, f List concreteDataStreams = indexNameExpressionResolver.dataStreamNames(state, request.indicesOptions(), action.indices()); if (concreteDataStreams.size() != 0) { - // Fail if parameters are used that data streams don't support: - if (action.filter() != null) { - throw new IllegalArgumentException("aliases that point to data streams don't support filters"); - } - if (action.routing() != null) { - throw new IllegalArgumentException("aliases that point to data streams don't support routing"); - } - if (action.indexRouting() != null) { - throw new IllegalArgumentException("aliases that point to data streams don't support index_routing"); - } - if (action.searchRouting() != null) { - throw new IllegalArgumentException("aliases that point to data streams don't support search_routing"); - } - if (action.isHidden() != null) { - throw new IllegalArgumentException("aliases that point to data streams don't support is_hidden"); - } - // Fail if expressions match both data streams and regular indices: String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), true, action.indices()); List nonBackingIndices = Arrays.stream(concreteIndices) @@ -118,27 +106,49 @@ protected void masterOperation(Task task, final IndicesAliasesRequest request, f .filter(ia -> ia.getParentDataStream() == null) .map(IndexAbstraction::getName) .collect(Collectors.toList()); - if (nonBackingIndices.isEmpty() == false) { - throw new IllegalArgumentException("expressions " + Arrays.toString(action.indices()) + - " that match with both data streams and regular indices are disallowed"); - } - switch (action.actionType()) { case ADD: + // Fail if parameters are used that data stream aliases don't support: + if (action.routing() != null) { + throw new IllegalArgumentException("aliases that point to data streams don't support routing"); + } + if (action.indexRouting() != null) { + throw new IllegalArgumentException("aliases that point to data streams don't support index_routing"); + } + if (action.searchRouting() != null) { + throw new IllegalArgumentException("aliases that point to data streams don't support search_routing"); + } + if (action.isHidden() != null) { + throw new IllegalArgumentException("aliases that point to data streams don't support is_hidden"); + } + // Fail if expressions match both data streams and regular indices: + if (nonBackingIndices.isEmpty() == false) { + throw new IllegalArgumentException("expressions " + Arrays.toString(action.indices()) + + " that match with both data streams and regular indices are disallowed"); + } for (String dataStreamName : concreteDataStreams) { - finalActions.add(new AliasAction.AddDataStreamAlias(action.aliases()[0], dataStreamName, action.writeIndex())); + for (String alias : concreteDataStreamAliases(action, state.metadata(), dataStreamName)) { + finalActions.add(new AddDataStreamAlias(alias, dataStreamName, action.writeIndex(), action.filter())); + } } - break; + continue; case REMOVE: for (String dataStreamName : concreteDataStreams) { - 
finalActions.add( - new AliasAction.RemoveDataStreamAlias(action.aliases()[0], dataStreamName, action.mustExist())); + for (String alias : concreteDataStreamAliases(action, state.metadata(), dataStreamName)) { + finalActions.add( + new AliasAction.RemoveDataStreamAlias(alias, dataStreamName, action.mustExist())); + } + } + if (nonBackingIndices.isEmpty() == false) { + // Regular aliases/indices match as well with the provided expression. + // (Only when adding new aliases, matching both data streams and indices is disallowed) + break; + } else { + continue; } - break; default: throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]"); } - continue; } final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), false, @@ -216,4 +226,23 @@ private static String[] concreteAliases(AliasActions action, Metadata metadata, return action.aliases(); } } + + private static String[] concreteDataStreamAliases(AliasActions action, Metadata metadata, String concreteDataStreamName) { + if (action.expandAliasesWildcards()) { + //for DELETE we expand the aliases + Stream stream = metadata.dataStreamAliases().values().stream() + .filter(alias -> alias.getDataStreams().contains(concreteDataStreamName)) + .map(DataStreamAlias::getName); + + String[] aliasPatterns = action.aliases(); + if (Strings.isAllOrWildcard(aliasPatterns) == false) { + stream = stream.filter(alias -> Regex.simpleMatch(aliasPatterns, alias)); + } + + return stream.toArray(String[]::new); + } else { + //for ADD and REMOVE_INDEX we just return the current aliases + return action.aliases(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 1ea9a9c0e4f4f..c5fe0cea2d9ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.transport.Transports; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -135,13 +134,18 @@ private static void checkSystemIndexAccess(GetAliasesRequest request, SystemIndi throw new IllegalArgumentException("Unexpected system index access level: " + systemIndexAccessLevel); } + List netNewSystemIndices = new ArrayList<>(); List systemIndicesNames = new ArrayList<>(); for (Iterator it = aliasesMap.keysIt(); it.hasNext(); ) { String indexName = it.next(); IndexMetadata index = state.metadata().index(indexName); if (index != null && index.isSystem()) { if (systemIndexAccessAllowPredicate.test(index) == false) { - systemIndicesNames.add(indexName); + if (systemIndices.isNetNewSystemIndex(indexName)) { + netNewSystemIndices.add(indexName); + } else { + systemIndicesNames.add(indexName); + } } } } @@ -149,9 +153,11 @@ private static void checkSystemIndexAccess(GetAliasesRequest request, SystemIndi deprecationLogger.deprecate(DeprecationCategory.API, "open_system_index_access", "this request accesses system indices: {}, but in a future major version, direct access to system " + "indices will be prevented by default", systemIndicesNames); - } else { - checkSystemAliasAccess(request, systemIndices, systemIndexAccessLevel, threadContext); } + if (netNewSystemIndices.isEmpty() == false) 
{ + throw systemIndices.netNewSystemIndexAccessException(threadContext, netNewSystemIndices); + } + checkSystemAliasAccess(request, systemIndices, systemIndexAccessLevel, threadContext); } private static void checkSystemAliasAccess(GetAliasesRequest request, SystemIndices systemIndices, @@ -165,14 +171,27 @@ private static void checkSystemAliasAccess(GetAliasesRequest request, SystemIndi throw new IllegalArgumentException("Unexpected system index access level: " + systemIndexAccessLevel); } - final List systemAliases = Arrays.stream(request.aliases()) - .filter(systemIndices::isSystemName) - .filter(systemIndexAccessAllowPredicate) - .collect(Collectors.toList()); + final List systemAliases = new ArrayList<>(); + final List netNewSystemAliases = new ArrayList<>(); + for (String alias : request.aliases()) { + if (systemIndices.isSystemName(alias)) { + if (systemIndexAccessAllowPredicate.test(alias)) { + if (systemIndices.isNetNewSystemIndex(alias)) { + netNewSystemAliases.add(alias); + } else { + systemAliases.add(alias); + } + } + } + } + if (systemAliases.isEmpty() == false) { deprecationLogger.deprecate(DeprecationCategory.API, "open_system_alias_access", - "this request accesses aliases with names reserved for system indices: {}, but in a future major version, direct" + + "this request accesses aliases with names reserved for system indices: {}, but in a future major version, direct " + "access to system indices and their aliases will not be allowed", systemAliases); } + if (netNewSystemAliases.isEmpty() == false) { + throw systemIndices.netNewSystemIndexAccessException(threadContext, netNewSystemAliases); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 528abd93c9832..30d0de86e252b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -26,10 +26,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.Objects; import java.util.Set; @@ -72,6 +74,21 @@ protected void masterOperation(Task task, final CreateIndexRequest request, fina final SystemIndexDescriptor mainDescriptor = systemIndices.findMatchingDescriptor(indexName); final boolean isSystemIndex = mainDescriptor != null && mainDescriptor.isAutomaticallyManaged(); + if (mainDescriptor != null && mainDescriptor.isNetNew()) { + final SystemIndexAccessLevel systemIndexAccessLevel = systemIndices.getSystemIndexAccessLevel(threadPool.getThreadContext()); + if (systemIndexAccessLevel != SystemIndexAccessLevel.ALL) { + if (systemIndexAccessLevel == SystemIndexAccessLevel.RESTRICTED) { + if (systemIndices.getProductSystemIndexNamePredicate(threadPool.getThreadContext()).test(indexName) == false) { + throw systemIndices.netNewSystemIndexAccessException(threadPool.getThreadContext(), List.of(indexName)); + } + } else { + // BACKWARDS_COMPATIBLE_ONLY should never be a possibility here, it cannot be returned from getSystemIndexAccessLevel + assert 
systemIndexAccessLevel == SystemIndexAccessLevel.NONE : + "Expected no system index access but level is " + systemIndexAccessLevel; + throw systemIndices.netNewSystemIndexAccessException(threadPool.getThreadContext(), List.of(indexName)); + } + } + } final CreateIndexClusterStateUpdateRequest updateRequest; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeDiskUsageShardRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeDiskUsageShardRequest.java new file mode 100644 index 0000000000000..7f3273eedea4f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeDiskUsageShardRequest.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; + +final class AnalyzeDiskUsageShardRequest extends BroadcastShardRequest { + final boolean flush; + + AnalyzeDiskUsageShardRequest(ShardId shardId, AnalyzeIndexDiskUsageRequest request) { + super(shardId, request); + this.flush = request.flush; + } + + + AnalyzeDiskUsageShardRequest(StreamInput in) throws IOException { + super(in); + this.flush = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(flush); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return AnalyzeDiskUsageShardRequest.this.getDescription(); + } + }; + } + + @Override + public String getDescription() { + return "Analyze disk usage shard [" + shardId() + "], flush [" + flush + "]"; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeDiskUsageShardResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeDiskUsageShardResponse.java new file mode 100644 index 0000000000000..331b97830f36b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeDiskUsageShardResponse.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.Objects; + +final class AnalyzeDiskUsageShardResponse extends BroadcastShardResponse { + final IndexDiskUsageStats stats; + + AnalyzeDiskUsageShardResponse(StreamInput in) throws IOException { + super(in); + stats = new IndexDiskUsageStats(in); + } + + AnalyzeDiskUsageShardResponse(ShardId shardId, IndexDiskUsageStats stats) { + super(shardId); + this.stats = Objects.requireNonNull(stats, "stats must be non null"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + stats.writeTo(out); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageAction.java new file mode 100644 index 0000000000000..ee6d7b14f1930 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageAction.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.action.ActionType; + +public class AnalyzeIndexDiskUsageAction extends ActionType { + public static final AnalyzeIndexDiskUsageAction INSTANCE = new AnalyzeIndexDiskUsageAction(); + public static final String NAME = "indices:admin/analyze_disk_usage"; + + public AnalyzeIndexDiskUsageAction() { + super(NAME, AnalyzeIndexDiskUsageResponse::new); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageRequest.java new file mode 100644 index 0000000000000..08ca99e316bed --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageRequest.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class AnalyzeIndexDiskUsageRequest extends BroadcastRequest { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, false, true); + final boolean flush; + + public AnalyzeIndexDiskUsageRequest(String[] indices, IndicesOptions indicesOptions, boolean flush) { + super(indices, indicesOptions); + this.flush = flush; + } + + public AnalyzeIndexDiskUsageRequest(StreamInput in) throws IOException { + super(in); + this.flush = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(flush); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationError = super.validate(); + if (indices.length == 0) { + validationError = addValidationError("indices must be specified for disk usage request", validationError); + } + return validationError; + } + + @Override + public void setParentTask(String parentTaskNode, long parentTaskId) { + super.setParentTask(parentTaskNode, parentTaskId); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, AnalyzeIndexDiskUsageAction.NAME, type, "", parentTaskId, headers) { + @Override + public String getDescription() { + return AnalyzeIndexDiskUsageRequest.this.getDescription(); + } + }; + } + + @Override + public String getDescription() { + return "analyze disk usage indices [" + String.join(",", indices) + "], flush [" + flush + "]"; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageResponse.java new file mode 100644 index 0000000000000..f390da3f9024d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/AnalyzeIndexDiskUsageResponse.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public final class AnalyzeIndexDiskUsageResponse extends BroadcastResponse { + private final Map stats; + + AnalyzeIndexDiskUsageResponse(int totalShards, int successfulShards, int failedShards, + List shardFailures, + Map stats) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.stats = stats; + } + + AnalyzeIndexDiskUsageResponse(StreamInput in) throws IOException { + super(in); + stats = in.readMap(StreamInput::readString, IndexDiskUsageStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(stats, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + } + + Map getStats() { + return stats; + } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + final List> entries = stats.entrySet().stream() + .sorted(Map.Entry.comparingByKey()) + .collect(Collectors.toList()); + for (Map.Entry entry : entries) { + builder.startObject(entry.getKey()); + entry.getValue().toXContent(builder, params); + builder.endObject(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java new file mode 100644 index 0000000000000..972459d97a831 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java @@ -0,0 +1,738 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PointsReader; +import org.apache.lucene.codecs.StoredFieldsReader; +import org.apache.lucene.codecs.TermVectorsReader; +import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat; +import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.TermState; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.FutureArrays; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.FilterIndexCommit; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.LuceneFilesExtensions; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Analyze the disk usage of each field in the index. 
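+ * To keep the analysis cheap, the analyzer wraps the commit's directory so that every read is recorded, then iterates the + * postings, stored fields, doc values, points, norms, and term vectors of each field and attributes the bytes read to that field.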
+ */ + final class IndexDiskUsageAnalyzer { + private final Logger logger; + private final IndexCommit commit; + private final TrackingReadBytesDirectory directory; + private final CancellationChecker cancellationChecker; + + private IndexDiskUsageAnalyzer(ShardId shardId, IndexCommit commit, Runnable checkForCancellation) { + this.logger = Loggers.getLogger(IndexDiskUsageAnalyzer.class, shardId); + this.directory = new TrackingReadBytesDirectory(commit.getDirectory()); + this.commit = new FilterIndexCommit(commit) { + @Override + public Directory getDirectory() { + return directory; + } + }; + this.cancellationChecker = new CancellationChecker(checkForCancellation); + } + + static IndexDiskUsageStats analyze(ShardId shardId, IndexCommit commit, Runnable checkForCancellation) throws IOException { + final IndexDiskUsageAnalyzer analyzer = new IndexDiskUsageAnalyzer(shardId, commit, checkForCancellation); + final IndexDiskUsageStats stats = new IndexDiskUsageStats(getIndexSize(commit)); + analyzer.doAnalyze(stats); + return stats; + } + + void doAnalyze(IndexDiskUsageStats stats) throws IOException { + long startTimeInNanos; + final ExecutionTime executionTime = new ExecutionTime(); + try (DirectoryReader directoryReader = DirectoryReader.open(commit)) { + directory.resetBytesRead(); + for (LeafReaderContext leaf : directoryReader.leaves()) { + cancellationChecker.checkForCancellation(); + final SegmentReader reader = Lucene.segmentReader(leaf.reader()); + + startTimeInNanos = System.nanoTime(); + analyzeInvertedIndex(reader, stats); + executionTime.invertedIndexTimeInNanos += System.nanoTime() - startTimeInNanos; + + startTimeInNanos = System.nanoTime(); + analyzeStoredFields(reader, stats); + executionTime.storedFieldsTimeInNanos += System.nanoTime() - startTimeInNanos; + + startTimeInNanos = System.nanoTime(); + analyzeDocValues(reader, stats); + executionTime.docValuesTimeInNanos += System.nanoTime() - startTimeInNanos; + + startTimeInNanos = System.nanoTime(); + analyzePoints(reader, stats); + executionTime.pointsTimeInNanos += System.nanoTime() - startTimeInNanos; + + startTimeInNanos = System.nanoTime(); + analyzeNorms(reader, stats); + executionTime.normsTimeInNanos += System.nanoTime() - startTimeInNanos; + + startTimeInNanos = System.nanoTime(); + analyzeTermVectors(reader, stats); + executionTime.termVectorsTimeInNanos += System.nanoTime() - startTimeInNanos; + } + } + logger.debug("analyzing the disk usage took {} stats: {}", executionTime, stats); + } + + void analyzeStoredFields(SegmentReader reader, IndexDiskUsageStats stats) throws IOException { + final StoredFieldsReader storedFieldsReader = reader.getFieldsReader().getMergeInstance(); + directory.resetBytesRead(); + final TrackingSizeStoredFieldVisitor visitor = new TrackingSizeStoredFieldVisitor(); + int docID = 0; + final int skipMask = 0x1FF; // 511 + while (docID < reader.maxDoc()) { + cancellationChecker.logEvent(); + storedFieldsReader.visitDocument(docID, visitor); + // As we already estimate the size of stored fields, we can trade off the accuracy for the speed of the estimate. + // Here we only visit 1/11 documents instead of all documents. Ideally, we should visit 1 doc then skip 10 docs + // to avoid missing many skew documents. But, documents are stored in chunks in compressed format and a chunk can + // have up to 4096 docs, we need to skip a large number of docs to avoid loading/decompressing some chunks. 
+ if ((docID & skipMask) == skipMask && docID < reader.maxDoc() - 512) { + docID = Math.toIntExact(Math.min(docID + 5120L, reader.maxDoc() - 512L)); // always visit both ends + } else { + docID++; + } + } + if (visitor.fields.isEmpty() == false) { + // Computing the compression ratio for each chunk would provide a better estimate for each field individually. + // But it's okay to do this entire segment because source and _id are the only two stored fields in ES most the cases. + final long totalBytes = visitor.fields.values().stream().mapToLong(v -> v).sum(); + final double ratio = (double) directory.getBytesRead() / (double) totalBytes; + final FieldInfos fieldInfos = reader.getFieldInfos(); + for (Map.Entry field : visitor.fields.entrySet()) { + final String fieldName = fieldInfos.fieldInfo(field.getKey()).name; + final long fieldSize = (long) Math.ceil(field.getValue() * ratio); + stats.addStoredField(fieldName, fieldSize); + } + } + } + + private static class TrackingSizeStoredFieldVisitor extends StoredFieldVisitor { + private final Map fields = new HashMap<>(); + + private void trackField(FieldInfo fieldInfo, int fieldLength) { + final int totalBytes = fieldLength + Long.BYTES; // a Long for bitsAndInfo + fields.compute(fieldInfo.number, (k, v) -> v == null ? totalBytes : v + totalBytes); + } + + @Override + public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + trackField(fieldInfo, Integer.BYTES + value.length); + } + + @Override + public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException { + trackField(fieldInfo, Integer.BYTES + value.length); + } + + @Override + public void intField(FieldInfo fieldInfo, int value) throws IOException { + trackField(fieldInfo, Integer.BYTES); + } + + @Override + public void longField(FieldInfo fieldInfo, long value) throws IOException { + trackField(fieldInfo, Long.BYTES); + } + + @Override + public void floatField(FieldInfo fieldInfo, float value) throws IOException { + trackField(fieldInfo, Float.BYTES); + } + + @Override + public void doubleField(FieldInfo fieldInfo, double value) throws IOException { + trackField(fieldInfo, Double.BYTES); + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + return Status.YES; + } + } + + private DV iterateDocValues(int maxDocs, + CheckedSupplier dvReader, + CheckedConsumer valueAccessor) throws IOException { + // As we track the min/max positions of read bytes, we just visit the first and last values of the docValues iterator. 
+ // Here we use a binary search like to visit the right most index that has values + DV dv = dvReader.get(); + int docID; + if ((docID = dv.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + valueAccessor.accept(dv); + long left = docID; + long right = 2L * (maxDocs - 1L) - left; // starts with the last index + while (left < maxDocs - 1L && left <= right) { + cancellationChecker.logEvent(); + final int mid = Math.toIntExact((left + right) >>> 1); + if ((docID = dv.advance(mid)) != DocIdSetIterator.NO_MORE_DOCS) { + valueAccessor.accept(dv); + left = docID + 1; + } else { + right = mid - 1; + dv = dvReader.get(); + } + } + assert dv.advance(Math.toIntExact(left + 1)) == DocIdSetIterator.NO_MORE_DOCS; + } + return dv; + } + + void analyzeDocValues(SegmentReader reader, IndexDiskUsageStats stats) throws IOException { + if (reader.getDocValuesReader() == null) { + return; + } + final DocValuesProducer docValuesReader = reader.getDocValuesReader().getMergeInstance(); + final int maxDocs = reader.maxDoc(); + for (FieldInfo field : reader.getFieldInfos()) { + final DocValuesType dvType = field.getDocValuesType(); + if (dvType == DocValuesType.NONE) { + continue; + } + cancellationChecker.checkForCancellation(); + directory.resetBytesRead(); + switch (dvType) { + case NUMERIC: + iterateDocValues(maxDocs, () -> docValuesReader.getNumeric(field), NumericDocValues::longValue); + break; + case SORTED_NUMERIC: + iterateDocValues(maxDocs, () -> docValuesReader.getSortedNumeric(field), dv -> { + for (int i = 0; i < dv.docValueCount(); i++) { + cancellationChecker.logEvent(); + dv.nextValue(); + } + }); + break; + case BINARY: + iterateDocValues(maxDocs, () -> docValuesReader.getBinary(field), BinaryDocValues::binaryValue); + break; + case SORTED: + SortedDocValues sorted = iterateDocValues(maxDocs, () -> docValuesReader.getSorted(field), SortedDocValues::ordValue); + sorted.lookupOrd(0); + sorted.lookupOrd(sorted.getValueCount() - 1); + break; + case SORTED_SET: + SortedSetDocValues sortedSet = iterateDocValues(maxDocs, () -> docValuesReader.getSortedSet(field), dv -> { + while (dv.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) { + cancellationChecker.logEvent(); + } + }); + sortedSet.lookupOrd(0); + sortedSet.lookupOrd(sortedSet.getValueCount() - 1); + break; + default: + assert false : "Unknown docValues type [" + dvType + "]"; + throw new IllegalStateException("Unknown docValues type [" + dvType + "]"); + } + stats.addDocValues(field.name, directory.getBytesRead()); + } + } + + private void readProximity(Terms terms, PostingsEnum postings) throws IOException { + if (terms.hasPositions()) { + for (int pos = 0; pos < postings.freq(); pos++) { + postings.nextPosition(); + postings.startOffset(); + postings.endOffset(); + postings.getPayload(); + } + } + } + + private BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException { + if (term != null && termsEnum.seekExact(term)) { + final TermState termState = termsEnum.termState(); + if (termState instanceof Lucene84PostingsFormat.IntBlockTermState) { + final Lucene84PostingsFormat.IntBlockTermState blockTermState = (Lucene84PostingsFormat.IntBlockTermState) termState; + return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); + } + if (termState instanceof Lucene50PostingsFormat.IntBlockTermState) { + final Lucene50PostingsFormat.IntBlockTermState blockTermState = (Lucene50PostingsFormat.IntBlockTermState) termState; + return new BlockTermState(blockTermState.docStartFP, 
blockTermState.posStartFP, blockTermState.payStartFP); + } + } + return null; + } + + private static class BlockTermState { + final long docStartFP; + final long posStartFP; + final long payloadFP; + + BlockTermState(long docStartFP, long posStartFP, long payloadFP) { + this.docStartFP = docStartFP; + this.posStartFP = posStartFP; + this.payloadFP = payloadFP; + } + + long distance(BlockTermState other) { + return this.docStartFP - other.docStartFP + this.posStartFP - other.posStartFP + this.payloadFP - other.payloadFP; + } + } + + void analyzeInvertedIndex(SegmentReader reader, IndexDiskUsageStats stats) throws IOException { + FieldsProducer postingsReader = reader.getPostingsReader(); + if (postingsReader == null) { + return; + } + postingsReader = postingsReader.getMergeInstance(); + PostingsEnum postings = null; + for (FieldInfo field : reader.getFieldInfos()) { + if (field.getIndexOptions() == IndexOptions.NONE) { + continue; + } + cancellationChecker.checkForCancellation(); + directory.resetBytesRead(); + final Terms terms = postingsReader.terms(field.name); + if (terms == null) { + continue; + } + // It's expensive to look up every term and visit every document of the postings lists of all terms. + // As we track the min/max positions of read bytes, we just visit the two ends of a partition containing + // the data. We might miss some small parts of the data, but it's an good trade-off to speed up the process. + TermsEnum termsEnum = terms.iterator(); + final BlockTermState minState = getBlockTermState(termsEnum, terms.getMin()); + if (minState != null) { + final BlockTermState maxState = Objects.requireNonNull( + getBlockTermState(termsEnum, terms.getMax()), "can't retrieve the block term state of the max term"); + final long skippedBytes = maxState.distance(minState); + stats.addInvertedIndex(field.name, skippedBytes); + termsEnum.seekExact(terms.getMax()); + postings = termsEnum.postings(postings, PostingsEnum.ALL); + if (postings.advance(termsEnum.docFreq() - 1) != DocIdSetIterator.NO_MORE_DOCS) { + postings.freq(); + readProximity(terms, postings); + } + final long bytesRead = directory.getBytesRead(); + int visitedTerms = 0; + final long totalTerms = terms.size(); + termsEnum = terms.iterator(); + // Iterate until we really access the first terms, but iterate all if the number of terms is small + while (termsEnum.next() != null) { + cancellationChecker.logEvent(); + ++visitedTerms; + if (totalTerms > 1000 && visitedTerms % 50 == 0 && directory.getBytesRead() > bytesRead) { + break; + } + } + } else { + // We aren't sure if the optimization can be applied for other implementations rather than the BlockTree + // based implementation. Hence, we just traverse every postings of all terms in this case. 
+ while (termsEnum.next() != null) { + cancellationChecker.logEvent(); + termsEnum.docFreq(); + termsEnum.totalTermFreq(); + postings = termsEnum.postings(postings, PostingsEnum.ALL); + while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + cancellationChecker.logEvent(); + postings.freq(); + readProximity(terms, postings); + } + } + } + stats.addInvertedIndex(field.name, directory.getBytesRead()); + } + } + + void analyzePoints(SegmentReader reader, IndexDiskUsageStats stats) throws IOException { + PointsReader pointsReader = reader.getPointsReader(); + if (pointsReader == null) { + return; + } + pointsReader = pointsReader.getMergeInstance(); + for (FieldInfo field : reader.getFieldInfos()) { + cancellationChecker.checkForCancellation(); + directory.resetBytesRead(); + if (field.getPointDimensionCount() > 0) { + final PointValues values = pointsReader.getValues(field.name); + values.intersect(new PointsVisitor(values.getMinPackedValue(), values.getNumDimensions(), values.getBytesPerDimension())); + values.intersect(new PointsVisitor(values.getMaxPackedValue(), values.getNumDimensions(), values.getBytesPerDimension())); + stats.addPoints(field.name, directory.getBytesRead()); + } + } + } + + private class PointsVisitor implements PointValues.IntersectVisitor { + private final byte[] point; + private final int numDims; + private final int bytesPerDim; + + PointsVisitor(byte[] point, int numDims, int bytesPerDim) { + this.point = point; + this.numDims = numDims; + this.bytesPerDim = bytesPerDim; + } + + @Override + public void visit(int docID) throws IOException { + cancellationChecker.logEvent(); + } + + @Override + public void visit(int docID, byte[] packedValue) throws IOException { + cancellationChecker.logEvent(); + } + + @Override + public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { + for (int dim = 0; dim < numDims; dim++) { + int offset = dim * bytesPerDim; + if (FutureArrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, point, offset, offset + bytesPerDim) > 0 || + FutureArrays.compareUnsigned(maxPackedValue, offset, offset + bytesPerDim, point, offset, offset + bytesPerDim) < 0) { + return PointValues.Relation.CELL_OUTSIDE_QUERY; + } + } + return PointValues.Relation.CELL_CROSSES_QUERY; + } + } + + void analyzeNorms(SegmentReader reader, IndexDiskUsageStats stats) throws IOException { + if (reader.getNormsReader() == null) { + return; + } + final NormsProducer normsReader = reader.getNormsReader().getMergeInstance(); + for (FieldInfo field : reader.getFieldInfos()) { + if (field.hasNorms()) { + cancellationChecker.checkForCancellation(); + directory.resetBytesRead(); + iterateDocValues(reader.maxDoc(), () -> normsReader.getNorms(field), norms -> { + cancellationChecker.logEvent(); + norms.longValue(); + }); + stats.addNorms(field.name, directory.getBytesRead()); + } + } + } + + void analyzeTermVectors(SegmentReader reader, IndexDiskUsageStats stats) throws IOException { + TermVectorsReader termVectorsReader = reader.getTermVectorsReader(); + if (termVectorsReader == null) { + return; + } + termVectorsReader = termVectorsReader.getMergeInstance(); + directory.resetBytesRead(); + final TermVectorsVisitor visitor = new TermVectorsVisitor(); + // TODO: Traverse 10-20% documents + for (int docID = 0; docID < reader.numDocs(); docID++) { + cancellationChecker.logEvent(); + final Fields vectors = termVectorsReader.get(docID); + if (vectors != null) { + for (String field : vectors) { + cancellationChecker.logEvent(); + 
visitor.visitField(vectors, field); + } + } + } + if (visitor.fields.isEmpty() == false) { + final long totalBytes = visitor.fields.values().stream().mapToLong(v -> v).sum(); + final double ratio = (double) (directory.getBytesRead()) / (double) (totalBytes); + for (Map.Entry field : visitor.fields.entrySet()) { + final long fieldBytes = (long) Math.ceil(field.getValue() * ratio); + stats.addTermVectors(field.getKey(), fieldBytes); + } + } + } + + private class TermVectorsVisitor { + final Map fields = new HashMap<>(); + private PostingsEnum docsAndPositions; // to reuse + + void visitField(Fields vectors, String fieldName) throws IOException { + final Terms terms = vectors.terms(fieldName); + if (terms == null) { + return; + } + final boolean hasPositions = terms.hasPositions(); + final boolean hasOffsets = terms.hasOffsets(); + final boolean hasPayloads = terms.hasPayloads(); + assert hasPayloads == false || hasPositions; + long fieldLength = 1; // flags + final TermsEnum termsEnum = terms.iterator(); + BytesRef bytesRef; + while ((bytesRef = termsEnum.next()) != null) { + cancellationChecker.logEvent(); + fieldLength += Integer.BYTES + bytesRef.length; // term + final int freq = (int) termsEnum.totalTermFreq(); + fieldLength += Integer.BYTES; // freq + if (hasPositions || hasOffsets) { + docsAndPositions = termsEnum.postings(docsAndPositions, PostingsEnum.OFFSETS | PostingsEnum.PAYLOADS); + assert docsAndPositions != null; + while (docsAndPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + cancellationChecker.logEvent(); + assert docsAndPositions.freq() == freq; + for (int posUpTo = 0; posUpTo < freq; posUpTo++) { + final int pos = docsAndPositions.nextPosition(); + fieldLength += Integer.BYTES; // position + docsAndPositions.startOffset(); + fieldLength += Integer.BYTES; // start offset + docsAndPositions.endOffset(); + fieldLength += Integer.BYTES; // end offset + final BytesRef payload = docsAndPositions.getPayload(); + if (payload != null) { + fieldLength += Integer.BYTES + payload.length; // payload + } + assert hasPositions == false || pos >= 0; + } + } + } + } + final long finalLength = fieldLength; + fields.compute(fieldName, (k, v) -> v == null ? 
finalLength : v + finalLength); + } + } + + private static class TrackingReadBytesDirectory extends FilterDirectory { + private final Map trackers = new HashMap<>(); + + TrackingReadBytesDirectory(Directory in) { + super(in); + } + + long getBytesRead() { + return trackers.values().stream().mapToLong(BytesReadTracker::getBytesRead).sum(); + } + + void resetBytesRead() { + trackers.values().forEach(BytesReadTracker::resetBytesRead); + } + + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + IndexInput in = super.openInput(name, context); + try { + final BytesReadTracker tracker = trackers.computeIfAbsent(name, k -> { + if (LuceneFilesExtensions.fromFile(name) == LuceneFilesExtensions.CFS) { + return new CompoundFileBytesReaderTracker(); + } else { + return new BytesReadTracker(); + } + }); + final TrackingReadBytesIndexInput wrapped = new TrackingReadBytesIndexInput(in, 0L, tracker); + in = null; + return wrapped; + } finally { + IOUtils.close(in); + } + } + } + + private static class TrackingReadBytesIndexInput extends IndexInput { + final IndexInput in; + final BytesReadTracker bytesReadTracker; + final long fileOffset; + + TrackingReadBytesIndexInput(IndexInput in, long fileOffset, BytesReadTracker bytesReadTracker) { + super(in.toString()); + this.in = in; + this.fileOffset = fileOffset; + this.bytesReadTracker = bytesReadTracker; + } + + @Override + public void close() throws IOException { + in.close(); + } + + @Override + public long getFilePointer() { + return in.getFilePointer(); + } + + @Override + public void seek(long pos) throws IOException { + in.seek(pos); + } + + @Override + public long length() { + return in.length(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + final IndexInput slice = in.slice(sliceDescription, offset, length); + return new TrackingReadBytesIndexInput(slice, fileOffset + offset, bytesReadTracker.createSliceTracker(offset)); + } + + @Override + public IndexInput clone() { + return new TrackingReadBytesIndexInput(in.clone(), fileOffset, bytesReadTracker); + } + + @Override + public byte readByte() throws IOException { + bytesReadTracker.trackPositions(fileOffset + getFilePointer(), 1); + return in.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + bytesReadTracker.trackPositions(fileOffset + getFilePointer(), len); + in.readBytes(b, offset, len); + } + } + + /** + * Lucene Codec organizes data field by field for doc values, points, postings, and norms; and document by document + * for stored fields and term vectors. BytesReadTracker then can simply track the min and max read positions. + * This would allow us to traverse only two ends of each partition. 
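+ * For example, if reads touch byte ranges [100, 115] and [9000, 9019] of a file, the tracker reports 9019 - 100 + 1 bytes, + * which approximates the on-disk footprint of the data structure that was traversed.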
+ */ + private static class BytesReadTracker { + private long minPosition = Long.MAX_VALUE; + private long maxPosition = Long.MIN_VALUE; + + BytesReadTracker createSliceTracker(long offset) { + return this; + } + + void trackPositions(long position, int length) { + minPosition = Math.min(minPosition, position); + maxPosition = Math.max(maxPosition, position + length - 1); + } + + + void resetBytesRead() { + minPosition = Long.MAX_VALUE; + maxPosition = Long.MIN_VALUE; + } + + long getBytesRead() { + if (minPosition <= maxPosition) { + return maxPosition - minPosition + 1; + } else { + return 0L; + } + } + } + + private static class CompoundFileBytesReaderTracker extends BytesReadTracker { + private final Map slicedTrackers = new HashMap<>(); + + @Override + BytesReadTracker createSliceTracker(long offset) { + return slicedTrackers.computeIfAbsent(offset, k -> new BytesReadTracker()); + } + + @Override + void trackPositions(long position, int length) { + // already tracked by a child tracker except for the header and footer, but we can ignore them. + } + + @Override + void resetBytesRead() { + slicedTrackers.values().forEach(BytesReadTracker::resetBytesRead); + } + + @Override + long getBytesRead() { + return slicedTrackers.values().stream().mapToLong(BytesReadTracker::getBytesRead).sum(); + } + } + + static long getIndexSize(IndexCommit commit) throws IOException { + long total = 0; + for (String file : commit.getFileNames()) { + total += commit.getDirectory().fileLength(file); + } + return total; + } + + /** + * Periodically checks if the task was cancelled so the analyzing process can abort quickly. + */ + private static class CancellationChecker { + static final long THRESHOLD = 10_000; + private long iterations; + private final Runnable checkForCancellationRunner; + + CancellationChecker(Runnable checkForCancellationRunner) { + this.checkForCancellationRunner = checkForCancellationRunner; + } + + void logEvent() { + if (iterations == THRESHOLD) { + checkForCancellation(); + } else { + iterations++; + } + } + + void checkForCancellation() { + iterations = 0; + checkForCancellationRunner.run(); + } + } + + private static class ExecutionTime { + long invertedIndexTimeInNanos; + long storedFieldsTimeInNanos; + long docValuesTimeInNanos; + long pointsTimeInNanos; + long normsTimeInNanos; + long termVectorsTimeInNanos; + + long totalInNanos() { + return invertedIndexTimeInNanos + storedFieldsTimeInNanos + docValuesTimeInNanos + + pointsTimeInNanos + normsTimeInNanos + termVectorsTimeInNanos; + } + + @Override + public String toString() { + return "total: " + totalInNanos() / 1000_000 + "ms" + + ", inverted index: " + invertedIndexTimeInNanos / 1000_000 + "ms" + + ", stored fields: " + storedFieldsTimeInNanos / 1000_000 + "ms" + + ", doc values: " + docValuesTimeInNanos / 1000_000 + "ms" + + ", points: " + pointsTimeInNanos / 1000_000 + "ms" + + ", norms: " + normsTimeInNanos / 1000_000 + "ms" + + ", term vectors: " + termVectorsTimeInNanos / 1000_000 + "ms"; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java new file mode 100644 index 0000000000000..e6abb6359084d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java @@ -0,0 +1,265 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * The result of analyzing disk usage of each field in a shard/index + */ +public final class IndexDiskUsageStats implements ToXContentFragment, Writeable { + public static final String TOTAL = "total"; + public static final String TOTAL_IN_BYTES = "total_in_bytes"; + public static final String INVERTED_INDEX = "inverted_index"; + public static final String STORED_FIELDS = "stored_fields"; + public static final String STORED_FIELDS_IN_BYTES = "stored_fields_in_bytes"; + public static final String DOC_VALUES = "doc_values"; + public static final String DOC_VALUES_IN_BYTES = "doc_values_in_bytes"; + public static final String POINTS = "points"; + public static final String POINTS_IN_BYTES = "points_in_bytes"; + public static final String NORMS = "norms"; + public static final String NORMS_IN_BYTES = "norms_in_bytes"; + public static final String TERM_VECTORS = "term_vectors"; + public static final String TERM_VECTORS_IN_BYTES = "term_vectors_in_bytes"; + + public static final String STORE_SIZE = "store_size"; + public static final String STORE_SIZE_IN_BYTES = "store_size_in_bytes"; + + private final Map fields; + private long indexSizeInBytes; + + public IndexDiskUsageStats(long indexSizeInBytes) { + fields = new HashMap<>(); + this.indexSizeInBytes = indexSizeInBytes; + } + + public IndexDiskUsageStats(StreamInput in) throws IOException { + this.fields = in.readMap(StreamInput::readString, PerFieldDiskUsage::new); + this.indexSizeInBytes = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(fields, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeVLong(indexSizeInBytes); + } + + PerFieldDiskUsage total() { + final PerFieldDiskUsage total = new PerFieldDiskUsage(); + for (PerFieldDiskUsage value : fields.values()) { + total.add(value); + } + return total; + } + + Map getFields() { + return fields; + } + + long getIndexSizeInBytes() { + return indexSizeInBytes; + } + + private void checkByteSize(long bytes) { + if (bytes < 0) { + throw new IllegalArgumentException("Bytes must be non-negative; got " + bytes); + } + } + + private PerFieldDiskUsage getOrAdd(String fieldName) { + Objects.requireNonNull(fieldName, "fieldName must be non-null"); + return fields.computeIfAbsent(fieldName, k -> new PerFieldDiskUsage()); + } + + public void addInvertedIndex(String fieldName, long bytes) { + checkByteSize(bytes); + getOrAdd(fieldName).invertedIndexBytes += bytes; + } + + public void addStoredField(String fieldName, long bytes) { + checkByteSize(bytes); + getOrAdd(fieldName).storedFieldBytes += bytes; + } + + public void addDocValues(String fieldName, long bytes) { + checkByteSize(bytes); + 
getOrAdd(fieldName).docValuesBytes += bytes; + } + + public void addPoints(String fieldName, long bytes) { + checkByteSize(bytes); + getOrAdd(fieldName).pointsBytes += bytes; + } + + public void addNorms(String fieldName, long bytes) { + checkByteSize(bytes); + getOrAdd(fieldName).normsBytes += bytes; + } + + public void addTermVectors(String fieldName, long bytes) { + checkByteSize(bytes); + getOrAdd(fieldName).termVectorsBytes += bytes; + } + + public IndexDiskUsageStats add(IndexDiskUsageStats other) { + other.fields.forEach((k, v) -> getOrAdd(k).add(v)); + this.indexSizeInBytes += other.indexSizeInBytes; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + final PerFieldDiskUsage total = total(); + builder.field(STORE_SIZE, new ByteSizeValue(indexSizeInBytes)); + builder.field(STORE_SIZE_IN_BYTES, indexSizeInBytes); + + // all fields + builder.startObject("all_fields"); + total.toXContent(builder, params); + builder.endObject(); + + // per field + builder.startObject("fields"); + { + final List> entries = fields.entrySet().stream() + .sorted(Map.Entry.comparingByKey()).collect(Collectors.toList()); + for (Map.Entry entry : entries) { + builder.startObject(entry.getKey()); + entry.getValue().toXContent(builder, params); + builder.endObject(); + } + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + /** + * Disk usage stats for a single field + */ + public static final class PerFieldDiskUsage implements ToXContentFragment, Writeable { + private long invertedIndexBytes; + private long storedFieldBytes; + private long docValuesBytes; + private long pointsBytes; + private long normsBytes; + private long termVectorsBytes; + + private PerFieldDiskUsage() { + + } + + private PerFieldDiskUsage(StreamInput in) throws IOException { + invertedIndexBytes = in.readVLong(); + storedFieldBytes = in.readVLong(); + docValuesBytes = in.readVLong(); + pointsBytes = in.readVLong(); + normsBytes = in.readVLong(); + termVectorsBytes = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(invertedIndexBytes); + out.writeVLong(storedFieldBytes); + out.writeVLong(docValuesBytes); + out.writeVLong(pointsBytes); + out.writeVLong(normsBytes); + out.writeVLong(termVectorsBytes); + } + + private void add(PerFieldDiskUsage other) { + invertedIndexBytes += other.invertedIndexBytes; + storedFieldBytes += other.storedFieldBytes; + docValuesBytes += other.docValuesBytes; + pointsBytes += other.pointsBytes; + normsBytes += other.normsBytes; + termVectorsBytes += other.termVectorsBytes; + } + + public long getInvertedIndexBytes() { + return invertedIndexBytes; + } + + public long getStoredFieldBytes() { + return storedFieldBytes; + } + + public long getDocValuesBytes() { + return docValuesBytes; + } + + public long getPointsBytes() { + return pointsBytes; + } + + public long getNormsBytes() { + return normsBytes; + } + + public long getTermVectorsBytes() { + return termVectorsBytes; + } + + + long totalBytes() { + return invertedIndexBytes + storedFieldBytes + docValuesBytes + pointsBytes + normsBytes + termVectorsBytes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + final long totalBytes = totalBytes(); + builder.field(TOTAL, new ByteSizeValue(totalBytes)); + builder.field(TOTAL_IN_BYTES, totalBytes); + + 
builder.startObject(INVERTED_INDEX); + builder.field(TOTAL, new ByteSizeValue(invertedIndexBytes)); + builder.field(TOTAL_IN_BYTES, invertedIndexBytes); + builder.endObject(); + + builder.field(STORED_FIELDS, new ByteSizeValue(storedFieldBytes)); + builder.field(STORED_FIELDS_IN_BYTES, storedFieldBytes); + + builder.field(DOC_VALUES, new ByteSizeValue(docValuesBytes)); + builder.field(DOC_VALUES_IN_BYTES, docValuesBytes); + + builder.field(POINTS, new ByteSizeValue(pointsBytes)); + builder.field(POINTS_IN_BYTES, pointsBytes); + + builder.field(NORMS, new ByteSizeValue(normsBytes)); + builder.field(NORMS_IN_BYTES, normsBytes); + + builder.field(TERM_VECTORS, new ByteSizeValue(termVectorsBytes)); + builder.field(TERM_VECTORS_IN_BYTES, termVectorsBytes); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java new file mode 100644 index 0000000000000..a64e8c9d3061d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.diskusage; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReferenceArray; + +public class TransportAnalyzeIndexDiskUsageAction extends TransportBroadcastAction< + AnalyzeIndexDiskUsageRequest, AnalyzeIndexDiskUsageResponse, + AnalyzeDiskUsageShardRequest, AnalyzeDiskUsageShardResponse> { + private final IndicesService indicesService; + + @Inject + public 
TransportAnalyzeIndexDiskUsageAction(ClusterService clusterService, + TransportService transportService, + IndicesService indexServices, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(AnalyzeIndexDiskUsageAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, + AnalyzeIndexDiskUsageRequest::new, AnalyzeDiskUsageShardRequest::new, ThreadPool.Names.ANALYZE); + this.indicesService = indexServices; + } + + @Override + protected void doExecute(Task task, AnalyzeIndexDiskUsageRequest request, ActionListener listener) { + super.doExecute(task, request, listener); + } + + @Override + protected AnalyzeDiskUsageShardRequest newShardRequest(int numShards, ShardRouting shard, AnalyzeIndexDiskUsageRequest request) { + return new AnalyzeDiskUsageShardRequest(shard.shardId(), request); + } + + @Override + protected AnalyzeDiskUsageShardResponse readShardResponse(StreamInput in) throws IOException { + return new AnalyzeDiskUsageShardResponse(in); + } + + @Override + protected AnalyzeDiskUsageShardResponse shardOperation(AnalyzeDiskUsageShardRequest request, Task task) throws IOException { + final ShardId shardId = request.shardId(); + assert task instanceof CancellableTask : "AnalyzeDiskUsageShardRequest must create a cancellable task"; + final CancellableTask cancellableTask = (CancellableTask) task; + final Runnable checkForCancellation = cancellableTask::ensureNotCancelled; + final IndexShard shard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); + try (Engine.IndexCommitRef commitRef = shard.acquireLastIndexCommit(request.flush)) { + final IndexDiskUsageStats stats = IndexDiskUsageAnalyzer.analyze(shardId, commitRef.getIndexCommit(), checkForCancellation); + return new AnalyzeDiskUsageShardResponse(shardId, stats); + } + } + + @Override + protected AnalyzeIndexDiskUsageResponse newResponse(AnalyzeIndexDiskUsageRequest request, + AtomicReferenceArray shardsResponses, + ClusterState clusterState) { + int successfulShards = 0; + final List shardFailures = new ArrayList<>(); + final Map combined = new HashMap<>(); + for (int i = 0; i < shardsResponses.length(); i++) { + final Object r = shardsResponses.get(i); + if (r instanceof AnalyzeDiskUsageShardResponse) { + ++successfulShards; + AnalyzeDiskUsageShardResponse resp = (AnalyzeDiskUsageShardResponse) r; + combined.compute(resp.getIndex(), (k, v) -> v == null ? 
resp.stats : v.add(resp.stats)); + } else if (r instanceof DefaultShardOperationFailedException) { + shardFailures.add((DefaultShardOperationFailedException) r); + } else { + assert false : "unknown response [" + r + "]"; + throw new IllegalStateException("unknown response [" + r + "]"); + } + } + return new AnalyzeIndexDiskUsageResponse( + shardsResponses.length(), + successfulShards, + shardFailures.size(), + shardFailures, + combined); + } + + @Override + protected GroupShardsIterator shards(ClusterState clusterState, + AnalyzeIndexDiskUsageRequest request, + String[] concreteIndices) { + final GroupShardsIterator groups = clusterService + .operationRouting() + .searchShards(clusterState, concreteIndices, null, null); + for (ShardIterator group : groups) { + // fails fast if any non-active groups + if (group.size() == 0) { + throw new NoShardAvailableActionException(group.shardId()); + } + } + return groups; + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, AnalyzeIndexDiskUsageRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, AnalyzeIndexDiskUsageRequest request, + String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index 80e50e8642c94..c8c76192b281f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -70,13 +70,13 @@ protected ForceMergeRequest readRequestFrom(StreamInput in) throws IOException { @Override protected void shardOperation(ForceMergeRequest request, ShardRouting shardRouting, Task task, ActionListener listener) { - threadPool.executor(ThreadPool.Names.FORCE_MERGE).execute(ActionRunnable.run(listener, + assert (task instanceof CancellableTask) == false; // TODO: add cancellation handling here once the task supports it + threadPool.executor(ThreadPool.Names.FORCE_MERGE).execute(ActionRunnable.supply(listener, () -> { - assert (task instanceof CancellableTask) == false; // TODO: add cancellation handling here once the task supports it IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()) .getShard(shardRouting.shardId().id()); indexShard.forceMerge(request); - listener.onResponse(EmptyResult.INSTANCE); + return EmptyResult.INSTANCE; })); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 9ab979a489a0e..1efd025ade564 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -59,7 +59,7 @@ public PutMappingRequestBuilder setSource(XContentBuilder mappingBuilder) { /** * The mapping source definition. 
*/ - public PutMappingRequestBuilder setSource(Map mappingSource) { + public PutMappingRequestBuilder setSource(Map mappingSource) { request.source(mappingSource); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 5ce084c09590a..66470343b4986 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -10,14 +10,15 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import java.io.IOException; @@ -66,7 +67,7 @@ public String toString() { return Strings.toString(this); } - public static class AddBlockResult implements Writeable, ToXContentFragment { + public static class AddBlockResult implements Writeable, ToXContentObject { private final Index index; private final @Nullable Exception exception; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index e572f2d3ef3d1..7e04b78dbbc1f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -127,6 +127,11 @@ public IndicesRequest indices(String... 
indices) { return this; } + @Override + public boolean allowsRemoteIndices() { + return true; + } + @Override public boolean includeDataStreams() { return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 1563b2996a2f9..1b97df88a28e2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -72,7 +72,9 @@ public class RolloverRequest extends AcknowledgedRequest implem if (includeTypeName) { //expecting one type only for (Map.Entry mappingsEntry : parser.map().entrySet()) { - request.createIndexRequest.mapping((Map) mappingsEntry.getValue()); + @SuppressWarnings("unchecked") + final Map value = (Map) mappingsEntry.getValue(); + request.createIndexRequest.mapping(value); } } else { // a type is not included, add a dummy _doc type diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 8f6b8fff2ea64..17cfd38e93f7d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -113,7 +113,7 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.field(Fields.NUM_DOCS, segment.getNumDocs()); builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs()); builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize()); - builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(segment.getMemoryInBytes())); + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(0)); builder.field(Fields.COMMITTED, segment.isCommitted()); builder.field(Fields.SEARCH, segment.isSearch()); if (segment.getVersion() != null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 3561ac39b5400..f5270ec448f49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -10,8 +10,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; -import org.elasticsearch.index.bulk.stats.BulkStats; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -19,6 +17,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.bulk.stats.BulkStats; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.engine.SegmentsStats; @@ -32,6 +32,7 @@ import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexingStats; +import 
org.elasticsearch.index.shard.ShardCountStats; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.warmer.WarmerStats; @@ -96,6 +97,9 @@ public class CommonStats implements Writeable, ToXContentFragment { @Nullable public BulkStats bulk; + @Nullable + public ShardCountStats shards; + public CommonStats() { this(CommonStatsFlags.NONE); } @@ -156,6 +160,9 @@ public CommonStats(CommonStatsFlags flags) { case Bulk: bulk = new BulkStats(); break; + case Shards: + shards = new ShardCountStats(); + break; default: throw new IllegalStateException("Unknown Flag: " + flag); } @@ -218,6 +225,10 @@ public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, C case Bulk: bulk = indexShard.bulkStats(); break; + case Shards: + // Setting to 1 because the single IndexShard passed to this method implies 1 shard + shards = new ShardCountStats(1); + break; default: throw new IllegalStateException("Unknown Flag: " + flag); } @@ -247,6 +258,9 @@ public CommonStats(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_8_0_0)) { bulk = in.readOptionalWriteable(BulkStats::new); } + if (in.getVersion().onOrAfter(Version.V_7_15_0)) { + shards = in.readOptionalWriteable(ShardCountStats::new); + } } @Override @@ -270,6 +284,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeOptionalWriteable(bulk); } + if (out.getVersion().onOrAfter(Version.V_7_15_0)) { + out.writeOptionalWriteable(shards); + } } public void add(CommonStats stats) { @@ -410,6 +427,14 @@ public void add(CommonStats stats) { } else { bulk.add(stats.getBulk()); } + if (stats.shards != null) { + if (shards == null) { + shards = stats.shards; + } + else { + shards = shards.add(stats.shards); + } + } } @Nullable @@ -497,9 +522,14 @@ public BulkStats getBulk() { return bulk; } + @Nullable + public ShardCountStats getShards() { + return shards; + } + /** * Utility method which computes total memory by adding - * FieldData, PercolatorCache, Segments (memory, index writer, version map) + * FieldData, PercolatorCache, Segments (index writer, version map) */ public ByteSizeValue getTotalMemory() { long size = 0; @@ -510,8 +540,7 @@ public ByteSizeValue getTotalMemory() { size += this.getQueryCache().getMemorySizeInBytes(); } if (this.getSegments() != null) { - size += this.getSegments().getMemoryInBytes() + - this.getSegments().getIndexWriterMemoryInBytes() + + size += this.getSegments().getIndexWriterMemoryInBytes() + this.getSegments().getVersionMapMemoryInBytes(); } @@ -522,7 +551,7 @@ public ByteSizeValue getTotalMemory() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { final Stream stream = Arrays.stream(new ToXContent[] { - docs, store, indexing, get, search, merge, refresh, flush, warmer, queryCache, + docs, shards, store, indexing, get, search, merge, refresh, flush, warmer, queryCache, fieldData, completion, segments, translog, requestCache, recoveryStats, bulk}) .filter(Objects::nonNull); for (ToXContent toXContent : ((Iterable)stream::iterator)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index 70e677cb6e055..dd89fa3323d02 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -217,7 +217,8 @@ public enum Flag { // 14 was previously used for Suggest RequestCache("request_cache", 15), Recovery("recovery", 16), - Bulk("bulk", 17); + Bulk("bulk", 17), + Shards("shards", 18); private final String restName; private final int index; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java new file mode 100644 index 0000000000000..4f17d065e62a4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +public class FieldUsageShardRequest extends BroadcastShardRequest { + + private final String[] fields; + + FieldUsageShardRequest(ShardId shardId, FieldUsageStatsRequest request) { + super(shardId, request); + this.fields = request.fields(); + } + + + FieldUsageShardRequest(StreamInput in) throws IOException { + super(in); + this.fields = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(fields); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return FieldUsageShardRequest.this.getDescription(); + } + }; + } + + @Override + public String getDescription() { + return "get field usage for shard: [" + shardId() + "], fields: " + Arrays.toString(fields); + } + + public String[] fields() { + return fields; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardResponse.java new file mode 100644 index 0000000000000..b33b4ab369c8a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardResponse.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.search.stats.FieldUsageStats; + +import java.io.IOException; +import java.util.Objects; + +public class FieldUsageShardResponse implements Writeable, ToXContentObject { + + final String trackingId; + final ShardRouting shardRouting; + final long trackingStartTime; + final FieldUsageStats stats; + + FieldUsageShardResponse(StreamInput in) throws IOException { + trackingId = in.readString(); + shardRouting = new ShardRouting(in); + trackingStartTime = in.readVLong(); + stats = new FieldUsageStats(in); + } + + FieldUsageShardResponse(String trackingId, ShardRouting shardRouting, long trackingStartTime, FieldUsageStats stats) { + this.trackingId = Objects.requireNonNull(trackingId, "trackingId must be non null"); + this.shardRouting = Objects.requireNonNull(shardRouting, "routing must be non null"); + this.trackingStartTime = trackingStartTime; + this.stats = Objects.requireNonNull(stats, "stats must be non null"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(trackingId); + shardRouting.writeTo(out); + out.writeVLong(trackingStartTime); + stats.writeTo(out); + } + + public String getTrackingId() { + return trackingId; + } + + public ShardRouting getShardRouting() { + return shardRouting; + } + + public long getTrackingStartTime() { + return trackingStartTime; + } + + public FieldUsageStats getStats() { + return stats; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.TRACKING_ID, trackingId); + builder.timeField(Fields.TRACKING_STARTED_AT_MILLIS, Fields.TRACKING_STARTED_AT, trackingStartTime); + builder.startObject(Fields.ROUTING) + .field(Fields.STATE, shardRouting.state()) + .field(Fields.PRIMARY, shardRouting.primary()) + .field(Fields.NODE, shardRouting.currentNodeId()) + .field(Fields.RELOCATING_NODE, shardRouting.relocatingNodeId()) + .endObject(); + builder.field(Fields.STATS, stats, params); + builder.endObject(); + return builder; + } + + static final class Fields { + static final String TRACKING_ID = "tracking_id"; + static final String TRACKING_STARTED_AT_MILLIS = "tracking_started_at_millis"; + static final String TRACKING_STARTED_AT = "tracking_started_at"; + static final String STATS = "stats"; + static final String ROUTING = "routing"; + static final String STATE = "state"; + static final String PRIMARY = "primary"; + static final String NODE = "node"; + static final String RELOCATING_NODE = "relocating_node"; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsAction.java new file mode 100644 index 0000000000000..825b66f63d812 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsAction.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.action.ActionType; + +public class FieldUsageStatsAction extends ActionType { + + public static final FieldUsageStatsAction INSTANCE = new FieldUsageStatsAction(); + public static final String NAME = "indices:monitor/field_usage_stats"; + + private FieldUsageStatsAction() { + super(NAME, FieldUsageStatsResponse::new); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsRequest.java new file mode 100644 index 0000000000000..795241fe59b21 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsRequest.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +public class FieldUsageStatsRequest extends BroadcastRequest { + + private String[] fields = Strings.EMPTY_ARRAY; + + public FieldUsageStatsRequest(String... indices) { + super(indices); + } + + public FieldUsageStatsRequest(String[] indices, IndicesOptions indicesOptions) { + super(indices, indicesOptions); + } + + public FieldUsageStatsRequest(StreamInput in) throws IOException { + super(in); + fields = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(fields); + } + + public FieldUsageStatsRequest fields(String... 
fields) { + this.fields = fields; + return this; + } + + public String[] fields() { + return this.fields; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, FieldUsageStatsAction.NAME, type, "", parentTaskId, headers) { + @Override + public String getDescription() { + return FieldUsageStatsRequest.this.getDescription(); + } + }; + } + + @Override + public String getDescription() { + return "get field usage for indices [" + String.join(",", indices) + "], fields " + Arrays.toString(fields); + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java new file mode 100644 index 0000000000000..730598e347d4c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class FieldUsageStatsResponse extends BroadcastResponse { + private final Map> stats; + + FieldUsageStatsResponse(int totalShards, int successfulShards, int failedShards, + List shardFailures, + Map> stats) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.stats = stats; + } + + FieldUsageStatsResponse(StreamInput in) throws IOException { + super(in); + stats = in.readMap(StreamInput::readString, i -> i.readList(FieldUsageShardResponse::new)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(stats, StreamOutput::writeString, StreamOutput::writeList); + } + + public Map> getStats() { + return stats; + } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + final List>> sortedEntries = + stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).collect(Collectors.toList()); + for (Map.Entry> entry : sortedEntries) { + builder.startObject(entry.getKey()); + builder.startArray("shards"); + for (FieldUsageShardResponse resp : entry.getValue()) { + resp.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java new file mode 100644 index 0000000000000..d15ea4a95577e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class TransportFieldUsageAction extends TransportBroadcastByNodeAction { + + private final IndicesService indicesService; + + @Inject + public TransportFieldUsageAction(ClusterService clusterService, + TransportService transportService, + IndicesService indexServices, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(FieldUsageStatsAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, + FieldUsageStatsRequest::new, ThreadPool.Names.SAME); + this.indicesService = indexServices; + } + + @Override + protected FieldUsageShardResponse readShardResult(StreamInput in) throws IOException { + return new FieldUsageShardResponse(in); + } + + @Override + protected FieldUsageStatsResponse newResponse(FieldUsageStatsRequest request, int totalShards, int successfulShards, int failedShards, + List fieldUsages, + List shardFailures, + ClusterState clusterState) { + final Map> combined = new HashMap<>(); + for (FieldUsageShardResponse response : fieldUsages) { + combined.computeIfAbsent(response.shardRouting.shardId().getIndexName(), i -> new ArrayList<>()).add(response); + } + return new FieldUsageStatsResponse( + totalShards, + successfulShards, + shardFailures.size(), + shardFailures, + combined); + } + + @Override + protected FieldUsageStatsRequest readRequestFrom(StreamInput in) throws IOException { + return new FieldUsageStatsRequest(in); + } + + @Override + protected void shardOperation(FieldUsageStatsRequest request, ShardRouting shardRouting, Task task, + ActionListener listener) { + ActionListener.completeWith(listener, () -> { + final ShardId shardId = shardRouting.shardId(); + final IndexShard shard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); + return new FieldUsageShardResponse(shard.getShardUuid(), shardRouting, + shard.getShardCreationTime(), shard.fieldUsageStats(request.fields())); + }); + } + + @Override + protected ShardsIterator 
shards(ClusterState clusterState, FieldUsageStatsRequest request, String[] concreteIndices) { + return clusterState.routingTable().allActiveShards(concreteIndices); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, FieldUsageStatsRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, FieldUsageStatsRequest request, + String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 225d71ca570c8..0f54322a25b8c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParsingException; @@ -118,7 +119,7 @@ protected ShardValidateQueryResponse readShardResponse(StreamInput in) throws IO } @Override - protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) { + protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) { final String routing; if (request.allShards()) { routing = null; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index eda5658f696da..c922f593fd12b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -77,9 +77,8 @@ void setPrimaryResponse(BulkItemResponse primaryResponse) { */ public void abort(String index, Exception cause) { if (primaryResponse == null) { - final BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.id(), - Objects.requireNonNull(cause), true); - setPrimaryResponse(new BulkItemResponse(id, request.opType(), failure)); + final BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.id(), Objects.requireNonNull(cause), true); + setPrimaryResponse(BulkItemResponse.failure(id, request.opType(), failure)); } else { assert primaryResponse.isFailed() && primaryResponse.getFailure().isAborted() : "response [" + Strings.toString(primaryResponse) + "]; cause [" + cause + "]"; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index e6c1c7d61fe76..b109514f70e76 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -147,9 +147,9 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw 
BulkItemResponse bulkItemResponse; if (exception != null) { Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getId(), exception, status); - bulkItemResponse = new BulkItemResponse(id, opType, failure); + bulkItemResponse = BulkItemResponse.failure(id, opType, failure); } else { - bulkItemResponse = new BulkItemResponse(id, opType, builder.build()); + bulkItemResponse = BulkItemResponse.success(id, opType, builder.build()); } return bulkItemResponse; } @@ -319,7 +319,7 @@ public boolean isAborted() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(INDEX_FIELD, index); if (builder.getRestApiVersion() == RestApiVersion.V_7) { - builder.field(MapperService.TYPE_FIELD_NAME, MapperService.SINGLE_MAPPING_NAME); + builder.field("type", MapperService.SINGLE_MAPPING_NAME); } if (id != null) { builder.field(ID_FIELD, id); @@ -341,66 +341,48 @@ public String toString() { } } - private int id; - - private OpType opType; + private final int id; - private DocWriteResponse response; + private final OpType opType; - private Failure failure; + private final DocWriteResponse response; - BulkItemResponse() {} + private final Failure failure; BulkItemResponse(ShardId shardId, StreamInput in) throws IOException { id = in.readVInt(); opType = OpType.fromId(in.readByte()); - - byte type = in.readByte(); - if (type == 0) { - response = new IndexResponse(shardId, in); - } else if (type == 1) { - response = new DeleteResponse(shardId, in); - } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses' - response = new UpdateResponse(shardId, in); - } else if (type != 2) { - throw new IllegalArgumentException("Unexpected type [" + type + "]"); - } - - if (in.readBoolean()) { - failure = new Failure(in); - } + response = readResponse(shardId, in); + failure = in.readBoolean() ? new Failure(in) : null; + assertConsistent(); } BulkItemResponse(StreamInput in) throws IOException { id = in.readVInt(); opType = OpType.fromId(in.readByte()); - - byte type = in.readByte(); - if (type == 0) { - response = new IndexResponse(in); - } else if (type == 1) { - response = new DeleteResponse(in); - } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses' - response = new UpdateResponse(in); - } else if (type != 2) { - throw new IllegalArgumentException("Unexpected type [" + type + "]"); - } - - if (in.readBoolean()) { - failure = new Failure(in); - } + response = readResponse(in); + failure = in.readBoolean() ? 
new Failure(in) : null; + assertConsistent(); } - public BulkItemResponse(int id, OpType opType, DocWriteResponse response) { + private BulkItemResponse(int id, OpType opType, DocWriteResponse response, Failure failure) { this.id = id; this.response = response; this.opType = opType; + this.failure = failure; + assertConsistent(); } - public BulkItemResponse(int id, OpType opType, Failure failure) { - this.id = id; - this.opType = opType; - this.failure = failure; + private void assertConsistent() { + assert (response == null) ^ (failure == null) : "only one of response or failure may be set"; + } + + public static BulkItemResponse success(int id, OpType opType, DocWriteResponse response) { + return new BulkItemResponse(id, opType, response, null); + } + + public static BulkItemResponse failure(int id, OpType opType, Failure failure) { + return new BulkItemResponse(id, opType, null, failure); } /** @@ -451,6 +433,7 @@ public long getVersion() { * The actual response ({@link IndexResponse} or {@link DeleteResponse}). {@code null} in * case of failure. */ + @SuppressWarnings("unchecked") public T getResponse() { return (T) response; } @@ -527,4 +510,36 @@ private void writeResponseType(StreamOutput out) throws IOException { throw new IllegalStateException("Unexpected response type found [" + response.getClass() + "]"); } } + + private static DocWriteResponse readResponse(ShardId shardId, StreamInput in) throws IOException { + int type = in.readByte(); + switch (type) { + case 0: + return new IndexResponse(shardId, in); + case 1: + return new DeleteResponse(shardId, in); + case 2: + return null; + case 3: + return new UpdateResponse(shardId, in); + default: + throw new IllegalArgumentException("Unexpected type [" + type + "]"); + } + } + + private static DocWriteResponse readResponse(StreamInput in) throws IOException { + int type = in.readByte(); + switch (type) { + case 0: + return new IndexResponse(in); + case 1: + return new DeleteResponse(in); + case 2: + return null; + case 3: + return new UpdateResponse(in); + default: + throw new IllegalArgumentException("Unexpected type [" + type + "]"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index 9b3e7e8b7372a..4a12b1d70e8b5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -61,7 +61,7 @@ enum ItemProcessingState { private int currentIndex = -1; private ItemProcessingState currentItemState; - private DocWriteRequest requestToExecute; + private DocWriteRequest requestToExecute; private BulkItemResponse executionResult; private int retryCounter; @@ -182,7 +182,7 @@ public IndexShard getPrimary() { * sets the request that should actually be executed on the primary. This can be different then the request * received from the user (specifically, an update request is translated to an indexing or delete request). */ - public void setRequestToExecute(DocWriteRequest writeRequest) { + public void setRequestToExecute(DocWriteRequest writeRequest) { assert assertInvariants(ItemProcessingState.INITIAL); requestToExecute = writeRequest; currentItemState = ItemProcessingState.TRANSLATED; @@ -190,6 +190,7 @@ public void setRequestToExecute(DocWriteRequest writeRequest) { } /** returns the request that should be executed on the shard. 
*/ + @SuppressWarnings("unchecked") public > T getRequestToExecute() { assert assertInvariants(ItemProcessingState.TRANSLATED); return (T) requestToExecute; @@ -215,7 +216,7 @@ public void resetForExecutionForRetry() { /** completes the operation without doing anything on the primary */ public void markOperationAsNoOp(DocWriteResponse response) { assertInvariants(ItemProcessingState.INITIAL); - executionResult = new BulkItemResponse(getCurrentItem().id(), getCurrentItem().request().opType(), response); + executionResult = BulkItemResponse.success(getCurrentItem().id(), getCurrentItem().request().opType(), response); currentItemState = ItemProcessingState.EXECUTED; assertInvariants(ItemProcessingState.EXECUTED); } @@ -224,8 +225,8 @@ public void markOperationAsNoOp(DocWriteResponse response) { public void failOnMappingUpdate(Exception cause) { assert assertInvariants(ItemProcessingState.WAIT_FOR_MAPPING_UPDATE); currentItemState = ItemProcessingState.EXECUTED; - final DocWriteRequest docWriteRequest = getCurrentItem().request(); - executionResult = new BulkItemResponse(getCurrentItem().id(), docWriteRequest.opType(), + final DocWriteRequest docWriteRequest = getCurrentItem().request(); + executionResult = BulkItemResponse.failure(getCurrentItem().id(), docWriteRequest.opType(), // Make sure to use getCurrentItem().index() here, if you use docWriteRequest.index() it will use the // concrete index instead of an alias if used! new BulkItemResponse.Failure(getCurrentItem().index(), docWriteRequest.id(), cause)); @@ -236,7 +237,7 @@ public void failOnMappingUpdate(Exception cause) { public void markOperationAsExecuted(Engine.Result result) { assertInvariants(ItemProcessingState.TRANSLATED); final BulkItemRequest current = getCurrentItem(); - DocWriteRequest docWriteRequest = getRequestToExecute(); + DocWriteRequest docWriteRequest = getRequestToExecute(); switch (result.getResultType()) { case SUCCESS: final DocWriteResponse response; @@ -252,13 +253,13 @@ public void markOperationAsExecuted(Engine.Result result) { } else { throw new AssertionError("unknown result type :" + result.getResultType()); } - executionResult = new BulkItemResponse(current.id(), current.request().opType(), response); + executionResult = BulkItemResponse.success(current.id(), current.request().opType(), response); // set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though. executionResult.getResponse().setShardInfo(new ReplicationResponse.ShardInfo()); locationToSync = TransportWriteAction.locationToSync(locationToSync, result.getTranslogLocation()); break; case FAILURE: - executionResult = new BulkItemResponse(current.id(), docWriteRequest.opType(), + executionResult = BulkItemResponse.failure(current.id(), docWriteRequest.opType(), // Make sure to use request.index() here, if you // use docWriteRequest.index() it will use the // concrete index instead of an alias if used! 
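Reviewer note on the BulkItemResponse change above: the two public constructors are replaced by static success()/failure() factories plus assertConsistent(), so exactly one of response or failure is ever set. A minimal migration sketch, assuming only the factory and Failure(index, id, cause) signatures visible in this diff; the class, method, and variable names below are illustrative, and OpType.INDEX is an arbitrary example:

    import org.elasticsearch.action.DocWriteRequest.OpType;
    import org.elasticsearch.action.DocWriteResponse;
    import org.elasticsearch.action.bulk.BulkItemResponse;

    final class BulkItemResponseMigrationSketch {
        // was: new BulkItemResponse(slot, OpType.INDEX, docWriteResponse)
        static BulkItemResponse onSuccess(int slot, DocWriteResponse docWriteResponse) {
            return BulkItemResponse.success(slot, OpType.INDEX, docWriteResponse);
        }

        // was: new BulkItemResponse(slot, OpType.INDEX, failure)
        static BulkItemResponse onFailure(int slot, String index, String docId, Exception cause) {
            return BulkItemResponse.failure(slot, OpType.INDEX, new BulkItemResponse.Failure(index, docId, cause));
        }
    }

Either factory still round-trips through the existing stream serialization, since readResponse() maps the type byte 2 to a null response for the failure case.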
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 4d12cacc516e2..a95811fbe06e7 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -30,6 +30,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import java.io.IOException; import java.util.ArrayList; @@ -48,7 +49,8 @@ * Note that we only support refresh on the bulk request not per item. * @see org.elasticsearch.client.Client#bulk(BulkRequest) */ -public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest, Accountable { +public class BulkRequest extends ActionRequest + implements CompositeIndicesRequest, WriteRequest, Accountable, RawIndexingDataTransportRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkRequest.class); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 9ecac5d377da6..b28a22ad96a3a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -18,13 +18,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import java.io.IOException; import java.util.HashSet; import java.util.Set; import java.util.stream.Stream; -public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable { +public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable, RawIndexingDataTransportRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a858416137903..7efeb9cbb0bf5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -368,8 +368,8 @@ void createIndex(String index, private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocWriteRequest request, String index, Exception e) { if (index.equals(request.index())) { - responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), - request.id(), e))); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), e); + responses.set(idx, BulkItemResponse.failure(idx, request.opType(), failure)); return true; } return false; @@ -469,9 +469,8 @@ protected void doRun() { List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); shardRequests.add(new BulkItemRequest(i, docWriteRequest)); } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), - docWriteRequest.id(), e); - BulkItemResponse 
bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.id(), e); + BulkItemResponse bulkItemResponse = BulkItemResponse.failure(i, docWriteRequest.opType(), failure); responses.set(i, bulkItemResponse); // make sure the request gets never processed again bulkRequest.requests.set(i, null); @@ -518,8 +517,8 @@ public void onFailure(Exception e) { for (BulkItemRequest request : requests) { final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); DocWriteRequest docWriteRequest = request.request(); - responses.set(request.id(), new BulkItemResponse(request.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e))); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e); + responses.set(request.id(), BulkItemResponse.failure(request.id(), docWriteRequest.opType(), failure)); } if (counter.decrementAndGet() == 0) { finishHim(); @@ -616,7 +615,7 @@ private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, int i private void addFailure(DocWriteRequest request, int idx, Exception unavailableException) { BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), unavailableException); - BulkItemResponse bulkItemResponse = new BulkItemResponse(idx, request.opType(), failure); + BulkItemResponse bulkItemResponse = BulkItemResponse.failure(idx, request.opType(), failure); responses.set(idx, bulkItemResponse); // make sure the request gets never processed again bulkRequest.requests.set(idx, null); @@ -788,7 +787,7 @@ synchronized void markItemAsDropped(int slot) { failedSlots.set(slot); final String id = indexRequest.id() == null ? DROPPED_ITEM_WITH_AUTO_GENERATED_ID : indexRequest.id(); itemResponses.add( - new BulkItemResponse(slot, indexRequest.opType(), + BulkItemResponse.success(slot, indexRequest.opType(), new UpdateResponse( new ShardId(indexRequest.index(), IndexMetadata.INDEX_UUID_NA_VALUE, 0), id, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, @@ -806,7 +805,7 @@ synchronized void markItemAsFailed(int slot, Exception e) { // 3) Continue with the next request in the bulk. 
failedSlots.set(slot); BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.id(), e); - itemResponses.add(new BulkItemResponse(slot, indexRequest.opType(), failure)); + itemResponses.add(BulkItemResponse.failure(slot, indexRequest.opType(), failure)); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e3b38e331f77d..d942deaef9fe6 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -374,7 +374,11 @@ private static BulkItemResponse processUpdateResponse(final UpdateRequest update BulkItemResponse operationResponse, final UpdateHelper.Result translate) { final BulkItemResponse response; if (operationResponse.isFailed()) { - response = new BulkItemResponse(operationResponse.getItemId(), DocWriteRequest.OpType.UPDATE, operationResponse.getFailure()); + response = BulkItemResponse.failure( + operationResponse.getItemId(), + DocWriteRequest.OpType.UPDATE, + operationResponse.getFailure() + ); } else { final DocWriteResponse.Result translatedResult = translate.getResponseResult(); final UpdateResponse updateResponse; @@ -407,7 +411,7 @@ private static BulkItemResponse processUpdateResponse(final UpdateRequest update } else { throw new IllegalArgumentException("unknown operation type: " + translatedResult); } - response = new BulkItemResponse(operationResponse.getItemId(), DocWriteRequest.OpType.UPDATE, updateResponse); + response = BulkItemResponse.success(operationResponse.getItemId(), DocWriteRequest.OpType.UPDATE, updateResponse); } return response; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java index 2daff4559d960..615c7d5336225 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; @@ -47,8 +46,9 @@ ActionListener wrapBulkResponse(ActionListener listener) assert bulkItemResponses.getItems().length == 1 : "expected only one item in bulk request"; BulkItemResponse bulkItemResponse = bulkItemResponses.getItems()[0]; if (bulkItemResponse.isFailed() == false) { - final DocWriteResponse response = bulkItemResponse.getResponse(); - listener.onResponse((Response) response); + @SuppressWarnings("unchecked") + final Response response = (Response) bulkItemResponse.getResponse(); + listener.onResponse(response); } else { listener.onFailure(bulkItemResponse.getFailure().getCause()); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index 3101bbfe76c4b..048a0e4898154 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -8,7 
+8,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -96,34 +95,26 @@ public FieldCapabilities(String name, String type, FieldCapabilities(StreamInput in) throws IOException { this.name = in.readString(); this.type = in.readString(); - this.isMetadataField = in.getVersion().onOrAfter(Version.V_7_13_0) ? in.readBoolean() : false; + this.isMetadataField = in.readBoolean(); this.isSearchable = in.readBoolean(); this.isAggregatable = in.readBoolean(); this.indices = in.readOptionalStringArray(); this.nonSearchableIndices = in.readOptionalStringArray(); this.nonAggregatableIndices = in.readOptionalStringArray(); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { - meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString)); - } else { - meta = Collections.emptyMap(); - } + meta = in.readMap(StreamInput::readString, i -> i.readSet(StreamInput::readString)); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(type); - if (out.getVersion().onOrAfter(Version.V_7_13_0)) { - out.writeBoolean(isMetadataField); - } + out.writeBoolean(isMetadataField); out.writeBoolean(isSearchable); out.writeBoolean(isAggregatable); out.writeOptionalStringArray(indices); out.writeOptionalStringArray(nonSearchableIndices); out.writeOptionalStringArray(nonAggregatableIndices); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { - out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); - } + out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java index 3d193820e2158..f5eb4b4116b08 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -20,7 +19,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -44,9 +42,9 @@ public class FieldCapabilitiesIndexRequest extends ActionRequest implements Indi index = in.readOptionalString(); fields = in.readStringArray(); originalIndices = OriginalIndices.readOriginalIndices(in); - indexFilter = in.getVersion().onOrAfter(Version.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; - nowInMillis = in.getVersion().onOrAfter(Version.V_7_9_0) ? in.readLong() : 0L; - runtimeFields = in.getVersion().onOrAfter(Version.V_7_12_0) ? 
in.readMap() : Collections.emptyMap(); + indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + nowInMillis = in.readLong(); + runtimeFields = in.readMap(); } FieldCapabilitiesIndexRequest(String[] fields, @@ -112,19 +110,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(index); out.writeStringArray(fields); OriginalIndices.writeOriginalIndices(originalIndices, out); - if (out.getVersion().onOrAfter(Version.V_7_9_0)) { - out.writeOptionalNamedWriteable(indexFilter); - out.writeLong(nowInMillis); - } - if (out.getVersion().onOrAfter(Version.V_7_12_0)) { - out.writeMap(runtimeFields); - } else { - if (false == runtimeFields.isEmpty()) { - throw new IllegalArgumentException( - "Versions before 7.12.0 don't support [runtime_mappings], but trying to send _field_caps request to a node " - + "with version [" + out.getVersion()+ "]"); - } - } + out.writeOptionalNamedWriteable(indexFilter); + out.writeLong(nowInMillis); + out.writeMap(runtimeFields); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 919f131cd80cf..f592b57f76bab 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -19,7 +19,7 @@ import java.util.Objects; /** - * Response for {@link TransportFieldCapabilitiesIndexAction}. + * Response for shard level operation in {@link TransportFieldCapabilitiesAction}. */ public class FieldCapabilitiesIndexResponse extends ActionResponse implements Writeable { private final String indexName; @@ -38,7 +38,7 @@ public class FieldCapabilitiesIndexResponse extends ActionResponse implements Wr super(in); this.indexName = in.readString(); this.responseMap = in.readMap(StreamInput::readString, IndexFieldCapabilities::new); - this.canMatch = in.getVersion().onOrAfter(Version.V_7_9_0) ? in.readBoolean() : true; + this.canMatch = in.readBoolean(); this.originVersion = in.getVersion(); } @@ -76,9 +76,7 @@ Version getOriginVersion() { public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); out.writeMap(responseMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); - if (out.getVersion().onOrAfter(Version.V_7_9_0)) { - out.writeBoolean(canMatch); - } + out.writeBoolean(canMatch); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 93d1f10953bbf..6544d700b0a37 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -48,10 +47,10 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); mergeResults = in.readBoolean(); - includeUnmapped = in.getVersion().onOrAfter(Version.V_7_2_0) ? 
in.readBoolean() : false; - indexFilter = in.getVersion().onOrAfter(Version.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; - nowInMillis = in.getVersion().onOrAfter(Version.V_7_9_0) ? in.readOptionalLong() : null; - runtimeFields = in.getVersion().onOrAfter(Version.V_7_12_0) ? in.readMap() : Collections.emptyMap(); + includeUnmapped = in.readBoolean(); + indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + nowInMillis = in.readOptionalLong(); + runtimeFields = in.readMap(); } public FieldCapabilitiesRequest() { @@ -83,22 +82,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); out.writeBoolean(mergeResults); - if (out.getVersion().onOrAfter(Version.V_7_2_0)) { - out.writeBoolean(includeUnmapped); - } - if (out.getVersion().onOrAfter(Version.V_7_9_0)) { - out.writeOptionalNamedWriteable(indexFilter); - out.writeOptionalLong(nowInMillis); - } - if (out.getVersion().onOrAfter(Version.V_7_12_0)) { - out.writeMap(runtimeFields); - } else { - if (false == runtimeFields.isEmpty()) { - throw new IllegalArgumentException( - "Versions before 7.12.0 don't support [runtime_mappings], but trying to send _field_caps request to a node " - + "with version [" + out.getVersion() + "]"); - } - } + out.writeBoolean(includeUnmapped); + out.writeOptionalNamedWriteable(indexFilter); + out.writeOptionalLong(nowInMillis); + out.writeMap(runtimeFields); } @Override @@ -159,6 +146,11 @@ public IndicesOptions indicesOptions() { return indicesOptions; } + @Override + public boolean allowsRemoteIndices() { + return true; + } + @Override public boolean includeDataStreams() { return true; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 62f8ae0347842..4cb626607224c 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -8,18 +8,17 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Tuple; import java.io.IOException; import java.util.Arrays; @@ -70,18 +69,10 @@ private FieldCapabilitiesResponse(String[] indices, Map readField(StreamInput in) throws I @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_2_0)) { - out.writeStringArray(indices); - } + out.writeStringArray(indices); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); out.writeList(indexResponses); - if (out.getVersion().onOrAfter(Version.CURRENT)) { - out.writeList(failures); - } + out.writeList(failures); } private static void writeField(StreamOutput out, Map 
map) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java index 7f7ca30f304da..3057339e7538a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -16,8 +15,6 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; /** * Describes the capabilities of a field in a single index. @@ -52,47 +49,22 @@ public class IndexFieldCapabilities implements Writeable { } IndexFieldCapabilities(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_7_0)) { - this.name = in.readString(); - this.type = in.readString(); - this.isMetadatafield = in.getVersion().onOrAfter(Version.V_7_13_0) ? in.readBoolean() : false; - this.isSearchable = in.readBoolean(); - this.isAggregatable = in.readBoolean(); - this.meta = in.readMap(StreamInput::readString, StreamInput::readString); - } else { - // Previously we reused the FieldCapabilities class to represent index field capabilities. - FieldCapabilities fieldCaps = new FieldCapabilities(in); - this.name = fieldCaps.getName(); - this.type = fieldCaps.getType(); - this.isMetadatafield = fieldCaps.isMetadataField(); - this.isSearchable = fieldCaps.isSearchable(); - this.isAggregatable = fieldCaps.isAggregatable(); - this.meta = fieldCaps.meta().entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, - entry -> entry.getValue().iterator().next())); - } + this.name = in.readString(); + this.type = in.readString(); + this.isMetadatafield = in.readBoolean(); + this.isSearchable = in.readBoolean(); + this.isAggregatable = in.readBoolean(); + this.meta = in.readMap(StreamInput::readString, StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { - out.writeString(name); - out.writeString(type); - if (out.getVersion().onOrAfter(Version.V_7_13_0)) { - out.writeBoolean(isMetadatafield); - } - out.writeBoolean(isSearchable); - out.writeBoolean(isAggregatable); - out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); - } else { - // Previously we reused the FieldCapabilities class to represent index field capabilities. 
- Map> wrappedMeta = meta.entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, - entry -> Set.of(entry.getValue()))); - FieldCapabilities fieldCaps = new FieldCapabilities(name, type, isMetadatafield, - isSearchable, isAggregatable, null, null, null, wrappedMeta); - fieldCaps.writeTo(out); - } + out.writeString(name); + out.writeString(type); + out.writeBoolean(isMetadatafield); + out.writeBoolean(isSearchable); + out.writeBoolean(isAggregatable); + out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); } public String getName() { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index f6f9fa0878717..73782e12c929f 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -8,28 +8,60 @@ package org.elasticsearch.action.fieldcaps; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; -import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.RuntimeField; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -40,30 +72,38 @@ import java.util.Set; import java.util.function.Predicate; +import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; + public class TransportFieldCapabilitiesAction extends HandledTransportAction { + + private static final String ACTION_SHARD_NAME = FieldCapabilitiesAction.NAME + "[index][s]"; + + private static final Logger logger = LogManager.getLogger(TransportFieldCapabilitiesAction.class); + private final ThreadPool threadPool; - private final NodeClient client; + private final TransportService transportService; private final ClusterService clusterService; - private final RemoteClusterService remoteClusterService; private final IndexNameExpressionResolver indexNameExpressionResolver; private final Predicate metadataFieldPred; + private final IndicesService indicesService; @Inject public TransportFieldCapabilitiesAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - NodeClient client, ActionFilters actionFilters, IndicesService indicesService, IndexNameExpressionResolver indexNameExpressionResolver) { super(FieldCapabilitiesAction.NAME, transportService, actionFilters, FieldCapabilitiesRequest::new); this.threadPool = threadPool; - this.client = client; + this.transportService = transportService; this.clusterService = clusterService; - this.remoteClusterService = transportService.getRemoteClusterService(); this.indexNameExpressionResolver = indexNameExpressionResolver; + this.indicesService = indicesService; final Set metadataFields = indicesService.getAllMetadataFields(); this.metadataFieldPred = metadataFields::contains; + transportService.registerRequestHandler(ACTION_SHARD_NAME, ThreadPool.Names.SAME, + FieldCapabilitiesIndexRequest::new, new ShardTransportHandler()); } @Override @@ -72,7 +112,7 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti long nowInMillis = request.nowInMillis() == null ? 
System.currentTimeMillis() : request.nowInMillis(); final ClusterState clusterState = clusterService.state(); final Map remoteClusterIndices = - remoteClusterService.groupIndices(request.indicesOptions(), request.indices()); + transportService.getRemoteClusterService().groupIndices(request.indicesOptions(), request.indices()); final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); final String[] concreteIndices; if (localIndices == null) { @@ -96,7 +136,13 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti List failures = indexFailures.values(); if (indexResponses.size() > 0) { if (request.isMergeResults()) { - listener.onResponse(merge(indexResponses, request.includeUnmapped(), new ArrayList<>(failures))); + // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable + // on a transport thread in case of large numbers of indices and/or fields + threadPool.executor(ThreadPool.Names.MANAGEMENT).submit( + ActionRunnable.supply( + listener, + () -> merge(indexResponses, request.includeUnmapped(), new ArrayList<>(failures))) + ); } else { listener.onResponse(new FieldCapabilitiesResponse(indexResponses, new ArrayList<>(failures))); } @@ -112,33 +158,41 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti } }; - for (String index : concreteIndices) { - client.executeLocally( - TransportFieldCapabilitiesIndexAction.TYPE, - new FieldCapabilitiesIndexRequest( - request.fields(), - index, - localIndices, - request.indexFilter(), - nowInMillis, - request.runtimeFields() - ), - new ActionListener() { - @Override - public void onResponse(FieldCapabilitiesIndexResponse result) { - if (result.canMatch()) { - indexResponses.add(result); - } - countDown.run(); - } + if (concreteIndices.length > 0) { + // fork this action to the management pool as it can fan out to a large number of child requests that get handled on SAME and + // thus would all run on the current transport thread and block it for an unacceptable amount of time + // (particularly with security enabled) + threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(ActionRunnable.wrap(listener, l -> { + for (String index : concreteIndices) { + new AsyncShardsAction( + transportService, + clusterService, + new FieldCapabilitiesIndexRequest( + request.fields(), + index, + localIndices, + request.indexFilter(), + nowInMillis, + request.runtimeFields() + ), + new ActionListener<>() { + @Override + public void onResponse(FieldCapabilitiesIndexResponse result) { + if (result.canMatch()) { + indexResponses.add(result); + } + countDown.run(); + } - @Override - public void onFailure(Exception e) { - indexFailures.collect(e, index); - countDown.run(); - } + @Override + public void onFailure(Exception e) { + indexFailures.collect(e, index); + countDown.run(); + } + } + ).start(); } - ); + })); } // this is the cross cluster part of this API - we force the other cluster to not merge the results but instead @@ -146,7 +200,7 @@ public void onFailure(Exception e) { for (Map.Entry remoteIndices : remoteClusterIndices.entrySet()) { String clusterAlias = remoteIndices.getKey(); OriginalIndices originalIndices = remoteIndices.getValue(); - Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + Client remoteClusterClient = transportService.getRemoteClusterService().getRemoteClusterClient(threadPool, clusterAlias); FieldCapabilitiesRequest 
remoteRequest = new FieldCapabilitiesRequest(); remoteRequest.setMergeResults(false); // we need to merge on this node remoteRequest.indicesOptions(originalIndices.indicesOptions()); @@ -206,9 +260,8 @@ private FieldCapabilitiesResponse merge( } private void addUnmappedFields(String[] indices, String field, Map typeMap) { - Set unmappedIndices = new HashSet<>(); - Arrays.stream(indices).forEach(unmappedIndices::add); - typeMap.values().stream().forEach((b) -> b.getIndices().stream().forEach(unmappedIndices::remove)); + Set unmappedIndices = new HashSet<>(Arrays.asList(indices)); + typeMap.values().forEach((b) -> b.getIndices().forEach(unmappedIndices::remove)); if (unmappedIndices.isEmpty() == false) { FieldCapabilities.Builder unmapped = new FieldCapabilities.Builder(field, "unmapped"); typeMap.put("unmapped", unmapped); @@ -233,7 +286,7 @@ private void innerMerge(Map> resp } } - private class FailureCollector { + private static final class FailureCollector { final Map, FieldCapabilitiesFailure> indexFailures = Collections.synchronizedMap( new HashMap<>() ); @@ -263,4 +316,229 @@ int size() { return this.indexFailures.size(); } } + + private static ClusterBlockException checkGlobalBlock(ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.READ); + } + + private static ClusterBlockException checkRequestBlock(ClusterState state, String concreteIndex) { + return state.blocks().indexBlockedException(ClusterBlockLevel.READ, concreteIndex); + } + + private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesIndexRequest request) throws IOException { + final ShardId shardId = request.shardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(request.shardId().getId()); + try (Engine.Searcher searcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE)) { + + final SearchExecutionContext searchExecutionContext = indexService.newSearchExecutionContext(shardId.id(), 0, + searcher, request::nowInMillis, null, request.runtimeFields()); + + if (canMatchShard(request, searchExecutionContext) == false) { + return new FieldCapabilitiesIndexResponse(request.index(), Collections.emptyMap(), false); + } + + Set fieldNames = new HashSet<>(); + for (String pattern : request.fields()) { + fieldNames.addAll(searchExecutionContext.getMatchingFieldNames(pattern)); + } + + Predicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); + Map responseMap = new HashMap<>(); + for (String field : fieldNames) { + MappedFieldType ft = searchExecutionContext.getFieldType(field); + boolean isMetadataField = searchExecutionContext.isMetadataField(field); + if (isMetadataField || fieldPredicate.test(ft.name())) { + IndexFieldCapabilities fieldCap = new IndexFieldCapabilities(field, + ft.familyTypeName(), isMetadataField, ft.isSearchable(), ft.isAggregatable(), ft.meta()); + responseMap.put(field, fieldCap); + } else { + continue; + } + + // Check the ancestor of the field to find nested and object fields. + // Runtime fields are excluded since they can override any path. 
+ //TODO find a way to do this that does not require an instanceof check + if (ft instanceof RuntimeField == false) { + int dotIndex = ft.name().lastIndexOf('.'); + while (dotIndex > -1) { + String parentField = ft.name().substring(0, dotIndex); + if (responseMap.containsKey(parentField)) { + // we added this path on another field already + break; + } + // checks if the parent field contains sub-fields + if (searchExecutionContext.getFieldType(parentField) == null) { + // no field type, it must be an object field + ObjectMapper mapper = searchExecutionContext.getObjectMapper(parentField); + String type = mapper.isNested() ? "nested" : "object"; + IndexFieldCapabilities fieldCap = new IndexFieldCapabilities(parentField, type, + false, false, false, Collections.emptyMap()); + responseMap.put(parentField, fieldCap); + } + dotIndex = parentField.lastIndexOf('.'); + } + } + } + return new FieldCapabilitiesIndexResponse(request.index(), responseMap, true); + } + } + + private boolean canMatchShard(FieldCapabilitiesIndexRequest req, SearchExecutionContext searchExecutionContext) throws IOException { + if (req.indexFilter() == null || req.indexFilter() instanceof MatchAllQueryBuilder) { + return true; + } + assert req.nowInMillis() != 0L; + ShardSearchRequest searchRequest = new ShardSearchRequest(req.shardId(), req.nowInMillis(), AliasFilter.EMPTY); + searchRequest.source(new SearchSourceBuilder().query(req.indexFilter())); + return SearchService.queryStillMatchesAfterRewrite(searchRequest, searchExecutionContext); + } + + /** + * An action that executes on each shard sequentially until it finds one that can match the provided + * {@link FieldCapabilitiesIndexRequest#indexFilter()}. In which case the shard is used + * to create the final {@link FieldCapabilitiesIndexResponse}. 
+ */ + public static class AsyncShardsAction { + private final FieldCapabilitiesIndexRequest request; + private final TransportService transportService; + private final DiscoveryNodes nodes; + private final ActionListener listener; + private final GroupShardsIterator shardsIt; + + private volatile int shardIndex = 0; + + public AsyncShardsAction(TransportService transportService, + ClusterService clusterService, + FieldCapabilitiesIndexRequest request, + ActionListener listener) { + this.listener = listener; + this.transportService = transportService; + + ClusterState clusterState = clusterService.state(); + if (logger.isTraceEnabled()) { + logger.trace("executing [{}] based on cluster state version [{}]", request, clusterState.version()); + } + nodes = clusterState.nodes(); + ClusterBlockException blockException = checkGlobalBlock(clusterState); + if (blockException != null) { + throw blockException; + } + + this.request = request; + blockException = checkRequestBlock(clusterState, request.index()); + if (blockException != null) { + throw blockException; + } + + shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), + new String[]{request.index()}, null, null, null, null); + } + + public void start() { + tryNext(null, true); + } + + private void onFailure(ShardRouting shardRouting, Exception e) { + if (e != null) { + logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, request), e); + } + tryNext(e, false); + } + + private ShardRouting nextRoutingOrNull() { + if (shardsIt.size() == 0 || shardIndex >= shardsIt.size()) { + return null; + } + ShardRouting next = shardsIt.get(shardIndex).nextOrNull(); + if (next != null) { + return next; + } + moveToNextShard(); + return nextRoutingOrNull(); + } + + private void moveToNextShard() { + ++ shardIndex; + } + + private void tryNext(@Nullable final Exception lastFailure, boolean canMatchShard) { + ShardRouting shardRouting = nextRoutingOrNull(); + if (shardRouting == null) { + if (canMatchShard == false) { + if (lastFailure == null) { + listener.onResponse(new FieldCapabilitiesIndexResponse(request.index(), Collections.emptyMap(), false)); + } else { + logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, request), lastFailure); + listener.onFailure(lastFailure); + } + } else { + if (lastFailure == null || isShardNotAvailableException(lastFailure)) { + listener.onFailure(new NoShardAvailableActionException(null, + LoggerMessageFormat.format("No shard available for [{}]", request), lastFailure)); + } else { + logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, request), lastFailure); + listener.onFailure(lastFailure); + } + } + return; + } + DiscoveryNode node = nodes.get(shardRouting.currentNodeId()); + if (node == null) { + onFailure(shardRouting, new NoShardAvailableActionException(shardRouting.shardId())); + } else { + request.shardId(shardRouting.shardId()); + if (logger.isTraceEnabled()) { + logger.trace( + "sending request [{}] on node [{}]", + request, + node + ); + } + transportService.sendRequest(node, ACTION_SHARD_NAME, request, + new TransportResponseHandler() { + + @Override + public FieldCapabilitiesIndexResponse read(StreamInput in) throws IOException { + return new FieldCapabilitiesIndexResponse(in); + } + + @Override + public void handleResponse(final FieldCapabilitiesIndexResponse response) { + if (response.canMatch()) { + listener.onResponse(response); + } else { + moveToNextShard(); + tryNext(null, false); + } + } + 
+ @Override + public void handleException(TransportException exp) { + onFailure(shardRouting, exp); + } + }); + } + } + } + + private class ShardTransportHandler implements TransportRequestHandler { + @Override + public void messageReceived(final FieldCapabilitiesIndexRequest request, + final TransportChannel channel, + Task task) throws Exception { + if (logger.isTraceEnabled()) { + logger.trace("executing [{}]", request); + } + ActionListener listener = new ChannelActionListener<>(channel, ACTION_SHARD_NAME, request); + final FieldCapabilitiesIndexResponse resp; + try { + resp = shardOperation(request); + } catch (Exception exc) { + listener.onFailure(exc); + return; + } + listener.onResponse(resp); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java deleted file mode 100644 index d0fe74d89bc64..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.fieldcaps; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.NoShardAvailableActionException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ObjectMapper; -import org.elasticsearch.index.mapper.RuntimeField; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.tasks.Task; -import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.function.Predicate; - -import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; - -public class TransportFieldCapabilitiesIndexAction - extends HandledTransportAction { - - private static final Logger logger = LogManager.getLogger(TransportFieldCapabilitiesIndexAction.class); - - private static final String ACTION_NAME = FieldCapabilitiesAction.NAME + "[index]"; - private static final String ACTION_SHARD_NAME = ACTION_NAME + "[s]"; - public static final ActionType TYPE = - new ActionType<>(ACTION_NAME, FieldCapabilitiesIndexResponse::new); - - private final ClusterService clusterService; - private final TransportService transportService; - private final IndicesService indicesService; - - @Inject - public TransportFieldCapabilitiesIndexAction(ClusterService clusterService, - TransportService transportService, - IndicesService indicesService, - ActionFilters actionFilters) { - super(ACTION_NAME, transportService, actionFilters, FieldCapabilitiesIndexRequest::new); - this.clusterService = clusterService; - this.transportService = transportService; - this.indicesService = indicesService; - transportService.registerRequestHandler(ACTION_SHARD_NAME, ThreadPool.Names.SAME, - FieldCapabilitiesIndexRequest::new, new ShardTransportHandler()); - } - - @Override - protected void doExecute(Task task, FieldCapabilitiesIndexRequest request, ActionListener listener) { - new AsyncShardsAction(request, listener).start(); - } - - private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesIndexRequest request) throws IOException { - final ShardId shardId = request.shardId(); - final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexShard indexShard = indexService.getShard(request.shardId().getId()); - try (Engine.Searcher searcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE)) { - - final SearchExecutionContext searchExecutionContext = indexService.newSearchExecutionContext(shardId.id(), 0, - searcher, request::nowInMillis, null, request.runtimeFields()); - - if (canMatchShard(request, searchExecutionContext) == false) { - return new FieldCapabilitiesIndexResponse(request.index(), Collections.emptyMap(), false); - } - - Set fieldNames = new HashSet<>(); - for (String pattern : request.fields()) { - fieldNames.addAll(searchExecutionContext.getMatchingFieldNames(pattern)); - } - - Predicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); - Map responseMap = new HashMap<>(); - for (String field : fieldNames) { - MappedFieldType ft = searchExecutionContext.getFieldType(field); - boolean isMetadataField = searchExecutionContext.isMetadataField(field); - if (isMetadataField || fieldPredicate.test(ft.name())) { - IndexFieldCapabilities fieldCap = new IndexFieldCapabilities(field, - ft.familyTypeName(), isMetadataField, ft.isSearchable(), ft.isAggregatable(), ft.meta()); - responseMap.put(field, fieldCap); - } else { - continue; - } - - // Check the ancestor of the field to 
find nested and object fields. - // Runtime fields are excluded since they can override any path. - //TODO find a way to do this that does not require an instanceof check - if (ft instanceof RuntimeField == false) { - int dotIndex = ft.name().lastIndexOf('.'); - while (dotIndex > -1) { - String parentField = ft.name().substring(0, dotIndex); - if (responseMap.containsKey(parentField)) { - // we added this path on another field already - break; - } - // checks if the parent field contains sub-fields - if (searchExecutionContext.getFieldType(parentField) == null) { - // no field type, it must be an object field - ObjectMapper mapper = searchExecutionContext.getObjectMapper(parentField); - String type = mapper.nested().isNested() ? "nested" : "object"; - IndexFieldCapabilities fieldCap = new IndexFieldCapabilities(parentField, type, - false, false, false, Collections.emptyMap()); - responseMap.put(parentField, fieldCap); - } - dotIndex = parentField.lastIndexOf('.'); - } - } - } - return new FieldCapabilitiesIndexResponse(request.index(), responseMap, true); - } - } - - private boolean canMatchShard(FieldCapabilitiesIndexRequest req, SearchExecutionContext searchExecutionContext) throws IOException { - if (req.indexFilter() == null || req.indexFilter() instanceof MatchAllQueryBuilder) { - return true; - } - assert req.nowInMillis() != 0L; - ShardSearchRequest searchRequest = new ShardSearchRequest(req.shardId(), req.nowInMillis(), AliasFilter.EMPTY); - searchRequest.source(new SearchSourceBuilder().query(req.indexFilter())); - return SearchService.queryStillMatchesAfterRewrite(searchRequest, searchExecutionContext); - } - - private ClusterBlockException checkGlobalBlock(ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.READ); - } - - private ClusterBlockException checkRequestBlock(ClusterState state, String concreteIndex) { - return state.blocks().indexBlockedException(ClusterBlockLevel.READ, concreteIndex); - } - - /** - * An action that executes on each shard sequentially until it finds one that can match the provided - * {@link FieldCapabilitiesIndexRequest#indexFilter()}. In which case the shard is used - * to create the final {@link FieldCapabilitiesIndexResponse}. 
- */ - class AsyncShardsAction { - private final FieldCapabilitiesIndexRequest request; - private final DiscoveryNodes nodes; - private final ActionListener listener; - private final GroupShardsIterator shardsIt; - - private volatile int shardIndex = 0; - - private AsyncShardsAction(FieldCapabilitiesIndexRequest request, ActionListener listener) { - this.listener = listener; - - ClusterState clusterState = clusterService.state(); - if (logger.isTraceEnabled()) { - logger.trace("executing [{}] based on cluster state version [{}]", request, clusterState.version()); - } - nodes = clusterState.nodes(); - ClusterBlockException blockException = checkGlobalBlock(clusterState); - if (blockException != null) { - throw blockException; - } - - this.request = request; - blockException = checkRequestBlock(clusterState, request.index()); - if (blockException != null) { - throw blockException; - } - - shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), - new String[]{request.index()}, null, null, null, null); - } - - public void start() { - tryNext(null, true); - } - - private void onFailure(ShardRouting shardRouting, Exception e) { - if (e != null) { - logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, request), e); - } - tryNext(e, false); - } - - private ShardRouting nextRoutingOrNull() { - if (shardsIt.size() == 0 || shardIndex >= shardsIt.size()) { - return null; - } - ShardRouting next = shardsIt.get(shardIndex).nextOrNull(); - if (next != null) { - return next; - } - moveToNextShard(); - return nextRoutingOrNull(); - } - - private void moveToNextShard() { - ++ shardIndex; - } - - private void tryNext(@Nullable final Exception lastFailure, boolean canMatchShard) { - ShardRouting shardRouting = nextRoutingOrNull(); - if (shardRouting == null) { - if (canMatchShard == false) { - if (lastFailure == null) { - listener.onResponse(new FieldCapabilitiesIndexResponse(request.index(), Collections.emptyMap(), false)); - } else { - logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, request), lastFailure); - listener.onFailure(lastFailure); - } - } else { - if (lastFailure == null || isShardNotAvailableException(lastFailure)) { - listener.onFailure(new NoShardAvailableActionException(null, - LoggerMessageFormat.format("No shard available for [{}]", request), lastFailure)); - } else { - logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, request), lastFailure); - listener.onFailure(lastFailure); - } - } - return; - } - DiscoveryNode node = nodes.get(shardRouting.currentNodeId()); - if (node == null) { - onFailure(shardRouting, new NoShardAvailableActionException(shardRouting.shardId())); - } else { - request.shardId(shardRouting.shardId()); - if (logger.isTraceEnabled()) { - logger.trace( - "sending request [{}] on node [{}]", - request, - node - ); - } - transportService.sendRequest(node, ACTION_SHARD_NAME, request, - new TransportResponseHandler() { - - @Override - public FieldCapabilitiesIndexResponse read(StreamInput in) throws IOException { - return new FieldCapabilitiesIndexResponse(in); - } - - @Override - public void handleResponse(final FieldCapabilitiesIndexResponse response) { - if (response.canMatch()) { - listener.onResponse(response); - } else { - moveToNextShard(); - tryNext(null, false); - } - } - - @Override - public void handleException(TransportException exp) { - onFailure(shardRouting, exp); - } - }); - } - } - } - - private class ShardTransportHandler implements 
TransportRequestHandler { - @Override - public void messageReceived(final FieldCapabilitiesIndexRequest request, - final TransportChannel channel, - Task task) throws Exception { - if (logger.isTraceEnabled()) { - logger.trace("executing [{}]", request); - } - ActionListener listener = new ChannelActionListener<>(channel, ACTION_SHARD_NAME, request); - final FieldCapabilitiesIndexResponse resp; - try { - resp = shardOperation(request); - } catch (Exception exc) { - listener.onFailure(exc); - return; - } - listener.onResponse(resp); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index 81f951ee72083..77813add33a96 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -36,6 +36,8 @@ * @see org.elasticsearch.client.Requests#getRequest(String) * @see org.elasticsearch.client.Client#get(GetRequest) */ +// It's not possible to suppress the warning at #realtime(boolean) at the method level. +@SuppressWarnings("unchecked") public class GetRequest extends SingleShardRequest implements RealtimeRequest { private String id; diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index eacf9f018cdcc..f721519823a86 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -44,6 +44,8 @@ import java.util.List; import java.util.Locale; +// It's not possible to suppress the warning at #realtime(boolean) at the method level. +@SuppressWarnings("unchecked") public class MultiGetRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest, ToXContentObject { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MultiGetRequest.class); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java b/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java index b2f240b036935..be4410b838271 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java @@ -36,6 +36,7 @@ public IngestActionForwarder(TransportService transportService) { ingestNodes = new DiscoveryNode[0]; } + @SuppressWarnings({"rawtypes", "unchecked"}) public void forwardIngestRequest(ActionType action, ActionRequest request, ActionListener listener) { transportService.sendRequest(randomIngestNode(), action.name(), request, new ActionListenerResponseHandler(listener, action.getResponseReader())); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index a02f599348906..cf539915b138a 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.ingest; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; @@ -22,8 +23,10 @@ import
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.ingest.IngestInfo; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -55,6 +58,12 @@ public PutPipelineTransportAction(ThreadPool threadPool, TransportService transp @Override protected void masterOperation(Task task, PutPipelineRequest request, ClusterState state, ActionListener listener) throws Exception { + if (state.getNodes().getMinNodeVersion().before(Version.V_7_15_0)) { + Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); + if (pipelineConfig.containsKey(Pipeline.META_KEY)) { + throw new IllegalStateException("pipelines with _meta field require minimum node version of " + Version.V_7_15_0); + } + } NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear() .addMetric(NodesInfoRequest.Metric.INGEST.metricName()); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java index e45902c83240f..8c7bd3f582abd 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java @@ -38,7 +38,7 @@ void executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean v List processorResultList = new CopyOnWriteArrayList<>(); CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), null, processorResultList); Pipeline verbosePipeline = new Pipeline(pipeline.getId(), pipeline.getDescription(), pipeline.getVersion(), - verbosePipelineProcessor); + pipeline.getMetadata(), verbosePipelineProcessor); ingestDocument.executePipeline(verbosePipeline, (result, e) -> { handler.accept(new SimulateDocumentVerboseResult(processorResultList), e); }); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index d8aa4989bce33..b9dc90dca0b0c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -13,10 +13,12 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; @@ -33,17 +35,24 @@ import java.util.Objects; public class SimulatePipelineRequest extends ActionRequest implements ToXContentObject { + private static final DeprecationLogger deprecationLogger = 
DeprecationLogger.getLogger(SimulatePipelineRequest.class); private String id; private boolean verbose; private BytesReference source; private XContentType xContentType; + private RestApiVersion restApiVersion; /** * Creates a new request with the given source and its content type */ public SimulatePipelineRequest(BytesReference source, XContentType xContentType) { + this(source, xContentType, RestApiVersion.current()); + } + + public SimulatePipelineRequest(BytesReference source, XContentType xContentType, RestApiVersion restApiVersion) { this.source = Objects.requireNonNull(source); this.xContentType = Objects.requireNonNull(xContentType); + this.restApiVersion = restApiVersion; } SimulatePipelineRequest() { @@ -133,7 +142,8 @@ public boolean isVerbose() { static final String SIMULATED_PIPELINE_ID = "_simulate_pipeline"; - static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, IngestService ingestService) { + static Parsed parseWithPipelineId(String pipelineId, Map config, boolean verbose, IngestService ingestService, + RestApiVersion restApiVersion) { if (pipelineId == null) { throw new IllegalArgumentException("param [pipeline] is null"); } @@ -141,20 +151,21 @@ static Parsed parseWithPipelineId(String pipelineId, Map config, if (pipeline == null) { throw new IllegalArgumentException("pipeline [" + pipelineId + "] does not exist"); } - List ingestDocumentList = parseDocs(config); + List ingestDocumentList = parseDocs(config, restApiVersion); return new Parsed(pipeline, ingestDocumentList, verbose); } - static Parsed parse(Map config, boolean verbose, IngestService ingestService) throws Exception { + static Parsed parse(Map config, boolean verbose, IngestService ingestService, RestApiVersion restApiVersion) + throws Exception { Map pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE); Pipeline pipeline = Pipeline.create( SIMULATED_PIPELINE_ID, pipelineConfig, ingestService.getProcessorFactories(), ingestService.getScriptService() ); - List ingestDocumentList = parseDocs(config); + List ingestDocumentList = parseDocs(config, restApiVersion); return new Parsed(pipeline, ingestDocumentList, verbose); } - private static List parseDocs(Map config) { + private static List parseDocs(Map config, RestApiVersion restApiVersion) { List> docs = ConfigurationUtils.readList(null, null, config, Fields.DOCS); if (docs.isEmpty()) { @@ -165,6 +176,7 @@ private static List parseDocs(Map config) { if ((object instanceof Map) == false) { throw new IllegalArgumentException("malformed [docs] section, should include an inner object"); } + @SuppressWarnings("unchecked") Map dataMap = (Map) object; Map document = ConfigurationUtils.readMap(null, null, dataMap, Fields.SOURCE); @@ -174,6 +186,10 @@ private static List parseDocs(Map config) { dataMap, Metadata.ID.getFieldName(), "_id"); String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, Metadata.ROUTING.getFieldName()); + if (restApiVersion == RestApiVersion.V_7 && dataMap.containsKey(Metadata.TYPE.getFieldName())) { + deprecationLogger.compatibleApiWarning("simulate_pipeline_with_types", + "[types removal] specifying _type in pipeline simulation requests is deprecated"); + } Long version = null; if (dataMap.containsKey(Metadata.VERSION.getFieldName())) { String versionValue = ConfigurationUtils.readOptionalStringOrLongProperty(null, null, @@ -224,4 +240,8 @@ private static List parseDocs(Map config) { } return ingestDocumentList; } + + public RestApiVersion 
getRestApiVersion() { + return restApiVersion; + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index 9ec44f4661904..4600baf9b921f 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -42,9 +42,10 @@ protected void doExecute(Task task, SimulatePipelineRequest request, ActionListe final SimulatePipelineRequest.Parsed simulateRequest; try { if (request.getId() != null) { - simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), ingestService); + simulateRequest = SimulatePipelineRequest.parseWithPipelineId(request.getId(), source, request.isVerbose(), ingestService, + request.getRestApiVersion()); } else { - simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), ingestService); + simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), ingestService, request.getRestApiVersion()); } } catch (Exception e) { listener.onFailure(e); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java index 22a3c5a185043..c28d62945aa65 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java @@ -16,6 +16,8 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.IngestDocument.Metadata; @@ -118,6 +120,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(metadata.getKey().getFieldName(), metadata.getValue().toString()); } } + if(builder.getRestApiVersion() == RestApiVersion.V_7) { + builder.field(MapperService.TYPE_FIELD_NAME, MapperService.SINGLE_MAPPING_NAME); + } Map source = IngestDocument.deepCopyMap(ingestDocument.getSourceAndMetadata()); metadataMap.keySet().forEach(mD -> source.remove(mD.getFieldName())); builder.field(SOURCE_FIELD, source); diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index 6d4bcae30cc7e..a2f25a349b38f 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import java.io.IOException; import java.util.Arrays; @@ -20,7 +21,8 @@ /** * Represents a batch of operations sent from the primary to its replicas during the primary-replica resync. 
*/ -public final class ResyncReplicationRequest extends ReplicatedWriteRequest { +public final class ResyncReplicationRequest extends ReplicatedWriteRequest + implements RawIndexingDataTransportRequest { private final long trimAboveSeqNo; private final Translog.Operation[] operations; diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 80d9e47cb590a..3f9e4fd81da45 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -66,7 +66,7 @@ protected ResyncReplicationResponse newResponseInstance(StreamInput in) throws I } @Override - protected ReplicationOperation.Replicas newReplicasProxy() { + protected ReplicationOperation.Replicas newReplicasProxy() { return new ResyncActionReplicasProxy(); } diff --git a/server/src/main/java/org/elasticsearch/action/search/BottomSortValuesCollector.java b/server/src/main/java/org/elasticsearch/action/search/BottomSortValuesCollector.java index fe342b92165f4..b36820b3692fd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/BottomSortValuesCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/BottomSortValuesCollector.java @@ -15,13 +15,15 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchSortValuesAndFormats; +import static org.elasticsearch.core.Types.forciblyCast; + /** * Utility class to keep track of the bottom doc's sort values in a distributed search. */ class BottomSortValuesCollector { private final int topNSize; private final SortField[] sortFields; - private final FieldComparator[] comparators; + private final FieldComparator[] comparators; private final int[] reverseMuls; private volatile long totalHits; @@ -29,7 +31,7 @@ class BottomSortValuesCollector { BottomSortValuesCollector(int topNSize, SortField[] sortFields) { this.topNSize = topNSize; - this.comparators = new FieldComparator[sortFields.length]; + this.comparators = new FieldComparator[sortFields.length]; this.reverseMuls = new int[sortFields.length]; this.sortFields = sortFields; for (int i = 0; i < sortFields.length; i++) { @@ -90,7 +92,7 @@ private FieldDoc extractBottom(TopFieldDocs topDocs) { private int compareValues(Object[] v1, Object[] v2) { for (int i = 0; i < v1.length; i++) { - int cmp = reverseMuls[i] * comparators[i].compareValues(v1[i], v2[i]); + int cmp = reverseMuls[i] * comparators[i].compareValues(forciblyCast(v1[i]), forciblyCast(v2[i])); if (cmp != 0) { return cmp; } diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 361c34daa5344..09ae052bd1d5e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -36,6 +36,8 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import static org.elasticsearch.core.Types.forciblyCast; + /** * This search phase can be used as an initial search phase to pre-filter search shards based on query rewriting. 
* The queries are rewritten against the shards and based on the rewrite result shards might be able to be excluded @@ -185,7 +187,11 @@ private static boolean shouldSortShards(MinAndMax[] minAndMaxes) { private static Comparator shardComparator(GroupShardsIterator shardsIts, MinAndMax[] minAndMaxes, SortOrder order) { - final Comparator comparator = Comparator.comparing(index -> minAndMaxes[index], MinAndMax.getComparator(order)); + final Comparator comparator = Comparator.comparing( + index -> minAndMaxes[index], + forciblyCast(MinAndMax.getComparator(order)) + ); + return comparator.thenComparing(index -> shardsIts.get(index)); } @@ -197,7 +203,7 @@ private static final class CanMatchSearchPhaseResults extends SearchPhaseResults CanMatchSearchPhaseResults(int size) { super(size); possibleMatches = new FixedBitSet(size); - minAndMaxes = new MinAndMax[size]; + minAndMaxes = new MinAndMax[size]; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 90950f413f94e..448384d7bfee7 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -53,7 +53,9 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in search requests is deprecated."; - + public static final String FIRST_LINE_EMPTY_DEPRECATION_MESSAGE = + "support for empty first line before any action metadata in msearch API is deprecated " + + "and will be removed in the next major version"; public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0; private int maxConcurrentSearchRequests = 0; @@ -185,6 +187,13 @@ public static void readMultiLineFormat(BytesReference data, if (nextMarker == -1) { break; } + // support first line with \n + if (restApiVersion == RestApiVersion.V_7 && nextMarker == 0) { + deprecationLogger.compatibleApiWarning("msearch_first_line_empty", FIRST_LINE_EMPTY_DEPRECATION_MESSAGE); + from = nextMarker + 1; + continue; + } + SearchRequest searchRequest = new SearchRequest(); if (indices != null) { searchRequest.indices(indices); @@ -205,7 +214,8 @@ public static void readMultiLineFormat(BytesReference data, // now parse the action if (nextMarker - from > 0) { try (InputStream stream = data.slice(from, nextMarker - from).streamInput(); - XContentParser parser = xContent.createParser(registry, LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser parser = xContent + .createParserForCompatibility(registry, LoggingDeprecationHandler.INSTANCE, stream, restApiVersion)) { Map source = parser.map(); Object expandWildcards = null; Object ignoreUnavailable = null; @@ -260,7 +270,8 @@ public static void readMultiLineFormat(BytesReference data, } BytesReference bytes = data.slice(from, nextMarker - from); try (InputStream stream = bytes.streamInput(); - XContentParser parser = xContent.createParser(registry, LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser parser = xContent + .createParserForCompatibility(registry, LoggingDeprecationHandler.INSTANCE, stream, restApiVersion)) { consumer.accept(searchRequest, parser); } // move pointers diff --git 
a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 679287c626530..a06f31a2168df 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -38,6 +38,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable PARSER = new ConstructingObjectParser<>("multi_search", true, a -> new MultiSearchResponse(((List)a[0]).toArray(new Item[0]), (long) a[1])); static { diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index fb514b9df03d3..d2decc29b1c61 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -123,6 +123,11 @@ public OpenPointInTimeRequest preference(String preference) { return this; } + @Override + public boolean allowsRemoteIndices() { + return true; + } + @Override public boolean includeDataStreams() { return true; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchExecutionStatsCollector.java b/server/src/main/java/org/elasticsearch/action/search/SearchExecutionStatsCollector.java index 86cd07eb09d4a..9bd1a024411b8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchExecutionStatsCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchExecutionStatsCollector.java @@ -37,8 +37,15 @@ public final class SearchExecutionStatsCollector extends ActionListener.Delegati this.nodeId = nodeId; } - public static BiFunction makeWrapper(ResponseCollectorService service) { - return (connection, originalListener) -> new SearchExecutionStatsCollector(originalListener, service, connection.getNode().getId()); + @SuppressWarnings("unchecked") + public static + BiFunction, ActionListener> + makeWrapper(ResponseCollectorService service) { + return (connection, originalListener) -> new SearchExecutionStatsCollector( + (ActionListener) originalListener, + service, + connection.getNode().getId() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 17aa182be56bb..983f4b288e2c6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -422,7 +422,7 @@ ReducedQueryPhase reducedQueryPhase(Collection quer final boolean hasProfileResults = queryResults.stream().anyMatch(res -> res.queryResult().hasProfileResults()); // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) - final Map> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); + final Map>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map profileResults = hasProfileResults ? 
new HashMap<>(queryResults.size()) : Collections.emptyMap(); int from = 0; @@ -440,7 +440,7 @@ ReducedQueryPhase reducedQueryPhase(Collection quer if (hasSuggest) { assert result.suggest() != null; for (Suggestion> suggestion : result.suggest()) { - List suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); + List> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); suggestionList.add(suggestion); if (suggestion instanceof CompletionSuggestion) { CompletionSuggestion completionSuggestion = (CompletionSuggestion) suggestion; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 7c0e9dbf50887..43e0baf1dafc0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -139,6 +139,11 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { this.source = source; } + @Override + public boolean allowsRemoteIndices() { + return true; + } + /** * Creates a new sub-search request starting from the original search request that is provided. * For internal use only, allows to fork a search request into multiple search requests that will be executed independently. diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 96f8a51c47c8a..a30b5d2a42fcf 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -454,7 +454,7 @@ public SearchRequestBuilder setRescorer(RescorerBuilder rescorer) { * @param window rescore window * @return this for chaining */ - public SearchRequestBuilder setRescorer(RescorerBuilder rescorer, int window) { + public SearchRequestBuilder setRescorer(RescorerBuilder rescorer, int window) { sourceBuilder().clearRescorers(); return addRescorer(rescorer.windowSize(window)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 10c0bf6f68b3b..0eee2f6ebfcd9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -115,7 +115,7 @@ SearchResponse getMergedResponse(Clusters clusters) { List aggs = new ArrayList<>(); Map shards = new TreeMap<>(); List topDocsList = new ArrayList<>(searchResponses.size()); - Map> groupedSuggestions = new HashMap<>(); + Map>> groupedSuggestions = new HashMap<>(); Boolean trackTotalHits = null; TopDocsStats topDocsStats = new TopDocsStats(trackTotalHitsUpTo); @@ -138,7 +138,8 @@ SearchResponse getMergedResponse(Clusters clusters) { Suggest suggest = searchResponse.getSuggest(); if (suggest != null) { for (Suggest.Suggestion> entries : suggest) { - List suggestionList = groupedSuggestions.computeIfAbsent(entries.getName(), s -> new ArrayList<>()); + List> suggestionList = + groupedSuggestions.computeIfAbsent(entries.getName(), s -> new ArrayList<>()); suggestionList.add(entries); } List completionSuggestions = suggest.filter(CompletionSuggestion.class); @@ -297,10 +298,10 @@ private static void setTopDocsShardIndex(Map sh } private static void 
setSuggestShardIndex(Map shards, - Map> groupedSuggestions) { + Map>> groupedSuggestions) { assignShardIndex(shards); - for (List suggestions : groupedSuggestions.values()) { - for (Suggest.Suggestion suggestion : suggestions) { + for (List> suggestions : groupedSuggestions.values()) { + for (Suggest.Suggestion suggestion : suggestions) { if (suggestion instanceof CompletionSuggestion) { CompletionSuggestion completionSuggestion = (CompletionSuggestion) suggestion; for (CompletionSuggestion.Entry options : completionSuggestion) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index bc97fbe9d93d3..abc400734d368 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -76,11 +76,20 @@ public class SearchTransportService { private final TransportService transportService; private final NodeClient client; - private final BiFunction responseWrapper; + private final BiFunction< + Transport.Connection, + SearchActionListener, + ActionListener> responseWrapper; private final Map clientConnections = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - public SearchTransportService(TransportService transportService, NodeClient client, - BiFunction responseWrapper) { + public SearchTransportService( + TransportService transportService, + NodeClient client, + BiFunction< + Transport.Connection, + SearchActionListener, + ActionListener> responseWrapper + ) { this.transportService = transportService; this.client = client; this.responseWrapper = responseWrapper; @@ -125,13 +134,13 @@ public void sendExecuteDfs(Transport.Connection connection, final ShardSearchReq } public void sendExecuteQuery(Transport.Connection connection, final ShardSearchRequest request, SearchTask task, - final SearchActionListener listener) { + final SearchActionListener listener) { // we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request // this used to be the QUERY_AND_FETCH which doesn't exist anymore. final boolean fetchDocuments = request.numberOfShards() == 1; Writeable.Reader reader = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new; - final ActionListener handler = responseWrapper.apply(connection, listener); + final ActionListener handler = responseWrapper.apply(connection, listener); transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task, new ConnectionCountingHandler<>(handler, reader, clientConnections, connection.getNode().getId())); } diff --git a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java index 553b7b988ff7e..c2245edbdf1fd 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java @@ -26,7 +26,7 @@ public class ListenableActionFuture extends AdapterActionFuture { * Registers an {@link ActionListener} to be notified when this future is completed. If the future is already completed then the * listener is notified immediately, on the calling thread. If not, the listener is notified on the thread that completes the listener. 
*/ - @SuppressWarnings("unchecked,rawtypes") + @SuppressWarnings({"unchecked", "rawtypes"}) public void addListener(final ActionListener listener) { final boolean executeImmediate; synchronized (this) { @@ -56,7 +56,7 @@ public void addListener(final ActionListener listener) { } @Override - @SuppressWarnings("unchecked,rawtypes") + @SuppressWarnings({"unchecked", "rawtypes"}) protected void done(boolean success) { super.done(success); final Object listenersToExecute; diff --git a/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java b/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java index ae49afb955c4f..ca223346cdd9d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java @@ -103,12 +103,12 @@ public void onRejection(Exception e) { public abstract boolean shouldRetry(Exception e); - protected long calculateDelay(long previousDelay) { - return Math.min(previousDelay * 2, Integer.MAX_VALUE); + protected long calculateDelayBound(long previousDelayBound) { + return Math.min(previousDelayBound * 2, Integer.MAX_VALUE); } protected long minimumDelayMillis() { - return 1L; + return 0L; } public void onFinished() { @@ -145,10 +145,12 @@ public void onFailure(Exception e) { } else { addException(e); - final long nextDelayMillisBound = calculateDelay(delayMillisBound); + final long nextDelayMillisBound = calculateDelayBound(delayMillisBound); final RetryingListener retryingListener = new RetryingListener(nextDelayMillisBound, caughtExceptions); final Runnable runnable = createRunnable(retryingListener); - final long delayMillis = Randomness.get().nextInt(Math.toIntExact(delayMillisBound)) + minimumDelayMillis(); + int range = Math.toIntExact((delayMillisBound + 1) / 2); + final long delayMillis = Randomness.get().nextInt(range) + delayMillisBound - range + 1L; + assert delayMillis > 0; if (isDone.get() == false) { final TimeValue delay = TimeValue.timeValueMillis(delayMillis); logger.debug(() -> new ParameterizedMessage("retrying action that failed in {}", delay), e); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 4470700752d51..cf948fa90b5e6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -100,7 +100,8 @@ protected class AsyncBroadcastAction { final GroupShardsIterator shardsIts; final int expectedOps; final AtomicInteger counterOps = new AtomicInteger(); - protected final AtomicReferenceArray shardsResponses; + // ShardResponse or Exception + protected final AtomicReferenceArray shardsResponses; protected AsyncBroadcastAction(Task task, Request request, ActionListener listener) { this.task = task; @@ -132,7 +133,7 @@ public void start() { if (shardsIts.size() == 0) { // no shards try { - listener.onResponse(newResponse(request, new AtomicReferenceArray(0), clusterState)); + listener.onResponse(newResponse(request, new AtomicReferenceArray(0), clusterState)); } catch (Exception e) { listener.onFailure(e); } @@ -189,7 +190,6 @@ public void handleException(TransportException e) { } } - @SuppressWarnings({"unchecked"}) protected void onOperation(ShardRouting shard, int shardIndex, ShardResponse response) { 
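Side note on the RetryableAction hunk above: the retry delay is now drawn from the upper half of an exponentially growing bound, so each retry waits at least roughly half the current bound while still carrying jitter, instead of occasionally retrying almost immediately. A small standalone sketch of the same arithmetic with illustrative values (not the Elasticsearch class itself):

import java.util.Random;

public class BackoffSketch {
    public static void main(String[] args) {
        Random random = new Random();
        long delayMillisBound = 50; // hypothetical starting bound, in milliseconds
        for (int attempt = 1; attempt <= 5; attempt++) {
            // Draw the actual delay from the upper half of the current bound:
            // it keeps some jitter but can never collapse back towards zero.
            int range = Math.toIntExact((delayMillisBound + 1) / 2);
            long delayMillis = random.nextInt(range) + delayMillisBound - range + 1L;
            System.out.printf("attempt %d: bound=%dms delay=%dms%n", attempt, delayMillisBound, delayMillis);
            // The bound itself doubles each time, capped at Integer.MAX_VALUE,
            // mirroring calculateDelayBound above.
            delayMillisBound = Math.min(delayMillisBound * 2, Integer.MAX_VALUE);
        }
    }
}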
logger.trace("received response for {}", shard); shardsResponses.set(shardIndex, response); @@ -228,7 +228,7 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int } } - protected AtomicReferenceArray shardsResponses() { + protected AtomicReferenceArray shardsResponses() { return shardsResponses; } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index a37c6ef7aeb96..e403999b44b17 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -110,7 +110,7 @@ public TransportBroadcastByNodeAction( private Response newResponse( Request request, - AtomicReferenceArray responses, + AtomicReferenceArray responses, List unavailableShardExceptions, Map> nodes, ClusterState clusterState) { @@ -126,6 +126,7 @@ private Response newResponse( exceptions.add(new DefaultShardOperationFailedException(shard.getIndexName(), shard.getId(), exception)); } } else { + @SuppressWarnings("unchecked") NodeResponse response = (NodeResponse) responses.get(i); broadcastByNodeResponses.addAll(response.results); totalShards += response.getTotalShards(); @@ -372,8 +373,7 @@ protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) { } protected void onCompletion() { - if (task instanceof CancellableTask && ((CancellableTask)task).isCancelled()) { - listener.onFailure(new TaskCancelledException("task cancelled")); + if (task instanceof CancellableTask && ((CancellableTask)task).notifyIfCancelled(listener)) { return; } @@ -402,7 +402,7 @@ public void messageReceived(final NodeRequest request, TransportChannel channel, if (logger.isTraceEnabled()) { logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards); } - final AtomicArray shardResultOrExceptions = new AtomicArray(totalShards); + final AtomicArray shardResultOrExceptions = new AtomicArray<>(totalShards); final AtomicInteger counter = new AtomicInteger(shards.size()); int shardIndex = -1; @@ -431,15 +431,21 @@ public void onFailure(Exception e) { } } + @SuppressWarnings("unchecked") private void finishHim(NodeRequest request, TransportChannel channel, Task task, AtomicArray shardResultOrExceptions) { - if (task instanceof CancellableTask && ((CancellableTask)task).isCancelled()) { + if (task instanceof CancellableTask) { try { - channel.sendResponse(new TaskCancelledException("task cancelled")); - } catch (IOException e) { - logger.warn("failed to send response", e); + ((CancellableTask) task).ensureNotCancelled(); + } catch (TaskCancelledException e) { + try { + channel.sendResponse(e); + } catch (IOException ioException) { + e.addSuppressed(ioException); + logger.warn("failed to send response", e); + } + return; } - return; } List accumulatedExceptions = new ArrayList<>(); List results = new ArrayList<>(); @@ -461,8 +467,7 @@ private void finishHim(NodeRequest request, TransportChannel channel, Task task, private void onShardOperation(final NodeRequest request, final ShardRouting shardRouting, final Task task, final ActionListener listener) { - if (task instanceof CancellableTask && ((CancellableTask)task).isCancelled()) { - listener.onFailure(new TaskCancelledException("task cancelled")); + if (task instanceof CancellableTask && 
((CancellableTask)task).notifyIfCancelled(listener)) { return; } if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java index bd876c12f144e..4318d8329aed3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.core.TimeValue; -public abstract class NodesOperationRequestBuilder, Response extends BaseNodesResponse, +public abstract class NodesOperationRequestBuilder, Response extends BaseNodesResponse, RequestBuilder extends NodesOperationRequestBuilder> extends ActionRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 56cbf0ae90003..0cb214c305d1b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -39,7 +38,7 @@ import java.util.concurrent.atomic.AtomicReferenceArray; public abstract class TransportNodesAction, - NodesResponse extends BaseNodesResponse, + NodesResponse extends BaseNodesResponse, NodeRequest extends TransportRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction { @@ -254,8 +253,7 @@ private void onFailure(int idx, String nodeId, Throwable t) { } private void finishHim() { - if (isCancelled(task)) { - listener.onFailure(new TaskCancelledException("task cancelled")); + if (task instanceof CancellableTask && ((CancellableTask) task).notifyIfCancelled(listener)) { return; } @@ -264,17 +262,12 @@ private void finishHim() { } } - private boolean isCancelled(Task task) { - return task instanceof CancellableTask && ((CancellableTask) task).isCancelled(); - } - class NodeTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeRequest request, TransportChannel channel, Task task) throws Exception { - if (isCancelled(task)) { - throw new TaskCancelledException("task cancelled"); + if (task instanceof CancellableTask) { + ((CancellableTask) task).ensureNotCancelled(); } - channel.sendResponse(nodeOperation(request, task)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index cafaa07f9b0cb..a1858855c555e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -133,7 +133,7 @@ protected List shards(Request request, ClusterState clusterState) { protected abstract ShardRequest 
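Side note on the cancellation hunks above in TransportBroadcastByNodeAction and TransportNodesAction: instead of constructing a TaskCancelledException at every call site, callers either let the task fail the listener for them (notifyIfCancelled) or let it throw (ensureNotCancelled). A self-contained sketch of that shape, with stand-in types rather than the Elasticsearch classes:

import java.util.function.Consumer;

public class CancellationSketch {

    static class TaskCancelledException extends RuntimeException {
        TaskCancelledException(String message) {
            super(message);
        }
    }

    static class CancellableTask {
        private volatile boolean cancelled;

        void cancel() {
            cancelled = true;
        }

        // Fails the given listener and returns true if the task was already cancelled.
        boolean notifyIfCancelled(Consumer<Exception> onFailure) {
            if (cancelled) {
                onFailure.accept(new TaskCancelledException("task cancelled"));
                return true;
            }
            return false;
        }

        // Throws instead of notifying, for code paths that have no listener at hand.
        void ensureNotCancelled() {
            if (cancelled) {
                throw new TaskCancelledException("task cancelled");
            }
        }
    }

    public static void main(String[] args) {
        CancellableTask task = new CancellableTask();
        task.cancel();
        if (task.notifyIfCancelled(e -> System.out.println("failed: " + e.getMessage()))) {
            return; // the caller just bails out, as the rewritten call sites do
        }
    }
}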
newShardRequest(Request request, ShardId shardId); - private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayList shardsResponses) { + private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayList shardsResponses) { logger.trace("{}: got all shard responses", actionName); int successfulShards = 0; int failedShards = 0; @@ -159,6 +159,6 @@ private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayLi listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures)); } - protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, + protected abstract Response newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 8db3e68b89f52..5910f2393be67 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -60,6 +60,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -1097,7 +1098,8 @@ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, l } /** a wrapper class to encapsulate a request when being sent to a specific allocation id **/ - public static class ConcreteShardRequest extends TransportRequest { + public static class ConcreteShardRequest extends TransportRequest + implements RawIndexingDataTransportRequest { /** {@link AllocationId#getId()} of the shard this request is sent to **/ private final String targetAllocationID; @@ -1188,6 +1190,14 @@ public long getPrimaryTerm() { return primaryTerm; } + @Override + public boolean isRawIndexingData() { + if (request instanceof RawIndexingDataTransportRequest) { + return ((RawIndexingDataTransportRequest) request).isRawIndexingData(); + } + return false; + } + @Override public String toString() { return "request: " + request + ", target allocation id: " + targetAllocationID + ", primary term: " + primaryTerm; diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index e01173a037f79..fd7f308e14419 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -147,6 +147,7 @@ protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) } } + @SuppressWarnings("unchecked") protected void processTasks(TasksRequest request, Consumer operation) { if (request.getTaskId().isSet()) { // we are only checking one task, we can optimize it @@ -173,7 +174,7 @@ protected abstract TasksResponse newResponse(TasksRequest request, List failedNodeExceptions); @SuppressWarnings("unchecked") - protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) { + 
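Side note on the ConcreteShardRequest hunk above: the wrapper only reports raw indexing data when the request it wraps opts in through the marker interface, otherwise it answers false. A minimal standalone sketch of that delegation pattern, with stand-in types rather than the Elasticsearch interfaces:

public class DelegationSketch {

    interface RawIndexingData {
        default boolean isRawIndexingData() {
            return true;
        }
    }

    static class InnerRequest implements RawIndexingData {
    }

    static class WrapperRequest implements RawIndexingData {
        private final Object wrapped;

        WrapperRequest(Object wrapped) {
            this.wrapped = wrapped;
        }

        @Override
        public boolean isRawIndexingData() {
            // Only report raw indexing data when the wrapped request opts in.
            if (wrapped instanceof RawIndexingData) {
                return ((RawIndexingData) wrapped).isRawIndexingData();
            }
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(new WrapperRequest(new InnerRequest()).isRawIndexingData()); // true
        System.out.println(new WrapperRequest("plain request").isRawIndexingData());    // false
    }
}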
protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) { List tasks = new ArrayList<>(); List failedNodeExceptions = new ArrayList<>(); List taskOperationFailures = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index 67c89418a1958..8ec240c0d143e 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -27,6 +27,8 @@ import java.util.List; import java.util.Set; +// It's not possible to suppress teh warning at #realtime(boolean) at a method-level. +@SuppressWarnings("unchecked") public class MultiTermVectorsRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 6d26501106262..ffe2c82c070cc 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -16,6 +16,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.index.mapper.MapperService; import java.io.IOException; import java.util.Arrays; @@ -112,6 +114,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); Failure failure = response.getFailure(); builder.field(Fields._INDEX, failure.getIndex()); + if (builder.getRestApiVersion() == RestApiVersion.V_7) { + builder.field(Fields._TYPE, MapperService.SINGLE_MAPPING_NAME); + } builder.field(Fields._ID, failure.getId()); ElasticsearchException.generateFailureXContent(builder, params, failure.getCause(), true); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 8233872d3c277..5d2b7c60801ba 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -48,6 +48,8 @@ *

* Note, the {@link #index()} and {@link #id(String)} are required. */ +// It's not possible to suppress teh warning at #realtime(boolean) at a method-level. +@SuppressWarnings("unchecked") public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TermVectorsRequest.class); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 0a4456ab07951..da817fa334ebb 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -57,7 +57,6 @@ private static class FieldStrings { public static final String END_OFFSET = "end_offset"; public static final String PAYLOAD = "payload"; public static final String _INDEX = "_index"; - public static final String _TYPE = "_type"; public static final String _ID = "_id"; public static final String _VERSION = "_version"; public static final String FOUND = "found"; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 472afeaa4f738..00ccac0d4bf04 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -18,7 +18,6 @@ import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.PidFile; @@ -238,37 +237,11 @@ static SecureSettings loadSecureSettings(Environment initialEnv) throws Bootstra } static SecureSettings loadSecureSettings(Environment initialEnv, InputStream stdin) throws BootstrapException { - final KeyStoreWrapper keystore; try { - keystore = KeyStoreWrapper.load(initialEnv.configFile()); - } catch (IOException e) { - throw new BootstrapException(e); - } - - SecureString password; - try { - if (keystore != null && keystore.hasPassword()) { - password = readPassphrase(stdin, KeyStoreAwareCommand.MAX_PASSPHRASE_LENGTH); - } else { - password = new SecureString(new char[0]); - } - } catch (IOException e) { - throw new BootstrapException(e); - } - - try (password) { - if (keystore == null) { - final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); - keyStoreWrapper.save(initialEnv.configFile(), new char[0]); - return keyStoreWrapper; - } else { - keystore.decrypt(password.getChars()); - KeyStoreWrapper.upgrade(keystore, initialEnv.configFile(), password.getChars()); - } + return KeyStoreWrapper.bootstrap(initialEnv.configFile(), () -> readPassphrase(stdin, KeyStoreWrapper.MAX_PASSPHRASE_LENGTH)); } catch (Exception e) { throw new BootstrapException(e); } - return keystore; } // visible for tests diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index bbba3522c632a..09785f1b1892c 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -104,6 +104,7 @@ public boolean implies(ProtectionDomain domain, Permission permission) { * Classy puzzler to rethrow any checked exception as an unchecked one. 
*/ private static class Rethrower { + @SuppressWarnings("unchecked") private void rethrow(Throwable t) throws T { throw (T) t; } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 3498f0b1ea725..1275cf83a5e97 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -80,21 +80,29 @@ public void checkPermission(Permission perm) { final Elasticsearch elasticsearch = new Elasticsearch(); int status = main(args, elasticsearch, Terminal.DEFAULT); if (status != ExitCodes.OK) { - final String basePath = System.getProperty("es.logs.base_path"); - // It's possible to fail before logging has been configured, in which case there's no point - // suggesting that the user look in the log file. - if (basePath != null) { - Terminal.DEFAULT.errorPrintln( - "ERROR: Elasticsearch did not exit normally - check the logs at " - + basePath - + System.getProperty("file.separator") - + System.getProperty("es.logs.cluster_name") + ".log" - ); - } + printLogsSuggestion(); exit(status); } } + /** + * Prints a message directing the user to look at the logs. A message is only printed if + * logging has been configured. + */ + static void printLogsSuggestion() { + final String basePath = System.getProperty("es.logs.base_path"); + // It's possible to fail before logging has been configured, in which case there's no point + // suggesting that the user look in the log file. + if (basePath != null) { + Terminal.DEFAULT.errorPrintln( + "ERROR: Elasticsearch did not exit normally - check the logs at " + + basePath + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + ".log" + ); + } + } + private static void overrideDnsCachePolicyProperties() { for (final String property : new String[] {"networkaddress.cache.ttl", "networkaddress.cache.negative.ttl" }) { final String overrideProperty = "es." 
+ property; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index 601b9888c794a..a56e074a47c09 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -58,6 +58,8 @@ void onFatalUncaught(final String threadName, final Throwable t) { t.printStackTrace(Terminal.DEFAULT.getErrorWriter()); // Without a final flush, the stacktrace may not be shown before ES exits Terminal.DEFAULT.flush(); + + Elasticsearch.printLogsSuggestion(); } void onNonFatalUncaught(final String threadName, final Throwable t) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java b/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java index 9aea69e5baa12..ec05c4d1a62e1 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java @@ -14,6 +14,9 @@ import org.elasticsearch.plugins.PluginInfo; import org.elasticsearch.script.ClassPermission; +import javax.management.MBeanPermission; +import javax.management.MBeanServerPermission; +import javax.management.ObjectName; import javax.security.auth.AuthPermission; import javax.security.auth.PrivateCredentialPermission; import javax.security.auth.kerberos.DelegationPermission; @@ -127,7 +130,11 @@ public boolean test(Permission permission) { new AuthPermission("getLoginConfiguration"), new AuthPermission("setLoginConfiguration"), new AuthPermission("createLoginConfiguration.*"), - new AuthPermission("refreshLoginConfiguration") + new AuthPermission("refreshLoginConfiguration"), + new MBeanPermission("*", "*", ObjectName.WILDCARD, + "addNotificationListener,getAttribute,getDomains,getMBeanInfo,getObjectInstance,instantiate,invoke," + + "isInstanceOf,queryMBeans,queryNames,registerMBean,removeNotificationListener,setAttribute,unregisterMBean"), + new MBeanServerPermission("*") ); // While it would be ideal to represent all allowed permissions with concrete instances so that we can // use the builtin implies method to match them against the parsed policy, this does not work in all @@ -287,6 +294,19 @@ static PluginPolicyInfo readPolicyInfo(Path pluginRoot) throws IOException { } } } + // also add spi jars + // TODO: move this to a shared function, or fix plugin layout to have jar files in lib directory + Path spiDir = pluginRoot.resolve("spi"); + if (Files.exists(spiDir)) { + try (DirectoryStream jarStream = Files.newDirectoryStream(spiDir, "*.jar")) { + for (Path jar : jarStream) { + URL url = jar.toRealPath().toUri().toURL(); + if (jars.add(url) == false) { + throw new IllegalStateException("duplicate module/plugin: " + url); + } + } + } + } // parse the plugin's policy file into a set of permissions Policy policy = readPolicy(policyFile.toUri().toURL(), getCodebaseJarMap(jars)); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java index c5f1740651b55..44a6810da36c7 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java @@ -103,7 +103,7 @@ interface LinuxLibrary extends Library { LinuxLibrary lib = null; if (Constants.LINUX) { try { - lib = (LinuxLibrary) 
Native.loadLibrary("c", LinuxLibrary.class); + lib = Native.loadLibrary("c", LinuxLibrary.class); } catch (UnsatisfiedLinkError e) { logger.warn("unable to link C library. native methods (seccomp) will be disabled.", e); } @@ -421,7 +421,7 @@ interface MacLibrary extends Library { MacLibrary lib = null; if (Constants.MAC_OS_X) { try { - lib = (MacLibrary) Native.loadLibrary("c", MacLibrary.class); + lib = Native.loadLibrary("c", MacLibrary.class); } catch (UnsatisfiedLinkError e) { logger.warn("unable to link C library. native methods (seatbelt) will be disabled.", e); } @@ -490,7 +490,7 @@ interface SolarisLibrary extends Library { SolarisLibrary lib = null; if (Constants.SUN_OS) { try { - lib = (SolarisLibrary) Native.loadLibrary("c", SolarisLibrary.class); + lib = Native.loadLibrary("c", SolarisLibrary.class); } catch (UnsatisfiedLinkError e) { logger.warn("unable to link C library. native methods (priv_set) will be disabled.", e); } diff --git a/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java b/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java index c17bd88d40fab..e2ca18263487b 100644 --- a/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java @@ -26,9 +26,6 @@ public KeyStoreAwareCommand(String description) { super(description); } - /** Arbitrarily chosen maximum passphrase length */ - public static final int MAX_PASSPHRASE_LENGTH = 128; - /** * Reads the keystore password from the {@link Terminal}, prompting for verification where applicable and returns it as a * {@link SecureString}. @@ -42,9 +39,9 @@ protected static SecureString readPassword(Terminal terminal, boolean withVerifi final char[] passwordArray; if (withVerification) { passwordArray = terminal.readSecret("Enter new password for the elasticsearch keystore (empty for no password): ", - MAX_PASSPHRASE_LENGTH); + KeyStoreWrapper.MAX_PASSPHRASE_LENGTH); char[] passwordVerification = terminal.readSecret("Enter same password again: ", - MAX_PASSPHRASE_LENGTH); + KeyStoreWrapper.MAX_PASSPHRASE_LENGTH); if (Arrays.equals(passwordArray, passwordVerification) == false) { throw new UserException(ExitCodes.DATA_ERROR, "Passwords are not equal, exiting."); } diff --git a/server/src/main/java/org/elasticsearch/client/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/node/NodeClient.java index 4d15159812177..8ce93acf87832 100644 --- a/server/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -34,8 +34,7 @@ */ public class NodeClient extends AbstractClient { - @SuppressWarnings("rawtypes") - private Map actions; + private Map, TransportAction> actions; private TaskManager taskManager; @@ -52,10 +51,14 @@ public NodeClient(Settings settings, ThreadPool threadPool) { super(settings, threadPool); } - @SuppressWarnings("rawtypes") - public void initialize(Map actions, TaskManager taskManager, Supplier localNodeId, - Transport.Connection localConnection, RemoteClusterService remoteClusterService, - NamedWriteableRegistry namedWriteableRegistry) { + public void initialize( + Map, TransportAction> actions, + TaskManager taskManager, + Supplier localNodeId, + Transport.Connection localConnection, + RemoteClusterService remoteClusterService, + NamedWriteableRegistry namedWriteableRegistry + ) { this.actions = actions; this.taskManager = taskManager; this.localNodeId = localNodeId; @@ -137,14 +140,14 @@ public String 
getLocalNodeId() { /** * Get the {@link TransportAction} for an {@link ActionType}, throwing exceptions if the action isn't available. */ - @SuppressWarnings("unchecked") private < Request extends ActionRequest, Response extends ActionResponse > TransportAction transportAction(ActionType action) { if (actions == null) { throw new IllegalStateException("NodeClient has not been initialized"); } - TransportAction transportAction = actions.get(action); + @SuppressWarnings("unchecked") + TransportAction transportAction = (TransportAction) actions.get(action); if (transportAction == null) { throw new IllegalStateException("failed to find action [" + action + "] to execute"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 01c887d52d88d..a9678d2657e1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -166,17 +166,17 @@ public static List getNamedXWriteables() { } private static void registerClusterCustom(List entries, String name, Reader reader, - Reader diffReader) { + Reader> diffReader) { registerCustom(entries, ClusterState.Custom.class, name, reader, diffReader); } private static void registerMetadataCustom(List entries, String name, Reader reader, - Reader diffReader) { + Reader> diffReader) { registerCustom(entries, Metadata.Custom.class, name, reader, diffReader); } private static void registerCustom(List entries, Class category, String name, - Reader reader, Reader diffReader) { + Reader reader, Reader> diffReader) { entries.add(new Entry(category, name, reader)); entries.add(new Entry(NamedDiff.class, name, diffReader)); } @@ -190,7 +190,7 @@ public IndexNameExpressionResolver getIndexNameExpressionResolver() { public static Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings, List clusterPlugins) { // collect deciders by class so that we can detect duplicates - Map deciders = new LinkedHashMap<>(); + Map, AllocationDecider> deciders = new LinkedHashMap<>(); addAllocationDecider(deciders, new MaxRetryAllocationDecider()); addAllocationDecider(deciders, new ResizeAllocationDecider()); addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider()); @@ -217,7 +217,7 @@ public static Collection createAllocationDeciders(Settings se } /** Add the given allocation decider to the given deciders collection, erroring if the class name is already used. */ - private static void addAllocationDecider(Map deciders, AllocationDecider decider) { + private static void addAllocationDecider(Map, AllocationDecider> deciders, AllocationDecider decider) { if (deciders.put(decider.getClass(), decider) != null) { throw new IllegalArgumentException("Cannot specify allocation decider [" + decider.getClass().getName() + "] twice"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 46fca6cafaf26..923c54d124383 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -52,24 +52,23 @@ /** * Represents the current state of the cluster. *
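Side note on the NodeClient hunk above (the same idiom recurs elsewhere in this diff, e.g. TransportBroadcastByNodeAction and TransportTasksAction): the registry map keeps its wildcard types and the one unavoidable unchecked cast is confined to a single annotated local variable instead of suppressing warnings for the whole method or field. A small sketch of the idiom, using an illustrative class-keyed registry rather than the real actions map:

import java.util.HashMap;
import java.util.Map;

public class RegistrySketch {

    private final Map<Class<?>, Object> handlers = new HashMap<>();

    <T> void register(Class<T> type, T handler) {
        handlers.put(type, handler);
    }

    <T> T lookup(Class<T> type) {
        @SuppressWarnings("unchecked")
        T handler = (T) handlers.get(type); // the only unchecked cast, confined to one local
        return handler;
    }

    public static void main(String[] args) {
        RegistrySketch registry = new RegistrySketch();
        registry.register(String.class, "hello");
        System.out.println(registry.lookup(String.class));
    }
}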

- * The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is - * built on demand from the {@link RoutingTable}. - * The cluster state can be updated only on the master node. All updates are performed by on a - * single thread and controlled by the {@link ClusterService}. After every update the - * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the - * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on - * the type of discovery. + * The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is built on demand from the {@link + * RoutingTable}. The cluster state can be updated only on the master node. All updates are performed by on a single thread and controlled + * by the {@link ClusterService}. After every update the {@link Discovery#publish} method publishes a new version of the cluster state to + * all other nodes in the cluster. *

- * The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state - * differences instead of the entire state on each change. The publishing mechanism should only send differences - * to a node if this node was present in the previous version of the cluster state. If a node was - * not present in the previous version of the cluster state, this node is unlikely to have the previous cluster - * state version and should be sent a complete version. In order to make sure that the differences are applied to the - * correct version of the cluster state, each cluster state version update generates {@link #stateUUID} that uniquely - * identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to - * make sure that the correct diffs are applied. If uuids don’t match, the {@link ClusterStateDiff#apply} method - * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send + * Implements the {@link Diffable} interface in order to support publishing of cluster state differences instead of the entire state on each + * change. The publishing mechanism only sends differences to a node if this node was present in the previous version of the cluster state. + * If a node was not present in the previous version of the cluster state, this node is unlikely to have the previous cluster state version + * and should be sent a complete version. In order to make sure that the differences are applied to the correct version of the cluster + * state, each cluster state version update generates {@link #stateUUID} that uniquely identifies this version of the state. This uuid is + * verified by the {@link ClusterStateDiff#apply} method to make sure that the correct diffs are applied. If uuids don’t match, the {@link + * ClusterStateDiff#apply} method throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. + *

+ * Implements {@link ToXContentFragment} to be exposed in REST APIs (e.g. {@code GET _cluster/state} and {@code POST _cluster/reroute}) and + * to be indexed by monitoring, mostly just for diagnostics purposes. The XContent representation does not need to be 100% faithful since we + * never reconstruct a cluster state from its XContent representation, but the more faithful it is the more useful it is for diagnostics. */ public class ClusterState implements ToXContentFragment, Diffable { @@ -85,6 +84,13 @@ default boolean isPrivate() { return false; } + /** + * Serialize this {@link Custom} for diagnostic purposes, exposed by the
{@code GET _cluster/state}
API etc. The XContent + * representation does not need to be 100% faithful since we never reconstruct a cluster state from its XContent representation, but + * the more faithful it is the more useful it is for diagnostics. + */ + @Override + XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; } private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index 4d6c938e6e1ca..c0abfc0f47f2b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -173,6 +173,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.timeField("start_time_millis", "start_time", entry.startTime); builder.field("repository_state_id", entry.repositoryStateId); + builder.field("state", entry.state); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 92960c1ca24e1..947ca3acde233 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -22,10 +22,12 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryOperation; import org.elasticsearch.repositories.RepositoryShardId; +import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.snapshots.InFlightShardSnapshotStates; import org.elasticsearch.snapshots.Snapshot; @@ -62,7 +64,7 @@ public static SnapshotsInProgress of(List entries) { } public SnapshotsInProgress(StreamInput in) throws IOException { - this(in.readList(SnapshotsInProgress.Entry::new)); + this(in.readList(SnapshotsInProgress.Entry::readFrom)); } private SnapshotsInProgress(List entries) { @@ -196,12 +198,7 @@ private static boolean assertConsistentEntries(List entries) { final Map>> assignedShardsByRepo = new HashMap<>(); final Map>> queuedShardsByRepo = new HashMap<>(); for (Entry entry : entries) { - for (ObjectObjectCursor shard : entry.shards()) { - final ShardId sid = shard.key; - assert assertShardStateConsistent(entries, assignedShardsByRepo, queuedShardsByRepo, entry, sid.getIndexName(), sid.id(), - shard.value); - } - for (ObjectObjectCursor shard : entry.clones()) { + for (ObjectObjectCursor shard : entry.shardsByRepoShardId()) { final RepositoryShardId sid = shard.key; assert assertShardStateConsistent(entries, assignedShardsByRepo, queuedShardsByRepo, entry, sid.indexName(), sid.shardId(), shard.value); @@ -349,7 +346,7 @@ public static class ShardSnapshotStatus implements Writeable { private final String nodeId; @Nullable - private final String generation; + private final ShardGeneration generation; @Nullable private final String reason; @@ -357,15 +354,15 @@ public static class ShardSnapshotStatus implements Writeable { @Nullable // 
only present in state SUCCESS; may be null even in SUCCESS if this state came over the wire from an older node private final ShardSnapshotResult shardSnapshotResult; - public ShardSnapshotStatus(String nodeId, String generation) { + public ShardSnapshotStatus(String nodeId, ShardGeneration generation) { this(nodeId, ShardState.INIT, generation); } - public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, @Nullable String generation) { + public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, @Nullable ShardGeneration generation) { this(nodeId, assertNotSuccess(state), null, generation); } - public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, String reason, @Nullable String generation) { + public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, String reason, @Nullable ShardGeneration generation) { this(nodeId, assertNotSuccess(state), reason, generation, null); } @@ -373,7 +370,7 @@ private ShardSnapshotStatus( @Nullable String nodeId, ShardState state, String reason, - @Nullable String generation, + @Nullable ShardGeneration generation, @Nullable ShardSnapshotResult shardSnapshotResult) { this.nodeId = nodeId; this.state = state; @@ -407,7 +404,7 @@ private boolean assertConsistent() { public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { String nodeId = in.readOptionalString(); final ShardState state = ShardState.fromValue(in.readByte()); - final String generation = in.readOptionalString(); + final ShardGeneration generation = in.readOptionalWriteable(ShardGeneration::new); final String reason = in.readOptionalString(); final ShardSnapshotResult shardSnapshotResult = in.readOptionalWriteable(ShardSnapshotResult::new); if (state == ShardState.QUEUED) { @@ -426,7 +423,7 @@ public String nodeId() { } @Nullable - public String generation() { + public ShardGeneration generation() { return this.generation; } @@ -434,6 +431,13 @@ public String reason() { return reason; } + public ShardSnapshotStatus withUpdatedGeneration(ShardGeneration newGeneration) { + assert state == ShardState.SUCCESS : "can't move generation in state " + state; + return new ShardSnapshotStatus(nodeId, state, reason, newGeneration, + shardSnapshotResult == null ? null : + new ShardSnapshotResult(newGeneration, shardSnapshotResult.getSize(), shardSnapshotResult.getSegmentCount())); + } + @Nullable public ShardSnapshotResult shardSnapshotResult() { assert state == ShardState.SUCCESS : "result is unavailable in state " + state; @@ -453,7 +457,7 @@ public boolean isActive() { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(nodeId); out.writeByte(state.value); - out.writeOptionalString(generation); + out.writeOptionalWriteable(generation); out.writeOptionalString(reason); out.writeOptionalWriteable(shardSnapshotResult); } @@ -500,6 +504,9 @@ public static class Entry implements Writeable, ToXContent, RepositoryOperation * Map of index name to {@link IndexId}. */ private final Map indices; + + private final Map snapshotIndices; + private final List dataStreams; private final List featureStates; private final long startTime; @@ -513,10 +520,9 @@ public static class Entry implements Writeable, ToXContent, RepositoryOperation private final SnapshotId source; /** - * Map of {@link RepositoryShardId} to {@link ShardSnapshotStatus} tracking the state of each shard clone operation in this entry - * the same way {@link #shards} tracks the status of each shard snapshot operation in non-clone entries. 
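Side note on the ShardSnapshotStatus hunks above: the shard generation moves from a bare String to a dedicated ShardGeneration value type, so generations can no longer be confused with other strings and carry their own wire serialization (the hunks read and write it via readOptionalWriteable/writeOptionalWriteable). A tiny, purely illustrative sketch of such a value wrapper, not the real class:

import java.util.Objects;

public final class GenerationSketch {

    private final String rawGeneration;

    public GenerationSketch(String rawGeneration) {
        this.rawGeneration = Objects.requireNonNull(rawGeneration);
    }

    @Override
    public boolean equals(Object other) {
        return other instanceof GenerationSketch && rawGeneration.equals(((GenerationSketch) other).rawGeneration);
    }

    @Override
    public int hashCode() {
        return rawGeneration.hashCode();
    }

    @Override
    public String toString() {
        return rawGeneration;
    }

    public static void main(String[] args) {
        // Two wrappers around the same raw value compare equal, unlike arbitrary strings
        // that could be mixed up with node ids, failure reasons, or other fields.
        System.out.println(new GenerationSketch("gen-abc123").equals(new GenerationSketch("gen-abc123"))); // true
    }
}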
+ * Map of {@link RepositoryShardId} to {@link ShardSnapshotStatus} tracking the state of each shard operation in this entry. */ - private final ImmutableOpenMap clones; + private final ImmutableOpenMap shardStatusByRepoShardId; @Nullable private final Map userMetadata; @Nullable private final String failure; @@ -534,14 +540,14 @@ private Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, St List dataStreams, List featureStates, long startTime, long repositoryStateId, ImmutableOpenMap shards, String failure, Map userMetadata, Version version, @Nullable SnapshotId source, - @Nullable ImmutableOpenMap clones) { + ImmutableOpenMap shardStatusByRepoShardId) { this.state = state; this.snapshot = snapshot; this.includeGlobalState = includeGlobalState; this.partial = partial; this.indices = Map.copyOf(indices); this.dataStreams = List.copyOf(dataStreams); - this.featureStates = Collections.unmodifiableList(featureStates); + this.featureStates = List.copyOf(featureStates); this.startTime = startTime; this.shards = shards; this.repositoryStateId = repositoryStateId; @@ -550,20 +556,36 @@ private Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, St this.version = version; this.source = source; if (source == null) { - assert clones == null || clones.isEmpty() : "Provided [" + clones + "] but no source"; - this.clones = ImmutableOpenMap.of(); + assert shardStatusByRepoShardId == null || shardStatusByRepoShardId.isEmpty() + : "Provided explict repo shard id statuses [" + shardStatusByRepoShardId + "] but no source"; + final Map res = new HashMap<>(indices.size()); + final ImmutableOpenMap.Builder byRepoShardIdBuilder = + ImmutableOpenMap.builder(shards.size()); + for (ObjectObjectCursor entry : shards) { + final ShardId shardId = entry.key; + final IndexId indexId = indices.get(shardId.getIndexName()); + final Index index = shardId.getIndex(); + final Index existing = res.put(indexId.getName(), index); + assert existing == null || existing.equals(index) : "Conflicting indices [" + existing + "] and [" + index + "]"; + byRepoShardIdBuilder.put(new RepositoryShardId(indexId, shardId.id()), entry.value); + } + this.shardStatusByRepoShardId = byRepoShardIdBuilder.build(); + snapshotIndices = Map.copyOf(res); } else { - this.clones = clones; + assert shards.isEmpty(); + this.shardStatusByRepoShardId = shardStatusByRepoShardId; + snapshotIndices = Map.of(); } - assert assertShardsConsistent(this.source, this.state, this.indices, this.shards, this.clones); + assert assertShardsConsistent(this.source, this.state, this.indices, this.shards, this.shardStatusByRepoShardId); } - private Entry(StreamInput in) throws IOException { - snapshot = new Snapshot(in); - includeGlobalState = in.readBoolean(); - partial = in.readBoolean(); - state = State.fromValue(in.readByte()); + private static Entry readFrom(StreamInput in) throws IOException { + final Snapshot snapshot = new Snapshot(in); + final boolean includeGlobalState = in.readBoolean(); + final boolean partial = in.readBoolean(); + final State state = State.fromValue(in.readByte()); final int indexCount = in.readVInt(); + final Map indices; if (indexCount == 0) { indices = Collections.emptyMap(); } else { @@ -574,21 +596,25 @@ private Entry(StreamInput in) throws IOException { } indices = Collections.unmodifiableMap(idx); } - startTime = in.readLong(); - shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom); - repositoryStateId = in.readLong(); - failure = in.readOptionalString(); - userMetadata = 
in.readMap(); - version = Version.readVersion(in); - dataStreams = in.readStringList(); - source = in.readOptionalWriteable(SnapshotId::new); - clones = in.readImmutableMap(RepositoryShardId::new, ShardSnapshotStatus::readFrom); - featureStates = Collections.unmodifiableList(in.readList(SnapshotFeatureInfo::new)); + final long startTime = in.readLong(); + final ImmutableOpenMap shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom); + final long repositoryStateId = in.readLong(); + final String failure = in.readOptionalString(); + final Map userMetadata = in.readMap(); + final Version version = Version.readVersion(in); + final List dataStreams = in.readStringList(); + final SnapshotId source = in.readOptionalWriteable(SnapshotId::new); + final ImmutableOpenMap clones = + in.readImmutableMap(RepositoryShardId::new, ShardSnapshotStatus::readFrom); + final List featureStates = Collections.unmodifiableList(in.readList(SnapshotFeatureInfo::new)); + return new SnapshotsInProgress.Entry( + snapshot, includeGlobalState, partial, state, indices, dataStreams, featureStates, startTime, repositoryStateId, + shards, failure, userMetadata, version, source, clones); } private static boolean assertShardsConsistent(SnapshotId source, State state, Map indices, ImmutableOpenMap shards, - ImmutableOpenMap clones) { + ImmutableOpenMap statusByRepoShardId) { if ((state == State.INIT || state == State.ABORTED) && shards.isEmpty()) { return true; } @@ -602,15 +628,23 @@ private static boolean assertShardsConsistent(SnapshotId source, State state, Ma assert source == null || indexNames.isEmpty() == false : "No empty snapshot clones allowed"; assert source != null || indexNames.equals(indexNamesInShards) : "Indices in shards " + indexNamesInShards + " differ from expected indices " + indexNames + " for state [" + state + "]"; - final boolean shardsCompleted = completed(shards.values()) && completed(clones.values()); + final boolean shardsCompleted = completed(shards.values()) && completed(statusByRepoShardId.values()); // Check state consistency for normal snapshots and started clone operations - if (source == null || clones.isEmpty() == false) { + if (source == null || statusByRepoShardId.isEmpty() == false) { assert (state.completed() && shardsCompleted) || (state.completed() == false && shardsCompleted == false) : "Completed state must imply all shards completed but saw state [" + state + "] and shards " + shards; } if (source != null && state.completed()) { - assert hasFailures(clones) == false || state == State.FAILED - : "Failed shard clones in [" + clones + "] but state was [" + state + "]"; + assert hasFailures(statusByRepoShardId) == false || state == State.FAILED + : "Failed shard clones in [" + statusByRepoShardId + "] but state was [" + state + "]"; + } + if (source == null) { + assert shards.size() == statusByRepoShardId.size(); + for (ObjectObjectCursor entry : shards) { + final ShardId routingShardId = entry.key; + assert statusByRepoShardId.get(new RepositoryShardId(indices.get(routingShardId.getIndexName()), routingShardId.id())) + == entry.value : "found inconsistent values tracked by routing- and repository shard id"; + } } return true; } @@ -619,17 +653,18 @@ public Entry withRepoGen(long newRepoGen) { assert newRepoGen > repositoryStateId : "Updated repository generation [" + newRepoGen + "] must be higher than current generation [" + repositoryStateId + "]"; return new Entry(snapshot, includeGlobalState, partial, state, indices, dataStreams, featureStates, startTime, 
newRepoGen, - shards, failure, userMetadata, version, source, clones); + shards, failure, userMetadata, version, source, source == null ? ImmutableOpenMap.of() : shardStatusByRepoShardId); } public Entry withClones(ImmutableOpenMap updatedClones) { - if (updatedClones.equals(clones)) { + if (updatedClones.equals(shardStatusByRepoShardId)) { return this; } + assert shards.isEmpty(); return new Entry(snapshot, includeGlobalState, partial, completed(updatedClones.values()) ? (hasFailures(updatedClones) ? State.FAILED : State.SUCCESS) : - state, indices, dataStreams, featureStates, startTime, repositoryStateId, shards, failure, userMetadata, - version, source, updatedClones); + state, indices, dataStreams, featureStates, startTime, repositoryStateId, ImmutableOpenMap.of(), failure, + userMetadata, version, source, updatedClones); } /** @@ -661,12 +696,23 @@ public Entry abort() { if (allQueued) { return null; } - return fail(shardsBuilder.build(), completed ? State.SUCCESS : State.ABORTED, ABORTED_FAILURE_TEXT); - } - - public Entry fail(ImmutableOpenMap shards, State state, String failure) { - return new Entry(snapshot, includeGlobalState, partial, state, indices, dataStreams, featureStates, startTime, - repositoryStateId, shards, failure, userMetadata, version, source, clones); + return new Entry( + snapshot, + includeGlobalState, + partial, + completed ? State.SUCCESS : State.ABORTED, + indices, + dataStreams, + featureStates, + startTime, + repositoryStateId, + shardsBuilder.build(), + ABORTED_FAILURE_TEXT, + userMetadata, + version, + source, + ImmutableOpenMap.of() + ); } /** @@ -692,7 +738,7 @@ public Entry withShardStates(ImmutableOpenMap shar public Entry withStartedShards(ImmutableOpenMap shards) { final SnapshotsInProgress.Entry updated = new Entry(snapshot, includeGlobalState, partial, state, indices, dataStreams, featureStates, startTime, repositoryStateId, shards, failure, userMetadata, version); - assert updated.state().completed() == false && completed(updated.shards().values()) == false + assert updated.state().completed() == false && completed(updated.shardsByRepoShardId().values()) == false : "Only running snapshots allowed but saw [" + updated + "]"; return updated; } @@ -706,10 +752,25 @@ public Snapshot snapshot() { return this.snapshot; } + public ImmutableOpenMap shardsByRepoShardId() { + return shardStatusByRepoShardId; + } + + public Index indexByName(String name) { + assert isClone() == false : "tried to get routing index for clone entry [" + this + "]"; + return snapshotIndices.get(name); + } + public ImmutableOpenMap shards() { + assert isClone() == false : "tried to get routing shards for clone entry [" + this + "]"; return this.shards; } + public ShardId shardId(RepositoryShardId repositoryShardId) { + assert isClone() == false : "must not be called for clone [" + this + "]"; + return new ShardId(indexByName(repositoryShardId.indexName()), repositoryShardId.shardId()); + } + public State state() { return state; } @@ -767,10 +828,6 @@ public boolean isClone() { return source != null; } - public ImmutableOpenMap clones() { - return clones; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -791,7 +848,7 @@ public boolean equals(Object o) { if (Objects.equals(userMetadata, ((Entry) o).userMetadata) == false) return false; if (version.equals(entry.version) == false) return false; if (Objects.equals(source, ((Entry) o).source) == false) return false; - if (clones.equals(((Entry) o).clones) == false) return false; + if 
(shardStatusByRepoShardId.equals(((Entry) o).shardStatusByRepoShardId) == false) return false; if (featureStates.equals(entry.featureStates) == false) return false; return true; @@ -812,7 +869,7 @@ public int hashCode() { result = 31 * result + (userMetadata == null ? 0 : userMetadata.hashCode()); result = 31 * result + version.hashCode(); result = 31 * result + (source == null ? 0 : source.hashCode()); - result = 31 * result + clones.hashCode(); + result = 31 * result + shardStatusByRepoShardId.hashCode(); result = 31 * result + featureStates.hashCode(); return result; } @@ -859,7 +916,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("source", source); builder.startArray("clones"); { - for (ObjectObjectCursor shardEntry : clones) { + for (ObjectObjectCursor shardEntry : shardStatusByRepoShardId) { RepositoryShardId shardId = shardEntry.key; writeShardSnapshotStatus(builder, shardId.index(), shardId.shardId(), shardEntry.value); } @@ -877,7 +934,22 @@ private void writeShardSnapshotStatus(XContentBuilder builder, ToXContent indexI builder.field("index", indexId); builder.field("shard", shardId); builder.field("state", status.state()); + builder.field("generation", status.generation()); builder.field("node", status.nodeId()); + + if (status.state() == ShardState.SUCCESS) { + final ShardSnapshotResult result = status.shardSnapshotResult(); + builder.startObject("result"); + builder.field("generation", result.getGeneration()); + builder.humanReadableField("size_in_bytes", "size", result.getSize()); + builder.field("segments", result.getSegmentCount()); + builder.endObject(); + } + + if (status.reason() != null) { + builder.field("reason", status.reason()); + } + builder.endObject(); } @@ -896,7 +968,11 @@ public void writeTo(StreamOutput out) throws IOException { Version.writeVersion(version, out); out.writeStringCollection(dataStreams); out.writeOptionalWriteable(source); - out.writeMap(clones); + if (source == null) { + out.writeMap(ImmutableOpenMap.of()); + } else { + out.writeMap(shardStatusByRepoShardId); + } out.writeList(featureStates); } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 81850c5640769..4ebbc83287ccd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -135,7 +135,7 @@ public void handleException(TransportException exp) { } } - private static final Class[] MASTER_CHANNEL_EXCEPTIONS = new Class[]{ + private static final Class[] MASTER_CHANNEL_EXCEPTIONS = new Class[]{ NotMasterException.class, ConnectTransportException.class, FailedToCommitClusterStateException.class diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 17ee5f44d7e21..8cb727977d5b7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -187,14 +187,14 @@ String getDescription() { final VoteCollection voteCollection = new VoteCollection(); foundPeers.forEach(voteCollection::addVote); - final String isQuorumOrNot + final String haveDiscoveredQuorum = 
electionStrategy.isElectionQuorum(clusterState.nodes().getLocalNode(), currentTerm, clusterState.term(), clusterState.version(), clusterState.getLastCommittedConfiguration(), clusterState.getLastAcceptedConfiguration(), - voteCollection) ? "is a quorum" : "is not a quorum"; + voteCollection) ? "have discovered possible quorum" : "have only discovered non-quorum"; return String.format(Locale.ROOT, - "master not discovered or elected yet, an election requires %s, have discovered [%s] which %s; %s", - quorumDescription, foundPeersDescription, isQuorumOrNot, discoveryWillContinueDescription); + "master not discovered or elected yet, an election requires %s, %s [%s]; %s", + quorumDescription, haveDiscoveredQuorum, foundPeersDescription, discoveryWillContinueDescription); } private String describeQuorum(VotingConfiguration votingConfiguration) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index 3193803520bad..471d3ac61075d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -24,6 +24,7 @@ import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ReceiveTimeoutTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportConnectionListener; @@ -261,6 +262,7 @@ public String toString() { private class FollowerChecker { private final DiscoveryNode discoveryNode; private int failureCountSinceLastSuccess; + private int timeoutCountSinceLastSuccess; FollowerChecker(DiscoveryNode discoveryNode) { this.discoveryNode = discoveryNode; @@ -296,6 +298,7 @@ public void handleResponse(TransportResponse.Empty response) { } failureCountSinceLastSuccess = 0; + timeoutCountSinceLastSuccess = 0; logger.trace("{} check successful", FollowerChecker.this); scheduleNextWakeUp(); } @@ -307,7 +310,11 @@ public void handleException(TransportException exp) { return; } - failureCountSinceLastSuccess++; + if (exp instanceof ReceiveTimeoutTransportException) { + timeoutCountSinceLastSuccess++; + } else { + failureCountSinceLastSuccess++; + } final String reason; if (exp instanceof ConnectTransportException @@ -317,9 +324,10 @@ public void handleException(TransportException exp) { } else if (exp.getCause() instanceof NodeHealthCheckFailureException) { logger.debug(() -> new ParameterizedMessage("{} health check failed", FollowerChecker.this), exp); reason = "health check failed"; - } else if (failureCountSinceLastSuccess >= followerCheckRetryCount) { + } else if (failureCountSinceLastSuccess + timeoutCountSinceLastSuccess >= followerCheckRetryCount) { logger.debug(() -> new ParameterizedMessage("{} failed too many times", FollowerChecker.this), exp); - reason = "followers check retry count exceeded"; + reason = "followers check retry count exceeded [timeouts=" + timeoutCountSinceLastSuccess + + ", failures=" + failureCountSinceLastSuccess + "]"; } else { logger.debug(() -> new ParameterizedMessage("{} failed, retrying", FollowerChecker.this), exp); scheduleNextWakeUp(); @@ -373,6 +381,7 @@ public String toString() { return "FollowerChecker{" + "discoveryNode=" + discoveryNode + ", failureCountSinceLastSuccess=" + 
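A self-contained sketch of the retry accounting introduced in the FollowersChecker hunk above: timeouts and other failures are counted separately so the failure reason can report both, but a follower is still failed once their combined count reaches the configured retry count. The class and method names below are illustrative only; the real logic lives inside the transport response handler.

    // Illustrative only: models the counting logic, not the real transport handling.
    final class FollowerRetryState {
        private final int retryCount;          // value of the follower check retry count setting
        private int failures;
        private int timeouts;

        FollowerRetryState(int retryCount) {
            this.retryCount = retryCount;
        }

        void onSuccess() {
            failures = 0;
            timeouts = 0;
        }

        /** Returns a failure reason once the combined count is exceeded, otherwise null (retry). */
        String onException(boolean isTimeout) {
            if (isTimeout) {
                timeouts++;
            } else {
                failures++;
            }
            if (failures + timeouts >= retryCount) {
                return "followers check retry count exceeded [timeouts=" + timeouts + ", failures=" + failures + "]";
            }
            return null; // schedule another check
        }
    }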
failureCountSinceLastSuccess + + ", timeoutCountSinceLastSuccess=" + timeoutCountSinceLastSuccess + ", [" + FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey() + "]=" + followerCheckRetryCount + '}'; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java index ef1e86683d4b6..9f289ef65ce48 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java @@ -204,12 +204,14 @@ public static class AddDataStreamAlias extends AliasAction { private final String aliasName; private final String dataStreamName; private final Boolean isWriteDataStream; + private final String filter; - public AddDataStreamAlias(String aliasName, String dataStreamName, Boolean isWriteDataStream) { + public AddDataStreamAlias(String aliasName, String dataStreamName, Boolean isWriteDataStream, String filter) { super(dataStreamName); this.aliasName = aliasName; this.dataStreamName = dataStreamName; this.isWriteDataStream = isWriteDataStream; + this.filter = filter; } public String getAliasName() { @@ -231,8 +233,8 @@ boolean removeIndex() { @Override boolean apply(NewAliasValidator aliasValidator, Metadata.Builder metadata, IndexMetadata index) { - aliasValidator.validate(aliasName, null, null, isWriteDataStream); - return metadata.put(aliasName, dataStreamName, isWriteDataStream); + aliasValidator.validate(aliasName, null, filter, isWriteDataStream); + return metadata.put(aliasName, dataStreamName, isWriteDataStream, filter); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index f05f842b95869..0f76a98bfd17f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MapperService; import java.io.IOException; @@ -290,7 +291,7 @@ public String getTimestampField() { */ public Map getDataStreamMappingSnippet() { // _data_stream_timestamp meta fields default to @timestamp: - return Map.of(MapperService.SINGLE_MAPPING_NAME, Map.of("_data_stream_timestamp", Map.of("enabled", true))); + return Map.of(MapperService.SINGLE_MAPPING_NAME, Map.of(DataStreamTimestampFieldMapper.NAME, Map.of("enabled", true))); } public boolean isHidden() { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java index 7e5ed869461b7..598cddd4e8e60 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java @@ -7,8 +7,12 @@ */ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.ObjectParser; import 
org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -16,11 +20,15 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.function.Predicate; @@ -30,34 +38,77 @@ public class DataStreamAlias extends AbstractDiffable implement public static final ParseField DATA_STREAMS_FIELD = new ParseField("data_streams"); public static final ParseField WRITE_DATA_STREAM_FIELD = new ParseField("write_data_stream"); + public static final ParseField FILTER_FIELD = new ParseField("filter"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_alias", false, - (args, name) -> new DataStreamAlias(name, (List) args[0], (String) args[1]) + (args, name) -> new DataStreamAlias(name, (List) args[0], (String) args[1], (CompressedXContent) args[2]) ); static { PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), DATA_STREAMS_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), WRITE_DATA_STREAM_FIELD); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_EMBEDDED_OBJECT || + p.currentToken() == XContentParser.Token.VALUE_STRING) { + return new CompressedXContent(p.binaryValue()); + } else if (p.currentToken() == XContentParser.Token.START_OBJECT) { + XContentBuilder builder = XContentFactory.jsonBuilder().map(p.mapOrdered()); + return new CompressedXContent(BytesReference.bytes(builder)); + } else { + assert false : "unexpected token [" + p.currentToken() + " ]"; + return null; + } + }, + FILTER_FIELD, + ObjectParser.ValueType.VALUE_OBJECT_ARRAY + ); } private final String name; private final List dataStreams; private final String writeDataStream; + private final CompressedXContent filter; - public DataStreamAlias(String name, List dataStreams, String writeDataStream) { + private DataStreamAlias(String name, List dataStreams, String writeDataStream, CompressedXContent filter) { this.name = Objects.requireNonNull(name); this.dataStreams = List.copyOf(dataStreams); this.writeDataStream = writeDataStream; + this.filter = filter; assert writeDataStream == null || dataStreams.contains(writeDataStream); } + public DataStreamAlias(String name, List dataStreams, String writeDataStream, Map filter) { + this(name, dataStreams, writeDataStream, compress(filter)); + } + + private static CompressedXContent compress(Map filterAsMap) { + if (filterAsMap == null) { + return null; + } + + try { + XContentBuilder builder = XContentFactory.jsonBuilder().map(filterAsMap); + return new CompressedXContent(BytesReference.bytes(builder)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Map decompress(CompressedXContent filter) { + String filterAsString = filter.string(); + return XContentHelper.convertToMap(XContentFactory.xContent(filterAsString), filterAsString, true); + } + public 
DataStreamAlias(StreamInput in) throws IOException { this.name = in.readString(); this.dataStreams = in.readStringList(); this.writeDataStream = in.readOptionalString(); + this.filter = in.getVersion().onOrAfter(Version.V_7_15_0) && in.readBoolean() ? CompressedXContent.readCompressedString(in) : null; } /** @@ -85,15 +136,20 @@ public String getWriteDataStream() { return writeDataStream; } + public CompressedXContent getFilter() { + return filter; + } + /** * Returns a new {@link DataStreamAlias} instance with the provided data stream name added to it as a new member. * If the provided isWriteDataStream is set to true then the provided data stream is also set as write data stream. * If the provided isWriteDataStream is set to false and the provided data stream is also the write data stream of * this instance then the returned data stream alias instance's write data stream is unset. + * If the provided filter is the same as the filter of this alias then this instance isn't updated, otherwise it is updated. * * The same instance is returned if the attempted addition of the provided data stream didn't change this instance. */ - public DataStreamAlias addDataStream(String dataStream, Boolean isWriteDataStream) { + public DataStreamAlias update(String dataStream, Boolean isWriteDataStream, Map filterAsMap) { String writeDataStream = this.writeDataStream; if (isWriteDataStream != null) { if (isWriteDataStream) { @@ -105,10 +161,24 @@ public DataStreamAlias addDataStream(String dataStream, Boolean isWriteDataStrea } } + boolean filterUpdated; + CompressedXContent filter; + if (filterAsMap != null) { + filter = compress(filterAsMap); + if (this.filter == null) { + filterUpdated = true; + } else { + filterUpdated = filterAsMap.equals(decompress(this.filter)) == false; + } + } else { + filter = this.filter; + filterUpdated = false; + } + Set dataStreams = new HashSet<>(this.dataStreams); boolean added = dataStreams.add(dataStream); - if (added || Objects.equals(this.writeDataStream, writeDataStream) == false) { - return new DataStreamAlias(name, List.copyOf(dataStreams), writeDataStream); + if (added || Objects.equals(this.writeDataStream, writeDataStream) == false || filterUpdated) { + return new DataStreamAlias(name, List.copyOf(dataStreams), writeDataStream, filter); } else { return this; } @@ -133,7 +203,7 @@ public DataStreamAlias removeDataStream(String dataStream) { if (dataStream.equals(writeDataStream)) { writeDataStream = null; } - return new DataStreamAlias(name, List.copyOf(dataStreams), writeDataStream); + return new DataStreamAlias(name, List.copyOf(dataStreams), writeDataStream, filter); } } @@ -152,7 +222,7 @@ public DataStreamAlias intersect(Predicate filter) { if (intersectingDataStreams.contains(writeDataStream) == false) { writeDataStream = null; } - return new DataStreamAlias(this.name, intersectingDataStreams, writeDataStream); + return new DataStreamAlias(this.name, intersectingDataStreams, writeDataStream, this.filter); } /** @@ -171,7 +241,7 @@ public DataStreamAlias merge(DataStreamAlias other) { } } - return new DataStreamAlias(this.name, List.copyOf(mergedDataStreams), writeDataStream); + return new DataStreamAlias(this.name, List.copyOf(mergedDataStreams), writeDataStream, filter); } /** @@ -187,7 +257,7 @@ public DataStreamAlias renameDataStreams(String renamePattern, String renameRepl if (writeDataStream != null) { writeDataStream = writeDataStream.replaceAll(renamePattern, renameReplacement); } - return new DataStreamAlias(this.name, renamedDataStreams, 
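A rough sketch of the update semantics added to DataStreamAlias above: the filter is stored compressed, and an update only produces a new alias instance when the data-stream set, the write data stream, or the decoded filter actually changes. The helper below stands in for the "did the filter change?" decision only, with a plain Map representing the parsed filter JSON instead of CompressedXContent:

    import java.util.Map;
    import java.util.Objects;

    // Sketch of the filter-change check; a Map stands in for the parsed filter JSON.
    final class AliasFilterUpdate {
        /**
         * @param currentFilter filter currently stored on the alias (null if none)
         * @param newFilter     filter supplied by the update request (null means "leave unchanged")
         * @return true when a new alias instance must be created because the filter changed
         */
        static boolean filterUpdated(Map<String, Object> currentFilter, Map<String, Object> newFilter) {
            if (newFilter == null) {
                return false;                    // no filter in the request: keep whatever is stored
            }
            return Objects.equals(currentFilter, newFilter) == false;
        }
    }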
writeDataStream); + return new DataStreamAlias(this.name, renamedDataStreams, writeDataStream, filter); } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -210,6 +280,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (writeDataStream != null) { builder.field(WRITE_DATA_STREAM_FIELD.getPreferredName(), writeDataStream); } + if (filter != null) { + boolean binary = params.paramAsBoolean("binary", false); + if (binary) { + builder.field("filter", filter.compressed()); + } else { + builder.field("filter", XContentHelper.convertToMap(filter.uncompressed(), true).v2()); + } + } builder.endObject(); return builder; } @@ -219,6 +297,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeStringCollection(dataStreams); out.writeOptionalString(writeDataStream); + if (out.getVersion().onOrAfter(Version.V_7_15_0)) { + if (filter != null) { + out.writeBoolean(true); + filter.writeTo(out); + } else { + out.writeBoolean(false); + } + } } @Override @@ -228,11 +314,22 @@ public boolean equals(Object o) { DataStreamAlias that = (DataStreamAlias) o; return Objects.equals(name, that.name) && Objects.equals(dataStreams, that.dataStreams) && - Objects.equals(writeDataStream, that.writeDataStream); + Objects.equals(writeDataStream, that.writeDataStream) && + Objects.equals(filter, that.filter); } @Override public int hashCode() { - return Objects.hash(name, dataStreams, writeDataStream); + return Objects.hash(name, dataStreams, writeDataStream, filter); + } + + @Override + public String toString() { + return "DataStreamAlias{" + + "name='" + name + '\'' + + ", dataStreams=" + dataStreams + + ", writeDataStream='" + writeDataStream + '\'' + + ", filter=" + filter.string() + + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java index d5e1919dc7139..f29ddef978963 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java @@ -133,15 +133,20 @@ public static boolean isIndexVisible(String expression, String index, IndicesOpt if (includeDataStreams == false) { return false; } - if (indexAbstraction.isSystem()) { final SystemIndexAccessLevel level = resolver.getSystemIndexAccessLevel(); - if (level == SystemIndexAccessLevel.ALL) { - return true; - } else if (level == SystemIndexAccessLevel.NONE) { - return false; - } else if (level == SystemIndexAccessLevel.RESTRICTED) { - return resolver.getSystemIndexAccessPredicate().test(indexAbstraction.getName()); + switch (level) { + case ALL: + return true; + case NONE: + return false; + case RESTRICTED: + return resolver.getSystemIndexAccessPredicate().test(indexAbstraction.getName()); + case BACKWARDS_COMPATIBLE_ONLY: + return resolver.getNetNewSystemIndexPredicate().test(indexAbstraction.getName()); + default: + assert false : "unexpected system index access level [" + level + "]"; + throw new IllegalStateException("unexpected system index access level [" + level + "]"); } } else { return isVisible; @@ -158,18 +163,43 @@ public static boolean isIndexVisible(String expression, String index, IndicesOpt return false; } if (indexAbstraction.isSystem()) { - // system index that backs system data stream + // check if it is net new + if 
(resolver.getNetNewSystemIndexPredicate().test(indexAbstraction.getName())) { + final SystemIndexAccessLevel level = resolver.getSystemIndexAccessLevel(); + switch (level) { + case ALL: + return true; + case NONE: + return false; + case RESTRICTED: + return resolver.getSystemIndexAccessPredicate().test(indexAbstraction.getName()); + case BACKWARDS_COMPATIBLE_ONLY: + return resolver.getNetNewSystemIndexPredicate().test(indexAbstraction.getName()); + default: + assert false : "unexpected system index access level [" + level + "]"; + throw new IllegalStateException("unexpected system index access level [" + level + "]"); + } + } + + // does the system index back a system data stream? if (indexAbstraction.getParentDataStream() != null) { if (indexAbstraction.getParentDataStream().isSystem() == false) { + assert false : "system index is part of a data stream that is not a system data stream"; throw new IllegalStateException("system index is part of a data stream that is not a system data stream"); } final SystemIndexAccessLevel level = resolver.getSystemIndexAccessLevel(); - if (level == SystemIndexAccessLevel.ALL) { - return true; - } else if (level == SystemIndexAccessLevel.NONE) { - return false; - } else if (level == SystemIndexAccessLevel.RESTRICTED) { - return resolver.getSystemIndexAccessPredicate().test(indexAbstraction.getName()); + switch (level) { + case ALL: + return true; + case NONE: + return false; + case RESTRICTED: + return resolver.getSystemIndexAccessPredicate().test(indexAbstraction.getName()); + case BACKWARDS_COMPATIBLE_ONLY: + return resolver.getNetNewSystemIndexPredicate().test(indexAbstraction.getName()); + default: + assert false : "unexpected system index access level [" + level + "]"; + throw new IllegalStateException("unexpected system index access level [" + level + "]"); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index caff31e78143d..c80bb40250f40 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -23,6 +23,13 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -34,12 +41,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import 
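Every visibility branch rewritten in the IndexAbstractionResolver hunks above follows the same pattern: map the caller's system-index access level to a yes/no/predicate decision. A condensed sketch of that pattern, with the enum and predicates declared locally rather than taken from Elasticsearch:

    import java.util.function.Predicate;

    final class SystemIndexVisibility {
        enum AccessLevel { ALL, NONE, RESTRICTED, BACKWARDS_COMPATIBLE_ONLY }

        static boolean isVisible(String indexName,
                                 AccessLevel level,
                                 Predicate<String> restrictedAccessPredicate,
                                 Predicate<String> netNewSystemIndexPredicate) {
            switch (level) {
                case ALL:
                    return true;
                case NONE:
                    return false;
                case RESTRICTED:
                    return restrictedAccessPredicate.test(indexName);
                case BACKWARDS_COMPATIBLE_ONLY:
                    return netNewSystemIndexPredicate.test(indexName);
                default:
                    throw new IllegalStateException("unexpected system index access level [" + level + "]");
            }
        }
    }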
org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; @@ -1309,6 +1310,7 @@ public IndexMetadata build() { timestampRange); } + @SuppressWarnings("unchecked") public static void toXContent(IndexMetadata indexMetadata, XContentBuilder builder, ToXContent.Params params) throws IOException { Metadata.XContentContext context = Metadata.XContentContext.valueOf( params.param(CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API)); @@ -1422,16 +1424,12 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token parser.nextToken(); } - if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { - throw new IllegalArgumentException("expected field name but got a " + parser.currentToken()); - } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); Builder builder = new Builder(parser.currentName()); String currentFieldName = null; XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("expected object but got a " + token); - } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); boolean mappingVersion = false; boolean settingsVersion = false; boolean aliasesVersion = false; @@ -1514,11 +1512,8 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti } else if (KEY_PRIMARY_TERMS.equals(currentFieldName)) { LongArrayList list = new LongArrayList(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_NUMBER) { - list.add(parser.longValue()); - } else { - throw new IllegalStateException("found a non-numeric value under [" + KEY_PRIMARY_TERMS + "]"); - } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); + list.add(parser.longValue()); } builder.primaryTerms(list.toArray()); } else { @@ -1549,6 +1544,7 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti throw new IllegalArgumentException("Unexpected token " + token); } } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); if (Assertions.ENABLED) { assert mappingVersion : "mapping version should be present for indices created on or after 6.5.0"; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index 99f1170142818..4ee4a57911dee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -33,6 +33,8 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isPartialSearchableSnapshotIndex; + /** * This service is responsible for verifying index metadata when an index is introduced * to the cluster, for example when restarting nodes, importing dangling indices, or restoring @@ -202,7 +204,7 @@ IndexMetadata archiveBrokenIndexSettings(IndexMetadata indexMetadata) { IndexMetadata convertSharedCacheTierPreference(IndexMetadata indexMetadata) { final Settings settings = indexMetadata.getSettings(); // Only remove these settings for a shared_cache 
searchable snapshot - if ("snapshot".equals(settings.get("index.store.type", "")) && settings.getAsBoolean("index.store.snapshot.partial", false)) { + if (isPartialSearchableSnapshotIndex(settings)) { final Settings.Builder settingsBuilder = Settings.builder().put(settings); // Clear any allocation rules other than preference for tier settingsBuilder.remove("index.routing.allocation.include._tier"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index efb3e1129e249..d8f384014c53e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexAbstraction.Type; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.logging.DeprecationCategory; @@ -26,6 +25,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; @@ -78,7 +78,7 @@ public IndexNameExpressionResolver(ThreadContext threadContext, SystemIndices sy */ public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { Context context = new Context(state, request.indicesOptions(), false, false, request.includeDataStreams(), - getSystemIndexAccessPredicate()); + getSystemIndexAccessLevel(), getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndexNames(context, request.indices()); } @@ -87,7 +87,7 @@ public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { */ public String[] concreteIndexNamesWithSystemIndexAccess(ClusterState state, IndicesRequest request) { Context context = new Context(state, request.indicesOptions(), false, false, request.includeDataStreams(), - name -> true); + SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY, name -> true, this.getNetNewSystemIndexPredicate()); return concreteIndexNames(context, request.indices()); } @@ -97,7 +97,7 @@ public String[] concreteIndexNamesWithSystemIndexAccess(ClusterState state, Indi */ public Index[] concreteIndices(ClusterState state, IndicesRequest request) { Context context = new Context(state, request.indicesOptions(), false, false, request.includeDataStreams(), - getSystemIndexAccessPredicate()); + getSystemIndexAccessLevel(), getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndices(context, request.indices()); } @@ -112,30 +112,29 @@ public Index[] concreteIndices(ClusterState state, IndicesRequest request) { * provided indices options in the context don't allow such a case, or if the final result of the indices resolution * contains no indices and the indices options in the context don't allow such a case. * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided - * indices options in the context don't allow such a case. 
+ * indices options in the context don't allow such a case; if a remote index is requested. */ public String[] concreteIndexNames(ClusterState state, IndicesOptions options, String... indexExpressions) { - Context context = new Context(state, options, getSystemIndexAccessPredicate()); + Context context = new Context(state, options, getSystemIndexAccessLevel(), + getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndexNames(context, indexExpressions); } public String[] concreteIndexNames(ClusterState state, IndicesOptions options, boolean includeDataStreams, String... indexExpressions) { - Context context = new Context(state, options, false, false, includeDataStreams, getSystemIndexAccessPredicate()); + Context context = new Context(state, options, false, false, includeDataStreams, getSystemIndexAccessLevel(), + getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndexNames(context, indexExpressions); } public String[] concreteIndexNames(ClusterState state, IndicesOptions options, IndicesRequest request) { - Context context = new Context(state, options, false, false, request.includeDataStreams(), getSystemIndexAccessPredicate()); + Context context = new Context(state, options, false, false, request.includeDataStreams(), + getSystemIndexAccessLevel(), getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndexNames(context, request.indices()); } - public String[] concreteIndexNamesWithSystemIndexAccess(ClusterState state, IndicesOptions options, String... indexExpressions) { - Context context = new Context(state, options, name -> true); - return concreteIndexNames(context, indexExpressions); - } - public List dataStreamNames(ClusterState state, IndicesOptions options, String... indexExpressions) { - Context context = new Context(state, options, false, false, true, true, getSystemIndexAccessPredicate()); + Context context = new Context(state, options, false, false, true, true, getSystemIndexAccessLevel(), + getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); if (indexExpressions == null || indexExpressions.length == 0) { indexExpressions = new String[]{"*"}; } @@ -163,7 +162,7 @@ public List dataStreamNames(ClusterState state, IndicesOptions options, * provided indices options in the context don't allow such a case, or if the final result of the indices resolution * contains no indices and the indices options in the context don't allow such a case. * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided - * indices options in the context don't allow such a case. + * indices options in the context don't allow such a case; if a remote index is requested. */ public Index[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) { return concreteIndices(state, options, false, indexExpressions); @@ -171,7 +170,7 @@ public Index[] concreteIndices(ClusterState state, IndicesOptions options, Strin public Index[] concreteIndices(ClusterState state, IndicesOptions options, boolean includeDataStreams, String... 
indexExpressions) { Context context = new Context(state, options, false, false, includeDataStreams, - getSystemIndexAccessPredicate()); + getSystemIndexAccessLevel(), getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndices(context, indexExpressions); } @@ -185,11 +184,11 @@ public Index[] concreteIndices(ClusterState state, IndicesOptions options, boole * provided indices options in the context don't allow such a case, or if the final result of the indices resolution * contains no indices and the indices options in the context don't allow such a case. * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided - * indices options in the context don't allow such a case. + * indices options in the context don't allow such a case; if a remote index is requested. */ public Index[] concreteIndices(ClusterState state, IndicesRequest request, long startTime) { Context context = new Context(state, request.indicesOptions(), startTime, false, false, request.includeDataStreams(), false, - getSystemIndexAccessPredicate()); + getSystemIndexAccessLevel(), getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); return concreteIndices(context, request.indices()); } @@ -203,11 +202,20 @@ String[] concreteIndexNames(Context context, String... indexExpressions) { } Index[] concreteIndices(Context context, String... indexExpressions) { + Metadata metadata = context.getState().metadata(); + IndicesOptions options = context.getOptions(); if (indexExpressions == null || indexExpressions.length == 0) { indexExpressions = new String[]{Metadata.ALL}; + } else { + if (options.ignoreUnavailable() == false) { + List crossClusterIndices = Arrays.stream(indexExpressions) + .filter(index -> index.contains(":")).collect(Collectors.toList()); + if (crossClusterIndices.size() > 0) { + throw new IllegalArgumentException("Cross-cluster calls are not supported in this context but remote indices " + + "were requested: " + crossClusterIndices); + } + } } - Metadata metadata = context.getState().metadata(); - IndicesOptions options = context.getOptions(); // If only one index is specified then whether we fail a request if an index is missing depends on the allow_no_indices // option. At some point we should change this, because there shouldn't be a reason why whether a single index // or multiple indices are specified yield different behaviour. @@ -327,12 +335,15 @@ private void checkSystemIndexAccess(Context context, Metadata metadata, Set resolvedSystemIndices = new ArrayList<>(); + final List resolvedNetNewSystemIndices = new ArrayList<>(); final Set resolvedSystemDataStreams = new HashSet<>(); final SortedMap indicesLookup = metadata.getIndicesLookup(); for (IndexMetadata idxMetadata : systemIndicesThatShouldNotBeAccessed) { IndexAbstraction abstraction = indicesLookup.get(idxMetadata.getIndex().getName()); if (abstraction.getParentDataStream() != null) { resolvedSystemDataStreams.add(abstraction.getParentDataStream().getName()); + } else if (systemIndices.isNetNewSystemIndex(idxMetadata.getIndex().getName())) { + resolvedNetNewSystemIndices.add(idxMetadata.getIndex().getName()); } else { resolvedSystemIndices.add(idxMetadata.getIndex().getName()); } @@ -347,9 +358,17 @@ private void checkSystemIndexAccess(Context context, Metadata metadata, Set resolveExpressions(ClusterState state, String... 
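A small sketch of the new guard in concreteIndices(...) above: when the caller does not ask to ignore unavailable indices, any expression that still contains a remote-cluster separator (':') is rejected instead of being silently treated as a local index, and the offending names are reported in the exception. The class below is illustrative; the real check runs inside the resolver.

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    final class RemoteIndexGuard {
        static void rejectRemoteExpressions(boolean ignoreUnavailable, String... indexExpressions) {
            if (ignoreUnavailable) {
                return;                          // lenient resolution tolerates unknown expressions
            }
            List<String> crossCluster = Arrays.stream(indexExpressions)
                .filter(expression -> expression.contains(":"))
                .collect(Collectors.toList());
            if (crossCluster.isEmpty() == false) {
                throw new IllegalArgumentException(
                    "Cross-cluster calls are not supported in this context but remote indices were requested: " + crossCluster);
            }
        }
    }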
expressions) { - Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true, false, true, getSystemIndexAccessPredicate()); + Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true, false, true, getSystemIndexAccessLevel(), + getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); List resolvedExpressions = Arrays.asList(expressions); for (ExpressionResolver expressionResolver : expressionResolvers) { resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); @@ -527,39 +552,55 @@ public String[] indexAliases(ClusterState state, String index, Predicate indexAliases = indexMetadata.getAliases(); - final AliasMetadata[] aliasCandidates; - if (iterateIndexAliases(indexAliases.size(), resolvedExpressions.size())) { - // faster to iterate indexAliases - aliasCandidates = StreamSupport.stream(Spliterators.spliteratorUnknownSize(indexAliases.values().iterator(), 0), false) + IndexAbstraction ia = state.metadata().getIndicesLookup().get(index); + if (ia.getParentDataStream() != null) { + DataStream dataStream = ia.getParentDataStream().getDataStream(); + Map dataStreamAliases = state.metadata().dataStreamAliases(); + Stream stream; + if (iterateIndexAliases(dataStreamAliases.size(), resolvedExpressions.size())) { + stream = dataStreamAliases.values().stream() + .filter(dataStreamAlias -> resolvedExpressions.contains(dataStreamAlias.getName())); + } else { + stream = resolvedExpressions.stream().map(dataStreamAliases::get).filter(Objects::nonNull); + } + return stream.filter(dataStreamAlias -> dataStreamAlias.getDataStreams().contains(dataStream.getName())) + .filter(dataStreamAlias -> dataStreamAlias.getFilter() != null) + .map(DataStreamAlias::getName) + .toArray(String[]::new); + } else { + final ImmutableOpenMap indexAliases = indexMetadata.getAliases(); + final AliasMetadata[] aliasCandidates; + if (iterateIndexAliases(indexAliases.size(), resolvedExpressions.size())) { + // faster to iterate indexAliases + aliasCandidates = StreamSupport.stream(Spliterators.spliteratorUnknownSize(indexAliases.values().iterator(), 0), false) .map(cursor -> cursor.value) .filter(aliasMetadata -> resolvedExpressions.contains(aliasMetadata.alias())) .toArray(AliasMetadata[]::new); - } else { - // faster to iterate resolvedExpressions - aliasCandidates = resolvedExpressions.stream() + } else { + // faster to iterate resolvedExpressions + aliasCandidates = resolvedExpressions.stream() .map(indexAliases::get) .filter(Objects::nonNull) .toArray(AliasMetadata[]::new); - } - - List aliases = null; - for (AliasMetadata aliasMetadata : aliasCandidates) { - if (requiredAlias.test(aliasMetadata)) { - // If required - add it to the list of aliases - if (aliases == null) { - aliases = new ArrayList<>(); + } + List aliases = null; + for (AliasMetadata aliasMetadata : aliasCandidates) { + if (requiredAlias.test(aliasMetadata)) { + // If required - add it to the list of aliases + if (aliases == null) { + aliases = new ArrayList<>(); + } + aliases.add(aliasMetadata.alias()); + } else { + // If not, we have a non required alias for this index - no further checking needed + return null; } - aliases.add(aliasMetadata.alias()); - } else { - // If not, we have a non required alias for this index - no further checking needed + } + if (aliases == null) { return null; } + return aliases.toArray(new String[aliases.size()]); } - if (aliases == null) { - return null; - } - return aliases.toArray(new String[aliases.size()]); } /** @@ -570,7 +611,8 @@ public String[] 
indexAliases(ClusterState state, String index, Predicate> resolveSearchRouting(ClusterState state, @Nullable String routing, String... expressions) { List resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList(); - Context context = new Context(state, IndicesOptions.lenientExpandOpen(), false, false, true, getSystemIndexAccessPredicate()); + Context context = new Context(state, IndicesOptions.lenientExpandOpen(), false, false, true, getSystemIndexAccessLevel(), + getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate()); for (ExpressionResolver expressionResolver : expressionResolvers) { resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); } @@ -722,7 +764,10 @@ boolean isPatternMatchingAllIndices(Metadata metadata, String[] indicesOrAliases } public SystemIndexAccessLevel getSystemIndexAccessLevel() { - return systemIndices.getSystemIndexAccessLevel(threadContext); + final SystemIndexAccessLevel accessLevel = systemIndices.getSystemIndexAccessLevel(threadContext); + assert accessLevel != SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY + : "BACKWARDS_COMPATIBLE_ONLY access level should never be used automatically, it should only be used in known special cases"; + return accessLevel; } public Predicate getSystemIndexAccessPredicate() { @@ -730,6 +775,8 @@ public Predicate getSystemIndexAccessPredicate() { final Predicate systemIndexAccessLevelPredicate; if (systemIndexAccessLevel == SystemIndexAccessLevel.NONE) { systemIndexAccessLevelPredicate = s -> false; + } else if (systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY) { + systemIndexAccessLevelPredicate = getNetNewSystemIndexPredicate(); } else if (systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { systemIndexAccessLevelPredicate = s -> true; } else { @@ -739,6 +786,10 @@ public Predicate getSystemIndexAccessPredicate() { return systemIndexAccessLevelPredicate; } + public Predicate getNetNewSystemIndexPredicate() { + return systemIndices::isNetNewSystemIndex; + } + public static class Context { private final ClusterState state; @@ -748,30 +799,44 @@ public static class Context { private final boolean resolveToWriteIndex; private final boolean includeDataStreams; private final boolean preserveDataStreams; + private final SystemIndexAccessLevel systemIndexAccessLevel; private final Predicate systemIndexAccessPredicate; + private final Predicate netNewSystemIndexPredicate; - Context(ClusterState state, IndicesOptions options, Predicate systemIndexAccessPredicate) { - this(state, options, System.currentTimeMillis(), systemIndexAccessPredicate); + Context(ClusterState state, IndicesOptions options, SystemIndexAccessLevel systemIndexAccessLevel) { + this(state, options, systemIndexAccessLevel, s -> true, s -> false); + } + + Context(ClusterState state, IndicesOptions options, SystemIndexAccessLevel systemIndexAccessLevel, + Predicate systemIndexAccessPredicate, Predicate netNewSystemIndexPredicate) { + this(state, options, System.currentTimeMillis(), systemIndexAccessLevel, systemIndexAccessPredicate, + netNewSystemIndexPredicate); } Context(ClusterState state, IndicesOptions options, boolean preserveAliases, boolean resolveToWriteIndex, - boolean includeDataStreams, Predicate systemIndexAccessPredicate) { + boolean includeDataStreams, SystemIndexAccessLevel systemIndexAccessLevel, Predicate systemIndexAccessPredicate, + Predicate netNewSystemIndexPredicate) { this(state, options, System.currentTimeMillis(), preserveAliases, 
resolveToWriteIndex, includeDataStreams, false, - systemIndexAccessPredicate); + systemIndexAccessLevel, systemIndexAccessPredicate, netNewSystemIndexPredicate); } Context(ClusterState state, IndicesOptions options, boolean preserveAliases, boolean resolveToWriteIndex, - boolean includeDataStreams, boolean preserveDataStreams, Predicate systemIndexAccessPredicate) { + boolean includeDataStreams, boolean preserveDataStreams, SystemIndexAccessLevel systemIndexAccessLevel, + Predicate systemIndexAccessPredicate, Predicate netNewSystemIndexPredicate) { this(state, options, System.currentTimeMillis(), preserveAliases, resolveToWriteIndex, includeDataStreams, preserveDataStreams, - systemIndexAccessPredicate); + systemIndexAccessLevel, systemIndexAccessPredicate, netNewSystemIndexPredicate); } - Context(ClusterState state, IndicesOptions options, long startTime, Predicate systemIndexAccessPredicate) { - this(state, options, startTime, false, false, false, false, systemIndexAccessPredicate); + Context(ClusterState state, IndicesOptions options, long startTime, SystemIndexAccessLevel systemIndexAccessLevel, + Predicate systemIndexAccessPredicate, Predicate netNewSystemIndexPredicate) { + this(state, options, startTime, false, false, false, false, systemIndexAccessLevel, systemIndexAccessPredicate, + netNewSystemIndexPredicate); } protected Context(ClusterState state, IndicesOptions options, long startTime, boolean preserveAliases, boolean resolveToWriteIndex, - boolean includeDataStreams, boolean preserveDataStreams, Predicate systemIndexAccessPredicate) { + boolean includeDataStreams, boolean preserveDataStreams, SystemIndexAccessLevel systemIndexAccessLevel, + Predicate systemIndexAccessPredicate, + Predicate netNewSystemIndexPredicate) { this.state = state; this.options = options; this.startTime = startTime; @@ -779,7 +844,9 @@ protected Context(ClusterState state, IndicesOptions options, long startTime, bo this.resolveToWriteIndex = resolveToWriteIndex; this.includeDataStreams = includeDataStreams; this.preserveDataStreams = preserveDataStreams; + this.systemIndexAccessLevel = systemIndexAccessLevel; this.systemIndexAccessPredicate = systemIndexAccessPredicate; + this.netNewSystemIndexPredicate = netNewSystemIndexPredicate; } public ClusterState getState() { @@ -855,20 +922,7 @@ public List resolve(Context context, List expressions) { } if (isEmptyOrTrivialWildcard(expressions)) { - List resolvedExpressions = resolveEmptyOrTrivialWildcard(options, metadata).stream() - .filter(expression -> { - IndexAbstraction abstraction = metadata.getIndicesLookup().get(expression); - if (abstraction != null && abstraction.isSystem()) { - if (abstraction.getType() == Type.DATA_STREAM || abstraction.getParentDataStream() != null) { - return context.systemIndexAccessPredicate.test(abstraction.getName()); - } else { - return true; - } - } else { - return true; - } - }) - .collect(Collectors.toList()); + List resolvedExpressions = resolveEmptyOrTrivialWildcard(context); if (context.includeDataStreams()) { final IndexMetadata.State excludeState = excludeState(options); final Map dataStreamsAbstractions = metadata.getIndicesLookup().entrySet() @@ -1068,11 +1122,16 @@ private static Set expand(Context context, IndexMetadata.State excludeSt String aliasOrIndexName = entry.getKey(); IndexAbstraction indexAbstraction = entry.getValue(); - if (indexAbstraction.isSystem() && - (indexAbstraction.getType() == Type.DATA_STREAM || indexAbstraction.getParentDataStream() != null)) { - if 
(context.systemIndexAccessPredicate.test(indexAbstraction.getName()) == false) { + if (indexAbstraction.isSystem()) { + if (context.netNewSystemIndexPredicate.test(indexAbstraction.getName()) && + context.systemIndexAccessPredicate.test(indexAbstraction.getName()) == false) { continue; } + if (indexAbstraction.getType() == Type.DATA_STREAM || indexAbstraction.getParentDataStream() != null) { + if (context.systemIndexAccessPredicate.test(indexAbstraction.getName()) == false) { + continue; + } + } } if (indexAbstraction.isHidden() == false || includeHidden || implicitHiddenMatch(aliasOrIndexName, expression)) { @@ -1102,21 +1161,56 @@ private boolean isEmptyOrTrivialWildcard(List expressions) { Regex.isMatchAllPattern(expressions.get(0)))); } - private static List resolveEmptyOrTrivialWildcard(IndicesOptions options, Metadata metadata) { + private static List resolveEmptyOrTrivialWildcard(Context context) { + final String[] allIndices = resolveEmptyOrTrivialWildcardToAllIndices(context.getOptions(), context.getState().metadata()); + if (context.systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { + return List.of(allIndices); + } else { + return resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(context, allIndices); + } + } + + private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(Context context, String[] allIndices) { + return Arrays.stream(allIndices) + .filter(name -> { + if (name.startsWith(".")) { + IndexAbstraction abstraction = context.state.metadata().getIndicesLookup().get(name); + assert abstraction != null : "null abstraction for " + name + " but was in array of all indices"; + if (abstraction.isSystem()) { + if (context.netNewSystemIndexPredicate.test(name)) { + if (SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY.equals(context.systemIndexAccessLevel)) { + return false; + } else { + return context.systemIndexAccessPredicate.test(name); + } + } else if (abstraction.getType() == Type.DATA_STREAM || abstraction.getParentDataStream() != null) { + return context.systemIndexAccessPredicate.test(name); + } + } else { + return true; + } + } + return true; + } + ) + .collect(Collectors.toUnmodifiableList()); + } + + private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions options, Metadata metadata) { if (options.expandWildcardsOpen() && options.expandWildcardsClosed() && options.expandWildcardsHidden()) { - return Arrays.asList(metadata.getConcreteAllIndices()); + return metadata.getConcreteAllIndices(); } else if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { - return Arrays.asList(metadata.getConcreteVisibleIndices()); + return metadata.getConcreteVisibleIndices(); } else if (options.expandWildcardsOpen() && options.expandWildcardsHidden()) { - return Arrays.asList(metadata.getConcreteAllOpenIndices()); + return metadata.getConcreteAllOpenIndices(); } else if (options.expandWildcardsOpen()) { - return Arrays.asList(metadata.getConcreteVisibleOpenIndices()); + return metadata.getConcreteVisibleOpenIndices(); } else if (options.expandWildcardsClosed() && options.expandWildcardsHidden()) { - return Arrays.asList(metadata.getConcreteAllClosedIndices()); + return metadata.getConcreteAllClosedIndices(); } else if (options.expandWildcardsClosed()) { - return Arrays.asList(metadata.getConcreteVisibleClosedIndices()); + return metadata.getConcreteVisibleClosedIndices(); } else { - return Collections.emptyList(); + return Strings.EMPTY_ARRAY; } } } @@ -1271,4 +1365,28 @@ static String resolveExpression(String expression, 
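The resolveEmptyOrTrivialWildcard(...) refactor above first short-circuits when the caller has full system-index access and otherwise filters the all-indices array through the system-index predicates. A condensed, self-contained sketch of that filter; the dot-prefix heuristic and predicates come from the hunk, everything else (names, dropped data-stream branch) is simplified:

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    final class TrivialWildcardFilter {
        static List<String> resolve(String[] allIndices,
                                    boolean callerHasFullAccess,
                                    Predicate<String> isSystemIndex,
                                    Predicate<String> isNetNewSystemIndex,
                                    Predicate<String> systemIndexAccessPredicate) {
            if (callerHasFullAccess) {
                return List.of(allIndices);      // access level ALL: nothing to hide
            }
            return Arrays.stream(allIndices)
                .filter(name -> {
                    if (name.startsWith(".") && isSystemIndex.test(name)) {
                        // Net-new system indices are only visible through the access predicate.
                        if (isNetNewSystemIndex.test(name)) {
                            return systemIndexAccessPredicate.test(name);
                        }
                    }
                    return true;                 // ordinary indices (and legacy system indices) stay visible
                })
                .collect(Collectors.toUnmodifiableList());
        }
    }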
final Context context) { return beforePlaceHolderSb.toString(); } } + + /** + * This is a context for the DateMathExpressionResolver which does not require {@code IndicesOptions} or {@code ClusterState} + * since it uses only the start time to resolve expressions. + */ + public static final class ResolverContext extends Context { + public ResolverContext() { + this(System.currentTimeMillis()); + } + + public ResolverContext(long startTime) { + super(null, null, startTime, false, false, false, false, SystemIndexAccessLevel.ALL, name -> false, name -> false); + } + + @Override + public ClusterState getState() { + throw new UnsupportedOperationException("should never be called"); + } + + @Override + public IndicesOptions getOptions() { + throw new UnsupportedOperationException("should never be called"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java index 3d92f55e79b21..ba4b26b5d7058 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.mapper.MapperService; import java.io.IOException; import java.io.UncheckedIOException; @@ -391,6 +392,8 @@ private static void toInnerXContent(IndexTemplateMetadata indexTemplateMetadata, Map documentMapping = XContentHelper.convertToMap(m.uncompressed(), true).v2(); if (includeTypeName == false) { documentMapping = reduceMapping(documentMapping); + } else { + documentMapping = reduceEmptyMapping(documentMapping); } builder.field("mappings"); builder.map(documentMapping); @@ -405,6 +408,16 @@ private static void toInnerXContent(IndexTemplateMetadata indexTemplateMetadata, builder.endObject(); } + @SuppressWarnings("unchecked") + private static Map reduceEmptyMapping(Map mapping) { + if(mapping.keySet().size() == 1 && mapping.containsKey(MapperService.SINGLE_MAPPING_NAME) && + ((Map)mapping.get(MapperService.SINGLE_MAPPING_NAME)).size() == 0){ + return (Map) mapping.values().iterator().next(); + } else { + return mapping; + } + } + @SuppressWarnings("unchecked") private static Map reduceMapping(Map mapping) { assert mapping.keySet().size() == 1 : mapping.keySet(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java new file mode 100644 index 0000000000000..993ae141e3466 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +/** + * A class encapsulating the usage of a particular "thing" by something else + */ +public class ItemUsage implements Writeable, ToXContentObject { + + private final Set indices; + private final Set dataStreams; + private final Set composableTemplates; + + /** + * Create a new usage, a {@code null} value indicates that the item *cannot* be used by the + * thing, otherwise use an empty collection to indicate no usage. + */ + public ItemUsage(@Nullable Collection indices, + @Nullable Collection dataStreams, + @Nullable Collection composableTemplates) { + this.indices = indices == null ? null : new HashSet<>(indices); + this.dataStreams = dataStreams == null ? null : new HashSet<>(dataStreams); + this.composableTemplates = composableTemplates == null ? null : new HashSet<>(composableTemplates); + } + + public ItemUsage(StreamInput in) throws IOException { + if (in.readBoolean()) { + this.indices = in.readSet(StreamInput::readString); + } else { + this.indices = null; + } + if (in.readBoolean()) { + this.dataStreams = in.readSet(StreamInput::readString); + } else { + this.dataStreams = null; + } + if (in.readBoolean()) { + this.composableTemplates = in.readSet(StreamInput::readString); + } else { + this.composableTemplates = null; + } + } + + public Set getIndices() { + return indices; + } + + public Set getDataStreams() { + return dataStreams; + } + + public Set getComposableTemplates() { + return composableTemplates; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (this.indices != null) { + builder.field("indices", this.indices); + } + if (this.dataStreams != null) { + builder.field("data_streams", this.dataStreams); + } + if (this.composableTemplates != null) { + builder.field("composable_templates", this.composableTemplates); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalStringCollection(this.indices); + out.writeOptionalStringCollection(this.dataStreams); + out.writeOptionalStringCollection(this.composableTemplates); + } + + @Override + public int hashCode() { + return Objects.hash(this.indices, this.dataStreams, this.composableTemplates); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ItemUsage other = (ItemUsage) obj; + return Objects.equals(indices, other.indices) && + Objects.equals(dataStreams, other.dataStreams) && + Objects.equals(composableTemplates, other.composableTemplates); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 73120987005c8..267db7de99ced 100644 --- 
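The new ItemUsage class above distinguishes "cannot be used by this kind of item" (null) from "usable but currently unused" (empty set), and serializes each collection behind a presence flag. A minimal sketch of that convention using plain java.io streams instead of Elasticsearch's StreamOutput/StreamInput:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.LinkedHashSet;
    import java.util.Set;

    final class OptionalStringSet {
        // null => the owning item can never be used here; empty => usable but unused.
        static void write(DataOutputStream out, Set<String> values) throws IOException {
            out.writeBoolean(values != null);
            if (values != null) {
                out.writeInt(values.size());
                for (String value : values) {
                    out.writeUTF(value);
                }
            }
        }

        static Set<String> read(DataInputStream in) throws IOException {
            if (in.readBoolean() == false) {
                return null;
            }
            int size = in.readInt();
            Set<String> values = new LinkedHashSet<>(size);
            for (int i = 0; i < size; i++) {
                values.add(in.readUTF());
            }
            return values;
        }
    }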
a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -17,6 +17,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.action.AliasesRequest; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; @@ -25,6 +26,15 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.coordination.CoordinationMetadata; +import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -37,12 +47,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -73,6 +77,10 @@ import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; +/** + * {@link Metadata} is the part of the {@link ClusterState} which persists across restarts. This persistence is XContent-based, so a + * round-trip through XContent must be faithful in {@link XContentContext#GATEWAY} context. + */ public class Metadata implements Iterable, Diffable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -115,6 +123,10 @@ public enum XContentContext { */ public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); + /** + * Custom metadata that persists (via XContent) across restarts. The deserialization method for each implementation must be registered + * with the {@link NamedXContentRegistry}. 
+ */ public interface Custom extends NamedDiffable, ToXContentFragment { EnumSet context(); @@ -707,6 +719,12 @@ public Map dataStreamAliases() { .orElse(Collections.emptyMap()); } + public Map nodeShutdowns() { + return Optional.ofNullable((NodesShutdownMetadata) this.custom(NodesShutdownMetadata.TYPE)) + .map(NodesShutdownMetadata::getAllNodeMetadataMap) + .orElse(Collections.emptyMap()); + } + public ImmutableOpenMap customs() { return this.customs; } @@ -1177,7 +1195,7 @@ public Builder put(DataStream dataStream) { return this; } - public boolean put(String aliasName, String dataStream, Boolean isWriteDataStream) { + public boolean put(String aliasName, String dataStream, Boolean isWriteDataStream, String filter) { Map existingDataStream = Optional.ofNullable((DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE)) .map(dsmd -> new HashMap<>(dsmd.dataStreams())) @@ -1191,12 +1209,19 @@ public boolean put(String aliasName, String dataStream, Boolean isWriteDataStrea throw new IllegalArgumentException("alias [" + aliasName + "] refers to a non existing data stream [" + dataStream + "]"); } + Map filterAsMap; + if (filter != null) { + filterAsMap = XContentHelper.convertToMap(XContentFactory.xContent(filter), filter, true); + } else { + filterAsMap = null; + } + DataStreamAlias alias = dataStreamAliases.get(aliasName); if (alias == null) { String writeDataStream = isWriteDataStream != null && isWriteDataStream ? dataStream : null; - alias = new DataStreamAlias(aliasName, List.of(dataStream), writeDataStream); + alias = new DataStreamAlias(aliasName, List.of(dataStream), writeDataStream, filterAsMap); } else { - DataStreamAlias copy = alias.addDataStream(dataStream, isWriteDataStream); + DataStreamAlias copy = alias.update(dataStream, isWriteDataStream, filterAsMap); if (copy == alias) { return false; } @@ -1653,22 +1678,17 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { token = parser.nextToken(); if (token == XContentParser.Token.START_OBJECT) { // move to the field name (meta-data) - token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new IllegalArgumentException("Expected a field name but got " + token); - } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser); // move to the next object token = parser.nextToken(); } currentFieldName = parser.currentName(); } - if ("meta-data".equals(parser.currentName()) == false) { + if ("meta-data".equals(currentFieldName) == false) { throw new IllegalArgumentException("Expected [meta-data] as a field name but got " + currentFieldName); } - if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("Expected a START_OBJECT but got " + token); - } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -1711,6 +1731,7 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { throw new IllegalArgumentException("Unexpected token " + token); } } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); return builder.build(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index e5abf16a74016..8358c3d4ddbcd 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -24,10 +24,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.index.Index; -import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.indices.SystemDataStreamDescriptor; @@ -35,6 +33,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; @@ -157,12 +156,11 @@ static ClusterState createDataStream(MetadataCreateIndexService metadataCreateIn } static ClusterState createDataStream(MetadataCreateIndexService metadataCreateIndexService, - ClusterState currentState, - String dataStreamName, - List backingIndices, - IndexMetadata writeIndex, - SystemDataStreamDescriptor systemDataStreamDescriptor) throws Exception - { + ClusterState currentState, + String dataStreamName, + List backingIndices, + IndexMetadata writeIndex, + SystemDataStreamDescriptor systemDataStreamDescriptor) throws Exception { Objects.requireNonNull(metadataCreateIndexService); Objects.requireNonNull(currentState); Objects.requireNonNull(backingIndices); @@ -177,8 +175,8 @@ static ClusterState createDataStream(MetadataCreateIndexService metadataCreateIn throw new IllegalArgumentException("data_stream [" + dataStreamName + "] must be lowercase"); } if (dataStreamName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { - throw new IllegalArgumentException("data_stream [" + dataStreamName + "] must not start with '" - + DataStream.BACKING_INDEX_PREFIX + "'"); + throw new IllegalArgumentException( + "data_stream [" + dataStreamName + "] must not start with '" + DataStream.BACKING_INDEX_PREFIX + "'"); } final boolean isSystem = systemDataStreamDescriptor != null; @@ -219,9 +217,24 @@ static ClusterState createDataStream(MetadataCreateIndexService metadataCreateIn DataStream newDataStream = new DataStream(dataStreamName, timestampField, dsBackingIndices, 1L, template.metadata() != null ? Map.copyOf(template.metadata()) : null, hidden, false, isSystem); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); - logger.info("adding data stream [{}] with write index [{}] and backing indices [{}]", dataStreamName, + + List aliases = new ArrayList<>(); + var resolvedAliases = MetadataIndexTemplateService.resolveAliases(currentState.metadata(), template); + for (var resolvedAliasMap : resolvedAliases) { + for (var alias : resolvedAliasMap.values()) { + aliases.add(alias.getAlias()); + builder.put(alias.getAlias(), dataStreamName, alias.writeIndex(), alias.filter() == null ? 
null : alias.filter().string()); + } + } + + logger.info( + "adding data stream [{}] with write index [{}], backing indices [{}], and aliases [{}]", + dataStreamName, writeIndex.getIndex().getName(), - Strings.arrayToCommaDelimitedString(backingIndices.stream().map(i -> i.getIndex().getName()).toArray())); + Strings.arrayToCommaDelimitedString(backingIndices.stream().map(i -> i.getIndex().getName()).toArray()), + Strings.collectionToCommaDelimitedString(aliases) + ); + return ClusterState.builder(currentState).metadata(builder).build(); } @@ -239,18 +252,13 @@ public static ComposableIndexTemplate lookupTemplateForDataStream(String dataStr } public static void validateTimestampFieldMapping(MappingLookup mappingLookup) throws IOException { - MetadataFieldMapper fieldMapper = (MetadataFieldMapper) mappingLookup.getMapper("_data_stream_timestamp"); - assert fieldMapper != null : "[_data_stream_timestamp] meta field mapper must exist"; - - Map parsedTemplateMapping = - MapperService.parseMapping(NamedXContentRegistry.EMPTY, mappingLookup.getMapping().toCompressedXContent().string()); - Boolean enabled = ObjectPath.eval("_doc._data_stream_timestamp.enabled", parsedTemplateMapping); + MetadataFieldMapper fieldMapper = (MetadataFieldMapper) mappingLookup.getMapper(DataStreamTimestampFieldMapper.NAME); + assert fieldMapper != null : DataStreamTimestampFieldMapper.NAME + " meta field mapper must exist"; // Sanity check: if this fails then somehow the mapping for _data_stream_timestamp has been overwritten and // that would be a bug. - if (enabled == null || enabled == false) { - throw new IllegalStateException("[_data_stream_timestamp] meta field has been disabled"); + if (mappingLookup.isDataStreamTimestampFieldEnabled() == false) { + throw new IllegalStateException("[" + DataStreamTimestampFieldMapper.NAME + "] meta field has been disabled"); } - // Sanity check (this validation logic should already have been executed when merging mappings): fieldMapper.validate(mappingLookup); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 51a924a3a648b..68906e8661967 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -35,13 +35,11 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.core.PathUtils; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -49,8 +47,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.PathUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexModule; import 
org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -71,6 +72,7 @@ import java.nio.file.Path; import java.time.Instant; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -99,6 +101,9 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings; +import static org.elasticsearch.index.IndexModule.INDEX_RECOVERY_TYPE_SETTING; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.isSearchableSnapshotStore; /** * Service responsible for submitting create index requests @@ -499,7 +504,8 @@ private ClusterState applyCreateIndexRequestWithV2Template(final ClusterState cu logger.debug("applying create index request using composable template [{}]", templateName); ComposableIndexTemplate template = currentState.getMetadata().templatesV2().get(templateName); - if (request.dataStreamName() == null && template.getDataStreamTemplate() != null) { + final boolean isDataStream = template.getDataStreamTemplate() != null; + if (isDataStream && request.dataStreamName() == null) { throw new IllegalArgumentException("cannot create index with name [" + request.index() + "], because it matches with template [" + templateName + "] that creates data streams only, " + "use create data stream api instead"); @@ -514,14 +520,28 @@ private ClusterState applyCreateIndexRequestWithV2Template(final ClusterState cu int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(aggregatedIndexSettings, request, routingNumShards); - return applyCreateIndexWithTemporaryService(currentState, request, silent, null, tmpImd, mappings, - indexService -> resolveAndValidateAliases(request.index(), request.aliases(), - MetadataIndexTemplateService.resolveAliases(currentState.metadata(), templateName), currentState.metadata(), - // the context is only used for validation so it's fine to pass fake values for the - // shard id and the current timestamp - aliasValidator, xContentRegistry, indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()), - indexService.dateMathExpressionResolverAt(request.getNameResolvedAt())), - Collections.singletonList(templateName), metadataTransformer); + return applyCreateIndexWithTemporaryService( + currentState, + request, + silent, + null, + tmpImd, + mappings, + indexService -> resolveAndValidateAliases( + request.index(), + // data stream aliases are created separately in MetadataCreateDataStreamService::createDataStream + isDataStream ? Set.of() : request.aliases(), + isDataStream ? 
List.of() : MetadataIndexTemplateService.resolveAliases(currentState.metadata(), templateName),
+                currentState.metadata(),
+                aliasValidator,
+                xContentRegistry,
+                // the context is used only for validation so it's fine to pass fake values for the shard id and the current timestamp
+                indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
+                indexService.dateMathExpressionResolverAt(request.getNameResolvedAt())
+            ),
+            Collections.singletonList(templateName),
+            metadataTransformer
+        );
     }
     private ClusterState applyCreateIndexRequestForSystemDataStream(final ClusterState currentState,
@@ -557,7 +577,7 @@ private ClusterState applyCreateIndexRequestForSystemDataStream(final ClusterSta
         return applyCreateIndexWithTemporaryService(currentState, request, silent, null, tmpImd, mappings,
             indexService -> resolveAndValidateAliases(request.index(), request.aliases(),
-                MetadataIndexTemplateService.resolveAliases(template, componentTemplates, null), currentState.metadata(),
+                MetadataIndexTemplateService.resolveAliases(template, componentTemplates), currentState.metadata(),
                 // the context is only used for validation so it's fine to pass fake values for the
                 // shard id and the current timestamp
                 aliasValidator, xContentRegistry, indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
@@ -775,6 +795,7 @@ static Settings aggregateIndexSettings(ClusterState currentState, CreateIndexClu
         shardLimitValidator.validateShardLimit(indexSettings, currentState);
         validateSoftDeleteSettings(indexSettings);
         validateTranslogRetentionSettings(indexSettings);
+        validateStoreTypeSetting(indexSettings);
         return indexSettings;
     }
@@ -798,7 +819,9 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index
             // in this case we either have no index to recover from or
             // we have a source index with 1 shard and without an explicit split factor
             // or one that is valid in that case we can split into whatever and auto-generate a new factor.
- if (IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(indexSettings)) { + // (Don't use IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(indexSettings) here, otherwise + // we get the default value when `null` has been provided as value) + if (indexSettings.get(IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey()) != null) { routingNumShards = IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(indexSettings); } else { routingNumShards = calculateNumRoutingShards(numTargetShards, indexVersionCreated); @@ -1075,6 +1098,9 @@ private static List validateIndexCustomPath(Settings settings, @Nullable */ static List validateShrinkIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); + if (isSearchableSnapshotStore(sourceMetadata.getSettings())) { + throw new IllegalArgumentException("can't shrink searchable snapshot index [" + sourceIndex + ']'); + } assert INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings); IndexMetadata.selectShrinkShards(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); @@ -1106,11 +1132,23 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, static void validateSplitIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); + if (isSearchableSnapshotStore(sourceMetadata.getSettings())) { + throw new IllegalArgumentException("can't split searchable snapshot index [" + sourceIndex + ']'); + } IndexMetadata.selectSplitShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } static void validateCloneIndex(ClusterState state, String sourceIndex, String targetIndexName, Settings targetIndexSettings) { IndexMetadata sourceMetadata = validateResize(state, sourceIndex, targetIndexName, targetIndexSettings); + if (isSearchableSnapshotStore(sourceMetadata.getSettings())) { + for (Setting nonCloneableSetting : Arrays.asList(INDEX_STORE_TYPE_SETTING, INDEX_RECOVERY_TYPE_SETTING)) { + if (nonCloneableSetting.exists(targetIndexSettings) == false) { + throw new IllegalArgumentException("can't clone searchable snapshot index [" + sourceIndex + "]; setting [" + + nonCloneableSetting.getKey() + + "] should be overridden"); + } + } + } IndexMetadata.selectCloneShard(0, sourceMetadata, INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } @@ -1130,7 +1168,6 @@ static IndexMetadata validateResize(ClusterState state, String sourceIndex, Stri throw new IllegalArgumentException(String.format(Locale.ROOT, "cannot resize the write index [%s] for data stream [%s]", sourceIndex, source.getParentDataStream().getName())); } - // ensure index is read-only if (state.blocks().indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) { throw new IllegalStateException("index " + sourceIndex + " must be read-only to resize index. use \"index.blocks.write=true\""); @@ -1242,4 +1279,14 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { + "and [index.translog.retention.size] are deprecated and effectively ignored. 
They will be removed in a future version."); } } + + public static void validateStoreTypeSetting(Settings indexSettings) { + final String storeType = IndexModule.INDEX_STORE_TYPE_SETTING.get(indexSettings); + if (IndexModule.Type.SIMPLEFS.match(storeType)) { + deprecationLogger.deprecate(DeprecationCategory.SETTINGS, "store_type_setting", + "[simplefs] is deprecated and will be removed in 8.0. Use [niofs] or other file systems instead. " + + "Elasticsearch 7.15 or later uses [niofs] for the [simplefs] store type as it offers superior " + + "or equivalent performance to [simplefs]."); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java index 8ff7a4a9a7aef..7c95387e6b4c2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java @@ -130,6 +130,15 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable { aliasValidator.validateAlias(alias, action.getIndex(), indexRouting, lookup); + if (Strings.hasLength(filter)) { + for (Index index : dataStream.getIndices()) { + IndexMetadata imd = metadata.get(index.getName()); + if (imd == null) { + throw new IndexNotFoundException(action.getIndex()); + } + validateFilter(indicesToClose, indices, action, imd, alias, filter); + } + } }; if (action.apply(newAliasValidator, metadata, null)) { changed = true; @@ -145,25 +154,7 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable { aliasValidator.validateAlias(alias, action.getIndex(), indexRouting, lookup); if (Strings.hasLength(filter)) { - IndexService indexService = indices.get(index.getIndex().getName()); - if (indexService == null) { - indexService = indicesService.indexService(index.getIndex()); - if (indexService == null) { - // temporarily create the index and add mappings so we can parse the filter - try { - indexService = indicesService.createIndex(index, emptyList(), false); - indicesToClose.add(index.getIndex()); - } catch (IOException e) { - throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e); - } - indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY); - } - indices.put(action.getIndex(), indexService); - } - // the context is only used for validation so it's fine to pass fake values for the shard id, - // but the current timestamp should be set to real value as we may use `now` in a filtered alias - aliasValidator.validateAliasFilter(alias, filter, indexService.newSearchExecutionContext(0, 0, - null, () -> System.currentTimeMillis(), null, emptyMap()), xContentRegistry); + validateFilter(indicesToClose, indices, action, index, alias, filter); } }; if (action.apply(newAliasValidator, metadata, index)) { @@ -198,6 +189,33 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable indicesToClose, + Map indices, + AliasAction action, + IndexMetadata index, + String alias, + String filter) { + IndexService indexService = indices.get(index.getIndex().getName()); + if (indexService == null) { + indexService = indicesService.indexService(index.getIndex()); + if (indexService == null) { + // temporarily create the index and add mappings so we can parse the filter + try { + indexService = indicesService.createIndex(index, emptyList(), false); + indicesToClose.add(index.getIndex()); + } 
catch (IOException e) { + throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e); + } + indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY); + } + indices.put(action.getIndex(), indexService); + } + // the context is only used for validation so it's fine to pass fake values for the shard id, + // but the current timestamp should be set to real value as we may use `now` in a filtered alias + aliasValidator.validateAliasFilter(alias, filter, indexService.newSearchExecutionContext(0, 0, + null, System::currentTimeMillis, null, emptyMap()), xContentRegistry); + } + private void validateAliasTargetIsNotDSBackingIndex(ClusterState currentState, AliasAction action) { IndexAbstraction indexAbstraction = currentState.metadata().getIndicesLookup().get(action.getIndex()); assert indexAbstraction != null : "invalid cluster metadata. index [" + action.getIndex() + "] was not found"; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index ea7cae3a682bf..0c8f0520ec961 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -643,7 +643,9 @@ public void innerOnFailure(final Exception e) { private void processIfFinished() { if (countDown.countDown()) { - onResponse.accept(new AddBlockResult(index, results.toArray(new AddBlockShardResult[results.length()]))); + AddBlockResult result = new AddBlockResult(index, results.toArray(new AddBlockShardResult[results.length()])); + logger.debug("result of applying block to index {}: {}", index, result); + onResponse.accept(result); } } }); @@ -875,7 +877,6 @@ static Tuple> finalizeBlock(final Clust logger.debug("verification of shards before blocking {} failed [{}]", index, result); continue; } - final IndexMetadata indexMetadata = metadata.getSafe(index); final ClusterBlock tempBlock = blockedIndices.get(index); assert tempBlock != null; assert tempBlock.uuid() != null; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index bd34d5adf9d24..2a80812b295b5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -1112,25 +1113,31 @@ public static List> resolveAliases(final List> resolveAliases(final Metadata metadata, final String templateName) { final ComposableIndexTemplate template = metadata.templatesV2().get(templateName); assert template != null : "attempted to resolve aliases for a template [" + templateName + "] that did not exist in the cluster state"; + return resolveAliases(metadata, template); + } + + /** + * Resolve the given v2 template into an ordered list of aliases + */ + static List> resolveAliases(final Metadata metadata, 
final ComposableIndexTemplate template) {
         if (template == null) {
             return List.of();
         }
         final Map<String, ComponentTemplate> componentTemplates = metadata.componentTemplates();
-        return resolveAliases(template, componentTemplates, templateName);
+        return resolveAliases(template, componentTemplates);
     }
     /**
      * Resolve the given v2 template and component templates into an ordered list of aliases
      */
     static List<Map<String, AliasMetadata>> resolveAliases(final ComposableIndexTemplate template,
-                                                           final Map<String, ComponentTemplate> componentTemplates,
-                                                           @Nullable String templateName) {
+                                                           final Map<String, ComponentTemplate> componentTemplates) {
         Objects.requireNonNull(template, "attempted to resolve aliases for a null template");
         Objects.requireNonNull(componentTemplates, "attempted to resolve aliases with null component templates");
         List<Map<String, AliasMetadata>> aliases = template.composedOf().stream()
@@ -1146,12 +1153,6 @@ static List<Map<String, AliasMetadata>> resolveAliases(final ComposableIndexTemp
             .map(Template::aliases)
             .ifPresent(aliases::add);
-        // A template that creates data streams can't also create aliases.
-        // (otherwise we end up with aliases pointing to backing indices of data streams)
-        if (aliases.size() > 0 && template.getDataStreamTemplate() != null) {
-            throw new IllegalArgumentException("template [" + templateName + "] has alias and data stream definitions");
-        }
-
         // Aliases are applied in order, but subsequent alias configuration from the same name is
         // ignored, so in order for the order to be correct, alias configuration should be in order
         // of precedence (with the index template first)
@@ -1218,7 +1219,7 @@ private static void validateCompositeTemplate(final ClusterState state,
         if (template.getDataStreamTemplate() != null) {
             // If there is no _data_stream meta field mapper and a data stream should be created then
             // fail as if the data_stream field can't be parsed:
-            if (tempIndexService.mapperService().isMetadataField("_data_stream_timestamp") == false) {
+            if (tempIndexService.mapperService().isMetadataField(DataStreamTimestampFieldMapper.NAME) == false) {
                 // Fail like a parsing exception, since we will be moving data_stream template out of server module and
                 // then we would fail with the same error message, like we do here.
throw new XContentParseException("[index_template] unknown field [data_stream]"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index 72a6882a94caa..0e663b9eadd4b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndicesService; @@ -161,7 +162,8 @@ private static void prepareBackingIndex( MapperService mapperService = mapperSupplier.apply(im); mapperService.merge(im, MapperService.MergeReason.MAPPING_RECOVERY); - mapperService.merge("_doc", Map.of("_data_stream_timestamp", Map.of("enabled", true)), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("_doc", Map.of(DataStreamTimestampFieldMapper.NAME, Map.of("enabled", true)), + MapperService.MergeReason.MAPPING_UPDATE); DocumentMapper mapper = mapperService.documentMapper(); b.put(IndexMetadata.builder(im) @@ -193,6 +195,7 @@ static void validateBackingIndices(ClusterState currentState, String dataStreamN } } + @SuppressWarnings("rawtypes") public static final class MigrateToDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest { private final String aliasName; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 2eab1d439a070..431d7462fb3c2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -82,7 +82,7 @@ public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request false, // don't validate values here we check it below never allow to change the number of shards true); // validate internal or private index settings for (String key : normalizedSettings.keySet()) { - Setting setting = indexScopedSettings.get(key); + Setting setting = indexScopedSettings.get(key); boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key); assert setting != null // we already validated the normalized settings || (isWildcard && normalizedSettings.hasValue(key) == false) @@ -208,7 +208,9 @@ public ClusterState execute(ClusterState currentState) { if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings) || IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) { for (String index : actualIndices) { - MetadataCreateIndexService.validateTranslogRetentionSettings(metadataBuilder.get(index).getSettings()); + final Settings settings = metadataBuilder.get(index).getSettings(); + MetadataCreateIndexService.validateTranslogRetentionSettings(settings); + MetadataCreateIndexService.validateStoreTypeSetting(settings); } } boolean changed = false; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java index a52d27f56a325..59d2743a189be 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java @@ -14,10 +14,10 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index bfe73c4d409d9..200878577b411 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -10,13 +10,16 @@ import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diffable; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Locale; @@ -35,6 +38,7 @@ public class SingleNodeShutdownMetadata extends AbstractDiffable PARSER = new ConstructingObjectParser<>( "node_shutdown_info", @@ -42,7 +46,8 @@ public class SingleNodeShutdownMetadata extends AbstractDiffable TimeValue.parseTimeValue(p.textOrNull(), ALLOCATION_DELAY_FIELD.getPreferredName()), ALLOCATION_DELAY_FIELD, + ObjectParser.ValueType.STRING_OR_NULL + ); } public static SingleNodeShutdownMetadata parse(XContentParser parser) { return PARSER.apply(parser, null); } + public static final TimeValue DEFAULT_RESTART_SHARD_ALLOCATION_DELAY = TimeValue.timeValueMinutes(5); + private final String nodeId; private final Type type; private final String reason; private final long startedAtMillis; + @Nullable private final TimeValue allocationDelay; /** * @param nodeId The node ID that this shutdown metadata refers to. 
@@ -72,12 +85,17 @@ private SingleNodeShutdownMetadata( String nodeId, Type type, String reason, - long startedAtMillis + long startedAtMillis, + @Nullable TimeValue allocationDelay ) { this.nodeId = Objects.requireNonNull(nodeId, "node ID must not be null"); this.type = Objects.requireNonNull(type, "shutdown type must not be null"); this.reason = Objects.requireNonNull(reason, "shutdown reason must not be null"); this.startedAtMillis = startedAtMillis; + if (allocationDelay != null && Type.RESTART.equals(type) == false) { + throw new IllegalArgumentException("shard allocation delay is only valid for RESTART-type shutdowns"); + } + this.allocationDelay = allocationDelay; } public SingleNodeShutdownMetadata(StreamInput in) throws IOException { @@ -85,6 +103,7 @@ public SingleNodeShutdownMetadata(StreamInput in) throws IOException { this.type = in.readEnum(Type.class); this.reason = in.readString(); this.startedAtMillis = in.readVLong(); + this.allocationDelay = in.readOptionalTimeValue(); } /** @@ -115,12 +134,27 @@ public long getStartedAtMillis() { return startedAtMillis; } + /** + * @return The amount of time shard reallocation should be delayed for shards on this node, so that they will not be automatically + * reassigned while the node is restarting. Will be {@code null} for non-restart shutdowns. + */ + @Nullable + public TimeValue getAllocationDelay() { + if (allocationDelay != null) { + return allocationDelay; + } else if (Type.RESTART.equals(type)) { + return DEFAULT_RESTART_SHARD_ALLOCATION_DELAY; + } + return null; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); out.writeEnum(type); out.writeString(reason); out.writeVLong(startedAtMillis); + out.writeOptionalTimeValue(allocationDelay); } @Override @@ -131,6 +165,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(TYPE_FIELD.getPreferredName(), type); builder.field(REASON_FIELD.getPreferredName(), reason); builder.timeField(STARTED_AT_MILLIS_FIELD.getPreferredName(), STARTED_AT_READABLE_FIELD, startedAtMillis); + if (allocationDelay != null) { + builder.field(ALLOCATION_DELAY_FIELD.getPreferredName(), allocationDelay.getStringRep()); + } } builder.endObject(); @@ -145,7 +182,8 @@ public boolean equals(Object o) { return getStartedAtMillis() == that.getStartedAtMillis() && getNodeId().equals(that.getNodeId()) && getType() == that.getType() - && getReason().equals(that.getReason()); + && getReason().equals(that.getReason()) + && Objects.equals(allocationDelay, that.allocationDelay); } @Override @@ -154,7 +192,8 @@ public int hashCode() { getNodeId(), getType(), getReason(), - getStartedAtMillis() + getStartedAtMillis(), + allocationDelay ); } @@ -178,6 +217,7 @@ public static class Builder { private Type type; private String reason; private long startedAtMillis = -1; + private TimeValue allocationDelay; private Builder() {} @@ -217,15 +257,25 @@ public Builder setStartedAtMillis(long startedAtMillis) { return this; } + /** + * @param allocationDelay The amount of time shard reallocation should be delayed while this node is offline. + * @return This builder. 
+ */ + public Builder setAllocationDelay(TimeValue allocationDelay) { + this.allocationDelay = allocationDelay; + return this; + } + public SingleNodeShutdownMetadata build() { if (startedAtMillis == -1) { throw new IllegalArgumentException("start timestamp must be set"); } + return new SingleNodeShutdownMetadata( nodeId, type, reason, - startedAtMillis + startedAtMillis, allocationDelay ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java index c8fd758292de7..d017aee919089 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java @@ -18,11 +18,14 @@ import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import java.util.stream.Collectors; public class DiscoveryNodeFilters { + static final Set NON_ATTRIBUTE_NAMES = Set.of("_ip", "_host_ip", "_publish_ip", "host", "_id", "_name", "name"); + public enum OpType { AND, OR @@ -226,6 +229,14 @@ public boolean match(DiscoveryNode node) { } } + /** + * + * @return true if this filter only contains attribute values, i.e., no node specific info. + */ + public boolean isOnlyAttributeValueFilter() { + return filters.keySet().stream().anyMatch(NON_ATTRIBUTE_NAMES::contains) == false; + } + /** * Generates a human-readable string for the DiscoverNodeFilters. * Example: {@code _id:"id1 OR blah",name:"blah OR name2"} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 78bade7493875..b207776d49260 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -9,18 +9,20 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Assertions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Randomness; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -65,6 +67,8 @@ public class RoutingNodes implements Iterable { private final Map> assignedShards = new HashMap<>(); + private final Map nodeShutdowns; + private final boolean readOnly; private int inactivePrimaryCount = 0; @@ -83,6 +87,7 @@ public RoutingNodes(ClusterState clusterState) { public RoutingNodes(ClusterState clusterState, boolean readOnly) { this.readOnly = readOnly; final RoutingTable routingTable = clusterState.routingTable(); + nodeShutdowns = clusterState.metadata().nodeShutdowns(); Map> nodesToShards = new HashMap<>(); // fill in the nodeToShards with the "live" nodes @@ -533,9 +538,17 @@ assert 
getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId // re-resolve replica as earlier iteration could have changed source/target of replica relocation ShardRouting replicaShard = getByAllocationId(routing.shardId(), routing.allocationId().getId()); assert replicaShard != null : "failed to re-resolve " + routing + " when failing replicas"; - UnassignedInfo primaryFailedUnassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, - "primary failed while replica initializing", null, 0, unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), false, AllocationStatus.NO_ATTEMPT, Collections.emptySet()); + UnassignedInfo primaryFailedUnassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.PRIMARY_FAILED, + "primary failed while replica initializing", + null, + 0, + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + false, + AllocationStatus.NO_ATTEMPT, + Collections.emptySet(), + routing.currentNodeId()); failShard(logger, replicaShard, primaryFailedUnassignedInfo, indexMetadata, routingChangesObserver); } } @@ -858,10 +871,17 @@ public void ignoreShard(ShardRouting shard, AllocationStatus allocationStatus, R UnassignedInfo currInfo = shard.unassignedInfo(); assert currInfo != null; if (allocationStatus.equals(currInfo.getLastAllocationStatus()) == false) { - UnassignedInfo newInfo = new UnassignedInfo(currInfo.getReason(), currInfo.getMessage(), currInfo.getFailure(), - currInfo.getNumFailedAllocations(), currInfo.getUnassignedTimeInNanos(), - currInfo.getUnassignedTimeInMillis(), currInfo.isDelayed(), - allocationStatus, currInfo.getFailedNodeIds()); + UnassignedInfo newInfo = new UnassignedInfo( + currInfo.getReason(), + currInfo.getMessage(), + currInfo.getFailure(), + currInfo.getNumFailedAllocations(), + currInfo.getUnassignedTimeInNanos(), + currInfo.getUnassignedTimeInMillis(), + currInfo.isDelayed(), + allocationStatus, + currInfo.getFailedNodeIds(), + currInfo.getLastAllocatedNodeId()); ShardRouting updatedShard = shard.updateUnassigned(newInfo, shard.recoverySource()); changes.unassignedInfoUpdated(shard, newInfo); shard = updatedShard; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index b8426e4968a9c..36acc0ee4e4f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -93,8 +93,8 @@ public boolean hasIndex(String index) { } public boolean hasIndex(Index index) { - IndexRoutingTable indexRouting = index(index.getName()); - return indexRouting != null && indexRouting.getIndex().equals(index); + IndexRoutingTable indexRouting = index(index); + return indexRouting != null; } public IndexRoutingTable index(String index) { @@ -102,7 +102,8 @@ public IndexRoutingTable index(String index) { } public IndexRoutingTable index(Index index) { - return indicesRouting.get(index.getName()); + IndexRoutingTable indexRouting = index(index.getName()); + return indexRouting != null && indexRouting.getIndex().equals(index) ? 
indexRouting : null; } public ImmutableOpenMap indicesRouting() { @@ -134,8 +135,8 @@ public IndexShardRoutingTable shardRoutingTable(String index, int shardId) { * @throws ShardNotFoundException if provided shard id is unknown */ public IndexShardRoutingTable shardRoutingTable(ShardId shardId) { - IndexRoutingTable indexRouting = index(shardId.getIndexName()); - if (indexRouting == null || indexRouting.getIndex().equals(shardId.getIndex()) == false) { + IndexRoutingTable indexRouting = index(shardId.getIndex()); + if (indexRouting == null) { throw new IndexNotFoundException(shardId.getIndex()); } IndexShardRoutingTable shard = indexRouting.shard(shardId.id()); @@ -147,7 +148,7 @@ public IndexShardRoutingTable shardRoutingTable(ShardId shardId) { @Nullable public ShardRouting getByAllocationId(ShardId shardId, String allocationId) { - final IndexRoutingTable indexRoutingTable = index(shardId.getIndexName()); + final IndexRoutingTable indexRoutingTable = index(shardId.getIndex()); if (indexRoutingTable == null) { return null; } @@ -256,6 +257,10 @@ public ShardsIterator allShards(String[] indices) { return allShardsSatisfyingPredicate(indices, shardRouting -> true, false); } + public ShardsIterator allActiveShards(String[] indices) { + return allShardsSatisfyingPredicate(indices, ShardRouting::active, false); + } + public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) { return allShardsSatisfyingPredicate(indices, shardRouting -> true, true); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index f8e201e1f2361..f8ed1f8444350 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -9,11 +9,12 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,9 +22,10 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.time.Instant; @@ -31,7 +33,9 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; /** @@ -39,6 +43,12 @@ */ public final class UnassignedInfo implements ToXContentFragment, Writeable { + /** + * The version that the {@code lastAllocatedNode} field was added in. Used to adapt streaming of this class as appropriate for the + * version of the node sending/receiving it. 
Should be removed once wire compatibility with this version is no longer necessary. + */ + private static final Version VERSION_LAST_ALLOCATED_NODE_ADDED = Version.V_8_0_0; + public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(ZoneOffset.UTC); public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = @@ -114,7 +124,12 @@ public enum Reason { /** * Unassigned as a result of closing an index. */ - INDEX_CLOSED + INDEX_CLOSED, + /** + * Similar to NODE_LEFT, but at the time the node left, it had been registered for a restart via the Node Shutdown API. Note that + * there is no verification that it was ready to be restarted, so this may be an intentional restart or a node crash. + */ + NODE_RESTARTING } /** @@ -208,6 +223,7 @@ public String value() { private final int failedAllocations; private final Set failedNodeIds; private final AllocationStatus lastAllocationStatus; // result of the last allocation attempt for this shard + private final String lastAllocatedNodeId; /** * creates an UnassignedInfo object based on **current** time @@ -217,22 +233,32 @@ public String value() { **/ public UnassignedInfo(Reason reason, String message) { this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false, - AllocationStatus.NO_ATTEMPT, Collections.emptySet()); + AllocationStatus.NO_ATTEMPT, Collections.emptySet(), null); } /** - * @param reason the cause for making this shard unassigned. See {@link Reason} for more information. - * @param message more information about cause. - * @param failure the shard level failure that caused this shard to be unassigned, if exists. - * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation - * @param unassignedTimeMillis the time of unassignment used to display to in our reporting. - * @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING. - * @param lastAllocationStatus the result of the last allocation attempt for this shard - * @param failedNodeIds a set of nodeIds that failed to complete allocations for this shard + * @param reason the cause for making this shard unassigned. See {@link Reason} for more information. + * @param message more information about cause. + * @param failure the shard level failure that caused this shard to be unassigned, if exists. + * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation + * @param unassignedTimeMillis the time of unassignment used to display to in our reporting. + * @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING. 
+ * @param lastAllocationStatus the result of the last allocation attempt for this shard + * @param failedNodeIds a set of nodeIds that failed to complete allocations for this shard + * @param lastAllocatedNodeId the ID of the node this shard was last allocated to */ - public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Exception failure, int failedAllocations, - long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed, AllocationStatus lastAllocationStatus, - Set failedNodeIds) { + public UnassignedInfo( + Reason reason, + @Nullable String message, + @Nullable Exception failure, + int failedAllocations, + long unassignedTimeNanos, + long unassignedTimeMillis, + boolean delayed, + AllocationStatus lastAllocationStatus, + Set failedNodeIds, + @Nullable String lastAllocatedNodeId + ) { this.reason = Objects.requireNonNull(reason); this.unassignedTimeMillis = unassignedTimeMillis; this.unassignedTimeNanos = unassignedTimeNanos; @@ -241,14 +267,24 @@ public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Excepti this.failure = failure; this.failedAllocations = failedAllocations; this.lastAllocationStatus = Objects.requireNonNull(lastAllocationStatus); - this.failedNodeIds = Collections.unmodifiableSet(failedNodeIds); - assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) : - "failedAllocations: " + failedAllocations + " for reason " + reason; + this.failedNodeIds = Set.copyOf(failedNodeIds); + this.lastAllocatedNodeId = lastAllocatedNodeId; + assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) : "failedAllocations: " + + failedAllocations + + " for reason " + + reason; assert (message == null && failure != null) == false : "provide a message if a failure exception is provided"; - assert (delayed && reason != Reason.NODE_LEFT) == false : "shard can only be delayed if it is unassigned due to a node leaving"; + assert (delayed + && reason != Reason.NODE_LEFT + && reason != Reason.NODE_RESTARTING) == false : "shard can only be delayed if it is unassigned due to a node leaving"; + // The below check should be expanded to require `lastAllocatedNodeId` for `NODE_LEFT` as well, once we no longer have to consider + // BWC with versions prior to `VERSION_LAST_ALLOCATED_NODE_ADDED`. + assert (reason == Reason.NODE_RESTARTING && lastAllocatedNodeId == null) == false + : "last allocated node ID must be set if the shard is unassigned due to a node restarting"; } public UnassignedInfo(StreamInput in) throws IOException { + // Because Reason.NODE_RESTARTING is new and can't be sent by older versions, there's no need to vary the deserialization behavior this.reason = Reason.values()[(int) in.readByte()]; this.unassignedTimeMillis = in.readLong(); // As System.nanoTime() cannot be compared across different JVMs, reset it to now. 
@@ -260,10 +296,19 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); + if (in.getVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { + this.lastAllocatedNodeId = in.readOptionalString(); + } else { + this.lastAllocatedNodeId = null; + } } public void writeTo(StreamOutput out) throws IOException { - out.writeByte((byte) reason.ordinal()); + if (reason.equals(Reason.NODE_RESTARTING) && out.getVersion().before(VERSION_LAST_ALLOCATED_NODE_ADDED)) { + out.writeByte((byte) Reason.NODE_LEFT.ordinal()); + } else { + out.writeByte((byte) reason.ordinal()); + } out.writeLong(unassignedTimeMillis); // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs out.writeBoolean(delayed); @@ -272,6 +317,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); out.writeCollection(failedNodeIds, StreamOutput::writeString); + if (out.getVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { + out.writeOptionalString(lastAllocatedNodeId); + } } /** @@ -339,6 +387,14 @@ public String getDetails() { return message + (failure == null ? "" : ", failure " + ExceptionsHelper.stackTrace(failure)); } + /** + * Gets the ID of the node this shard was last allocated to, or null if unavailable. + */ + @Nullable + public String getLastAllocatedNodeId() { + return lastAllocatedNodeId; + } + /** * Get the status for the last allocation attempt for this shard. */ @@ -366,8 +422,21 @@ public Set getFailedNodeIds() { * * @return calculated delay in nanoseconds */ - public long getRemainingDelay(final long nanoTimeNow, final Settings indexSettings) { - long delayTimeoutNanos = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos(); + public long getRemainingDelay( + final long nanoTimeNow, + final Settings indexSettings, + final Map nodesShutdownMap + ) { + final long indexLevelDelay = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos(); + long delayTimeoutNanos = Optional.ofNullable(lastAllocatedNodeId) + // If the node wasn't restarting when this became unassigned, use default delay + .filter(nodeId -> reason.equals(Reason.NODE_RESTARTING)) + .map(nodesShutdownMap::get) + .filter(shutdownMetadata -> SingleNodeShutdownMetadata.Type.RESTART.equals(shutdownMetadata.getType())) + .map(SingleNodeShutdownMetadata::getAllocationDelay) + .map(TimeValue::nanos) + .map(knownRestartDelay -> Math.max(indexLevelDelay, knownRestartDelay)) + .orElse(indexLevelDelay); assert nanoTimeNow >= unassignedTimeNanos; return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos)); } @@ -399,7 +468,11 @@ public static long findNextDelayedAllocation(long currentNanoTime, ClusterState if (unassignedInfo.isDelayed()) { Settings indexSettings = metadata.index(shard.index()).getSettings(); // calculate next time to schedule - final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(currentNanoTime, indexSettings); + final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay( + currentNanoTime, + indexSettings, + metadata.nodeShutdowns() + ); if (newComputedLeftDelayNanos < nextDelayNanos) { nextDelayNanos = newComputedLeftDelayNanos; } @@ -486,6 +559,11 @@ public boolean equals(Object o) { if (Objects.equals(failure, that.failure) == false) { return false; } + + if 
(Objects.equals(lastAllocatedNodeId, that.lastAllocatedNodeId) == false) { + return false; + } + return failedNodeIds.equals(that.failedNodeIds); } @@ -499,6 +577,7 @@ public int hashCode() { result = 31 * result + (failure != null ? failure.hashCode() : 0); result = 31 * result + lastAllocationStatus.hashCode(); result = 31 * result + failedNodeIds.hashCode(); + result = 31 * result + (lastAllocatedNodeId != null ? lastAllocatedNodeId.hashCode() : 0); return result; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 5e7d008f1e6f2..948ec14bea434 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -19,6 +19,8 @@ import org.elasticsearch.cluster.metadata.AutoExpandReplicas; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.RoutingNode; @@ -45,6 +47,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -209,7 +212,7 @@ public ClusterState applyFailedShards(final ClusterState clusterState, final Lis String message = "failed shard on node [" + shardToFail.currentNodeId() + "]: " + failedShardEntry.getMessage(); UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, message, failedShardEntry.getFailure(), failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false, - AllocationStatus.NO_ATTEMPT, failedNodeIds); + AllocationStatus.NO_ATTEMPT, failedNodeIds, shardToFail.currentNodeId()); if (failedShardEntry.markAsStale()) { allocation.removeAllocationId(failedShard); } @@ -300,13 +303,27 @@ private void removeDelayMarkers(RoutingAllocation allocation) { ShardRouting shardRouting = unassignedIterator.next(); UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); if (unassignedInfo.isDelayed()) { - final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(), - metadata.getIndexSafe(shardRouting.index()).getSettings()); + final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay( + allocation.getCurrentNanoTime(), + metadata.getIndexSafe(shardRouting.index()).getSettings(), + metadata.nodeShutdowns() + ); if (newComputedLeftDelayNanos == 0) { - unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), - unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus(), - unassignedInfo.getFailedNodeIds()), shardRouting.recoverySource(), allocation.changes()); + unassignedIterator.updateUnassigned( + new UnassignedInfo( + unassignedInfo.getReason(), + unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + unassignedInfo.getNumFailedAllocations(), + unassignedInfo.getUnassignedTimeInNanos(), + 
unassignedInfo.getUnassignedTimeInMillis(), + false, + unassignedInfo.getLastAllocationStatus(), + unassignedInfo.getFailedNodeIds(), + unassignedInfo.getLastAllocatedNodeId()), + shardRouting.recoverySource(), + allocation.changes() + ); } } } @@ -320,11 +337,21 @@ private void resetFailedAllocationCounter(RoutingAllocation allocation) { while (unassignedIterator.hasNext()) { ShardRouting shardRouting = unassignedIterator.next(); UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); - unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getNumFailedAllocations() > 0 ? - UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), unassignedInfo.getMessage(), - unassignedInfo.getFailure(), 0, unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), unassignedInfo.isDelayed(), - unassignedInfo.getLastAllocationStatus(), Collections.emptySet()), shardRouting.recoverySource(), allocation.changes()); + unassignedIterator.updateUnassigned( + new UnassignedInfo( + unassignedInfo.getNumFailedAllocations() > 0 ? UnassignedInfo.Reason.MANUAL_ALLOCATION : unassignedInfo.getReason(), + unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + 0, + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + unassignedInfo.isDelayed(), + unassignedInfo.getLastAllocationStatus(), + Collections.emptySet(), + unassignedInfo.getLastAllocatedNodeId()), + shardRouting.recoverySource(), + allocation.changes() + ); } } @@ -460,19 +487,40 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) { } private void disassociateDeadNodes(RoutingAllocation allocation) { + Map nodesShutdownMetadata = allocation.metadata().nodeShutdowns(); + for (Iterator it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) { RoutingNode node = it.next(); if (allocation.nodes().getDataNodes().containsKey(node.nodeId())) { // its a live node, continue continue; } + final UnassignedInfo.Reason + unassignedReason = + nodesShutdownMetadata.containsKey(node.nodeId()) ? 
+ UnassignedInfo.Reason.NODE_RESTARTING : + UnassignedInfo.Reason.NODE_LEFT; // now, go over all the shards routing on the node, and fail them for (ShardRouting shardRouting : node.copyShards()) { final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetadata.getSettings()).nanos() > 0; - UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left [" + node.nodeId() + "]", - null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT, - Collections.emptySet()); + boolean delayedDueToKnownRestart = Optional.ofNullable(nodesShutdownMetadata.get(node.nodeId())) + .map(shutdown -> Type.RESTART.equals(shutdown.getType()) && shutdown.getAllocationDelay().nanos() > 0) + .orElse(false); + boolean delayed = delayedDueToKnownRestart + || INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetadata.getSettings()).nanos() > 0; + + UnassignedInfo unassignedInfo = new UnassignedInfo( + unassignedReason, + "node_left [" + node.nodeId() + "]", + null, + 0, + allocation.getCurrentNanoTime(), + System.currentTimeMillis(), + delayed, + AllocationStatus.NO_ATTEMPT, + Collections.emptySet(), + shardRouting.currentNodeId() + ); allocation.routingNodes().failShard(logger, shardRouting, unassignedInfo, indexMetadata, allocation.changes()); } // its a dead node, remove it, note, its important to remove it *after* we apply failed shard diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 25a3409110118..11200891051db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -145,8 +145,9 @@ public SnapshotShardSizeInfo snapshotShardSizeInfo() { return shardSizeInfo; } + @SuppressWarnings("unchecked") public T custom(String key) { - return (T)customs.get(key); + return (T) customs.get(key); } public ImmutableOpenMap getCustoms() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 59a332d6ce5bf..0b223d0e69c3d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -138,11 +138,22 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { final ShardRouting shardRouting = unassignedIterator.next(); final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == AllocationStatus.NO_ATTEMPT) { - unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), - unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), unassignedInfo.isDelayed(), AllocationStatus.DECIDERS_NO, - unassignedInfo.getFailedNodeIds()), - shardRouting.recoverySource(), allocation.changes()); + unassignedIterator.updateUnassigned( + new UnassignedInfo( + unassignedInfo.getReason(), + 
unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + unassignedInfo.getNumFailedAllocations(), + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + unassignedInfo.isDelayed(), + AllocationStatus.DECIDERS_NO, + unassignedInfo.getFailedNodeIds(), + unassignedInfo.getLastAllocatedNodeId() + ), + shardRouting.recoverySource(), + allocation.changes() + ); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index eeb76f336bd59..980d88d3c42a8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -129,7 +129,7 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) ", " + shardRouting.unassignedInfo().getMessage(); unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY, unassignedInfoMessage, shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false, - shardRouting.unassignedInfo().getLastAllocationStatus(), Collections.emptySet()); + shardRouting.unassignedInfo().getLastAllocationStatus(), Collections.emptySet(), null); } initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index abe3f6a417f5e..9bb4b98985002 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -55,6 +55,9 @@ private Decision canMove(ShardRouting shardRouting, RoutingAllocation allocation } for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { + if (snapshot.isClone()) { + continue; + } SnapshotsInProgress.ShardSnapshotStatus shardSnapshotStatus = snapshot.shards().get(shardRouting.shardId()); if (shardSnapshotStatus != null && shardSnapshotStatus.state().completed() == false && shardSnapshotStatus.nodeId() != null && shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId())) { diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 984db5b0c17b4..21743d0a9462a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -127,7 +127,8 @@ protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME, daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), - threadPool.scheduler()); + threadPool.scheduler(), + PrioritizedEsThreadPoolExecutor.StarvationWatcher.NOOP_STARVATION_WATCHER); } class UpdateTask extends SourcePrioritizedRunnable implements Function { diff --git 
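Taken together, the UnassignedInfo and AllocationService hunks above make delayed shard allocation aware of registered node restarts: the wire format is version-gated (nodes on older versions see NODE_LEFT and never receive lastAllocatedNodeId), and getRemainingDelay now uses the larger of the index-level node_left timeout and the allocation delay of a RESTART-type shutdown recorded for the shard's last node. A minimal worked sketch of that computation, mirroring the Math.max logic above; the 1m/5m/2m figures are purely illustrative:

    // Illustrative values only; the max/subtract structure follows getRemainingDelay above.
    long indexLevelDelay = TimeValue.timeValueMinutes(1).nanos(); // index.unassigned.node_left.delayed_timeout
    long restartDelay    = TimeValue.timeValueMinutes(5).nanos(); // allocation_delay of the registered RESTART shutdown
    long delayTimeout    = Math.max(indexLevelDelay, restartDelay);  // restart delay wins: 5m
    long elapsed         = TimeValue.timeValueMinutes(2).nanos();    // time since the shard became unassigned
    long remaining       = Math.max(0L, delayTimeout - elapsed);     // 3m left before delayed allocation proceeds
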
a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index ac261a6847131..742c207d221b6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -54,6 +54,7 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; +import java.util.function.LongSupplier; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -62,9 +63,16 @@ public class MasterService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(MasterService.class); - public static final Setting MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = - Setting.positiveTimeSetting("cluster.service.slow_master_task_logging_threshold", TimeValue.timeValueSeconds(10), - Setting.Property.Dynamic, Setting.Property.NodeScope); + public static final Setting MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( + "cluster.service.slow_master_task_logging_threshold", + TimeValue.timeValueSeconds(10), + Setting.Property.Dynamic, + Setting.Property.NodeScope); + + public static final Setting MASTER_SERVICE_STARVATION_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( + "cluster.service.master_service_starvation_logging_threshold", + TimeValue.timeValueMinutes(5), + Setting.Property.NodeScope); static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; @@ -75,6 +83,7 @@ public class MasterService extends AbstractLifecycleComponent { private java.util.function.Supplier clusterStateSupplier; private volatile TimeValue slowTaskLoggingThreshold; + private final TimeValue starvationLoggingThreshold; protected final ThreadPool threadPool; @@ -87,6 +96,8 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP this.slowTaskLoggingThreshold = MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold); + this.starvationLoggingThreshold = MASTER_SERVICE_STARVATION_LOGGING_THRESHOLD_SETTING.get(settings); + this.threadPool = threadPool; } @@ -112,10 +123,14 @@ protected synchronized void doStart() { protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { return EsExecutors.newSinglePrioritizing( - nodeName + "/" + MASTER_UPDATE_THREAD_NAME, - daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME), - threadPool.getThreadContext(), - threadPool.scheduler()); + nodeName + "/" + MASTER_UPDATE_THREAD_NAME, + daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME), + threadPool.getThreadContext(), + threadPool.scheduler(), + new MasterServiceStarvationWatcher( + starvationLoggingThreshold.getMillis(), + threadPool::relativeTimeInMillis, + () -> threadPoolExecutor)); } @SuppressWarnings("unchecked") @@ -789,4 +804,62 @@ public void submitStateUpdateTasks(final String source, } } + private static class MasterServiceStarvationWatcher implements PrioritizedEsThreadPoolExecutor.StarvationWatcher { + + private final long warnThreshold; + private final LongSupplier nowMillisSupplier; + private final Supplier threadPoolExecutorSupplier; + + // accesses of these mutable fields are synchronized (on this) + private long lastLogMillis; + private long nonemptySinceMillis; + private boolean isEmpty = true; + + MasterServiceStarvationWatcher( + long 
warnThreshold, + LongSupplier nowMillisSupplier, + Supplier threadPoolExecutorSupplier) { + this.nowMillisSupplier = nowMillisSupplier; + this.threadPoolExecutorSupplier = threadPoolExecutorSupplier; + this.warnThreshold = warnThreshold; + } + + @Override + public synchronized void onEmptyQueue() { + isEmpty = true; + } + + @Override + public void onNonemptyQueue() { + final long nowMillis = nowMillisSupplier.getAsLong(); + final long nonemptyDurationMillis; + synchronized (this) { + if (isEmpty) { + isEmpty = false; + nonemptySinceMillis = nowMillis; + lastLogMillis = nowMillis; + return; + } + + if (nowMillis - lastLogMillis < warnThreshold) { + return; + } + + lastLogMillis = nowMillis; + nonemptyDurationMillis = nowMillis - nonemptySinceMillis; + } + + final PrioritizedEsThreadPoolExecutor threadPoolExecutor = threadPoolExecutorSupplier.get(); + final TimeValue maxTaskWaitTime = threadPoolExecutor.getMaxTaskWaitTime(); + logger.warn("pending task queue has been nonempty for [{}/{}ms] which is longer than the warn threshold of [{}ms];" + + " there are currently [{}] pending tasks, the oldest of which has age [{}/{}ms]", + TimeValue.timeValueMillis(nonemptyDurationMillis), + nonemptyDurationMillis, + warnThreshold, + threadPoolExecutor.getNumberOfPendingTasks(), + maxTaskWaitTime, + maxTaskWaitTime.millis()); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 266a1536274b2..90da41f654528 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -9,9 +9,11 @@ package org.elasticsearch.common.blobstore; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.CheckedConsumer; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.util.Iterator; @@ -116,6 +118,18 @@ default void writeBlob(String blobName, BytesReference bytes, boolean failIfAlre writeBlob(blobName, bytes.streamInput(), bytes.length(), failIfAlreadyExists); } + /** + * Write a blob by providing a consumer that will write its contents to an output stream. This method allows serializing a blob's + * contents directly to the blob store without having to materialize the serialized version in full before writing. + * + * @param blobName the name of the blob to write + * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists + * @param atomic whether the write should be atomic in case the implementation supports it + * @param writer consumer for an output stream that will write the blob contents to the stream + */ + void writeBlob(String blobName, boolean failIfAlreadyExists, boolean atomic, + CheckedConsumer writer) throws IOException; + /** * Reads blob content from a {@link BytesReference} and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. 
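The new writeBlob overload above lets callers stream a blob's contents straight to the repository rather than materializing a BytesReference first. A hedged sketch of how a caller might use it (the method name, container variable and payload are hypothetical and imports are elided; only the writeBlob signature and its OutputStream consumer come from this change):

    // Sketch: write a blob atomically by streaming into the consumer's OutputStream.
    void streamBlob(BlobContainer container, List<byte[]> chunks) throws IOException {
        container.writeBlob("my-blob", true /* failIfAlreadyExists */, true /* atomic */, out -> {
            for (byte[] chunk : chunks) {
                out.write(chunk); // contents go directly to the blob store, never buffered in full
            }
        });
    }
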
diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 2d1d75618ad45..e7f7fd0889136 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -18,9 +18,11 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.internal.io.IOUtils; import java.io.FileNotFoundException; +import java.io.FilterOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -243,6 +245,49 @@ public void writeBlob(String blobName, BytesReference bytes, boolean failIfAlrea IOUtils.fsync(path, true); } + @Override + public void writeBlob(String blobName, + boolean failIfAlreadyExists, + boolean atomic, + CheckedConsumer writer) throws IOException { + if (atomic) { + final String tempBlob = tempBlobName(blobName); + try { + writeToPath(tempBlob, true, writer); + moveBlobAtomic(tempBlob, blobName, failIfAlreadyExists); + } catch (IOException ex) { + try { + deleteBlobsIgnoringIfNotExists(Iterators.single(tempBlob)); + } catch (IOException e) { + ex.addSuppressed(e); + } + throw ex; + } + } else { + writeToPath(blobName, failIfAlreadyExists, writer); + } + IOUtils.fsync(path, true); + } + + private void writeToPath(String blobName, boolean failIfAlreadyExists, CheckedConsumer writer) + throws IOException { + final Path file = path.resolve(blobName); + try { + try (OutputStream out = new BlobOutputStream(file)) { + writer.accept(out); + } + } catch (FileAlreadyExistsException faee) { + if (failIfAlreadyExists) { + throw faee; + } + deleteBlobsIgnoringIfNotExists(Iterators.single(blobName)); + try (OutputStream out = new BlobOutputStream(file)) { + writer.accept(out); + } + } + IOUtils.fsync(file, false); + } + @Override public void writeBlobAtomic(final String blobName, BytesReference bytes, boolean failIfAlreadyExists) throws IOException { final String tempBlob = tempBlobName(blobName); @@ -306,4 +351,16 @@ public static String tempBlobName(final String blobName) { public static boolean isTempBlobName(final String blobName) { return blobName.startsWith(TEMP_FILE_PREFIX); } + + private static class BlobOutputStream extends FilterOutputStream { + + BlobOutputStream(Path file) throws IOException { + super(Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + out.write(b, off, len); + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java index cb287531c8f7b..d2746fcb8d1f8 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/support/FilterBlobContainer.java @@ -13,9 +13,11 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.CheckedConsumer; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.util.Iterator; import java.util.Map; import 
java.util.Objects; @@ -61,6 +63,12 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b delegate.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); } + @Override + public void writeBlob(String blobName, boolean failIfAlreadyExists, boolean atomic, + CheckedConsumer writer) throws IOException { + delegate.writeBlob(blobName, failIfAlreadyExists, atomic, writer); + } + @Override public void writeBlobAtomic(String blobName, BytesReference bytes, boolean failIfAlreadyExists) throws IOException { delegate.writeBlobAtomic(blobName, bytes, failIfAlreadyExists); diff --git a/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java index dd10a801e89bc..ff290bbea9d93 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java @@ -43,12 +43,6 @@ public interface CircuitBreaker { * writing requests on the network layer. */ String IN_FLIGHT_REQUESTS = "inflight_requests"; - /** - * The accounting breaker tracks things held in memory that is independent - * of the request lifecycle. This includes memory used by Lucene for - * segments. - */ - String ACCOUNTING = "accounting"; enum Type { // A regular or ChildMemoryCircuitBreaker diff --git a/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java index a50e91cad1348..1585a0134afa7 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java @@ -190,13 +190,14 @@ Leaf remove(Object key, int hash) { } private static T[] removeArrayElement(T[] array, int index) { - final Object result = Array.newInstance(array.getClass().getComponentType(), array.length - 1); + @SuppressWarnings("unchecked") + final T[] result = (T[]) Array.newInstance(array.getClass().getComponentType(), array.length - 1); System.arraycopy(array, 0, result, 0, index); if (index < array.length - 1) { System.arraycopy(array, index + 1, result, index, array.length - index - 1); } - return (T[]) result; + return result; } public static T[] appendElement(final T[] array, final T element) { @@ -329,6 +330,7 @@ private Node newSubNode(int hashBits) { } } + @SuppressWarnings("unchecked") private InnerNode putExisting(K key, int hash, int hashBits, int slot, V value, MutableValueInt newValue) { final K[] keys2 = Arrays.copyOf(keys, keys.length); final Object[] subNodes2 = Arrays.copyOf(subNodes, subNodes.length); diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 115f47598c9ff..52854a8b53074 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -36,6 +36,8 @@ public T next() { }; } + @SafeVarargs + @SuppressWarnings("varargs") public static Iterator concat(Iterator... iterators) { if (iterators == null) { throw new NullPointerException("iterators"); @@ -49,6 +51,8 @@ static class ConcatenatedIterator implements Iterator { private final Iterator[] iterators; private int index = 0; + @SafeVarargs + @SuppressWarnings("varargs") ConcatenatedIterator(Iterator... 
iterators) { if (iterators == null) { throw new NullPointerException("iterators"); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoFormatterFactory.java b/server/src/main/java/org/elasticsearch/common/geo/GeoFormatterFactory.java index ab64b92859580..335c2bf6712fa 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoFormatterFactory.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoFormatterFactory.java @@ -9,34 +9,71 @@ package org.elasticsearch.common.geo; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.utils.WellKnownText; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; /** - * Output formatters supported by geo fields. + * Output formatters for geo fields support extensions such as vector tiles. + * + * This class is an extensible version of a static GeometryFormatterFactory */ -public class GeoFormatterFactory { +public class GeoFormatterFactory { - public static final String GEOJSON = "geojson"; - public static final String WKT = "wkt"; + /** + * Defines an extension point for geometry formatter + * @param + */ + public interface FormatterFactory { + /** + * Format name + */ + String getName(); + + /** + * Generates a formatter builder that parses the formatter configuration and generates a formatter + */ + Function, List>> getFormatterBuilder(); + } + + private final Map, List>>> factories; - private static final Map> FORMATTERS = new HashMap<>(); - static { - FORMATTERS.put(GEOJSON, GeoJson::toMap); - FORMATTERS.put(WKT, WellKnownText::toWKT); + /** + * Creates an extensible geo formatter. The extension points can be added as a list of factories + */ + public GeoFormatterFactory(List> factories) { + Map, List>>> factoriesBuilder = new HashMap<>(); + for (FormatterFactory factory : factories) { + if(factoriesBuilder.put(factory.getName(), factory.getFormatterBuilder()) != null) { + throw new IllegalArgumentException("More then one formatter factory with the name [" + factory.getName() + + "] was configured"); + } + + } + this.factories = Collections.unmodifiableMap(factoriesBuilder); } /** * Returns a formatter by name + * + * The format can contain an optional parameters in parentheses such as "mvt(1/2/3)". Parameterless formats are getting resolved + * using standard GeometryFormatterFactory and formats with parameters are getting resolved using factories specified during + * construction. 
*/ - public static Function getFormatter(String name) { - Function format = FORMATTERS.get(name); - if (format == null) { - throw new IllegalArgumentException("Unrecognized geometry format [" + name + "]."); + public Function, List> getFormatter(String format, Function toGeometry) { + final int start = format.indexOf('('); + if (start == -1) { + return GeometryFormatterFactory.getFormatter(format, toGeometry); + } + final String formatName = format.substring(0, start); + Function, List>> factory = factories.get(formatName); + if (factory == null) { + throw new IllegalArgumentException("Invalid format: " + formatName); } - return format; + final String param = format.substring(start + 1, format.length() - 1); + return factory.apply(param); } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java new file mode 100644 index 0000000000000..21785bf549f0a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.geo; + +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.utils.WellKnownText; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; + +/** + * Output formatters supported by geometry fields. + */ +public class GeometryFormatterFactory { + + public static final String GEOJSON = "geojson"; + public static final String WKT = "wkt"; + + /** + * Returns a formatter by name + */ + public static Function, List> getFormatter(String name, Function toGeometry) { + switch (name) { + case GEOJSON: + return geometries -> { + final List objects = new ArrayList<>(geometries.size()); + geometries.forEach((shape) -> objects.add(GeoJson.toMap(toGeometry.apply(shape)))); + return objects; + }; + case WKT: + return geometries -> { + final List objects = new ArrayList<>(geometries.size()); + geometries.forEach((shape) -> objects.add(WellKnownText.toWKT(toGeometry.apply(shape)))); + return objects; + }; + default: throw new IllegalArgumentException("Unrecognized geometry format [" + name + "]."); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/geo/SimpleFeatureFactory.java b/server/src/main/java/org/elasticsearch/common/geo/SimpleFeatureFactory.java new file mode 100644 index 0000000000000..9f61d8b26e2c9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/geo/SimpleFeatureFactory.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
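The extracted diff has its generic parameters stripped; in context GeoFormatterFactory is parameterized over the source type (for example GeoPoint) and getFormatter returns a function from a list of source values to a list of formatted objects. A hedged sketch of the parameterized lookup under those assumptions, using the mvt extension that is added later in this change:

    // Sketch; the generic signatures are assumptions inferred from the surrounding code.
    GeoFormatterFactory<GeoPoint> formatters =
        new GeoFormatterFactory<>(List.of(new SimpleVectorTileFormatter()));

    // Parameterless names ("wkt", "geojson") fall through to GeometryFormatterFactory...
    Function<List<GeoPoint>, List<Object>> wkt =
        formatters.getFormatter("wkt", p -> new Point(p.getLon(), p.getLat()));

    // ...while a parenthesized parameter routes to the registered extension,
    // here the mvt formatter for tile 3/4/2 with the default extent.
    Function<List<GeoPoint>, List<Object>> mvt =
        formatters.getFormatter("mvt(3/4/2)", p -> new Point(p.getLon(), p.getLat()));
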
+ */ + +package org.elasticsearch.common.geo; + +import org.apache.lucene.util.BitUtil; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Comparator; +import java.util.List; + +/** + * Transforms points and rectangles objects in WGS84 into mvt features. + */ +public class SimpleFeatureFactory { + + private final int extent; + private final double pointXScale, pointYScale, pointXTranslate, pointYTranslate; + + private static final int MOVETO = 1; + private static final int LINETO = 2; + private static final int CLOSEPATH = 7; + + private static final byte[] EMPTY = new byte[0]; + + public SimpleFeatureFactory(int z, int x, int y, int extent) { + this.extent = extent; + final Rectangle rectangle = SphericalMercatorUtils.recToSphericalMercator(GeoTileUtils.toBoundingBox(x, y, z)); + pointXScale = (double) extent / (rectangle.getMaxLon() - rectangle.getMinLon()); + pointYScale = (double) -extent / (rectangle.getMaxLat() - rectangle.getMinLat()); + pointXTranslate = -pointXScale * rectangle.getMinX(); + pointYTranslate = -pointYScale * rectangle.getMinY(); + } + + /** + * Returns a {@code byte[]} containing the mvt representation of the provided point + */ + public byte[] point(double lon, double lat) throws IOException { + final int posLon = lon(lon); + if (posLon > extent || posLon < 0) { + return EMPTY; + } + final int posLat = lat(lat); + if (posLat > extent || posLat < 0) { + return EMPTY; + } + final int[] commands = new int[3]; + commands[0] = encodeCommand(MOVETO, 1); + commands[1] = BitUtil.zigZagEncode(posLon); + commands[2] = BitUtil.zigZagEncode(posLat); + return writeCommands(commands, 1, 3); + } + + /** + * Returns a {@code byte[]} containing the mvt representation of the provided points + */ + public byte[] points(List multiPoint) { + multiPoint.sort(Comparator.comparingDouble(GeoPoint::getLon).thenComparingDouble(GeoPoint::getLat)); + final int[] commands = new int[2 * multiPoint.size() + 1]; + int pos = 1, prevLon = 0, prevLat = 0, numPoints = 0; + for (GeoPoint point : multiPoint) { + final int posLon = lon(point.getLon()); + if (posLon > extent || posLon < 0) { + continue; + } + final int posLat = lat(point.getLat()); + if (posLat > extent || posLat < 0) { + continue; + } + // filter out repeated points + if (numPoints == 0 || posLon != prevLon || posLat != prevLat) { + commands[pos++] = BitUtil.zigZagEncode(posLon - prevLon); + commands[pos++] = BitUtil.zigZagEncode(posLat - prevLat); + prevLon = posLon; + prevLat = posLat; + numPoints++; + } + } + if (numPoints == 0) { + return EMPTY; + } + commands[0] = encodeCommand(MOVETO, numPoints); + try { + return writeCommands(commands, 1, pos); + } catch (IOException ioe) { + throw new UncheckedIOException(ioe); + } + } + + /** + * Returns a {@code byte[]} containing the mvt representation of the provided rectangle + */ + public byte[] box(double minLon, double maxLon, double minLat, double maxLat) throws IOException { + int[] commands = new int[11]; + final int minX = Math.max(0, lon(minLon)); + if (minX > extent) { + return EMPTY; + } + final int minY = Math.min(extent, lat(minLat)); + if (minY > extent) { + return EMPTY; + } + final int maxX = Math.min(extent, lon(maxLon)); + if (maxX < 0 || minX == maxX) { + return EMPTY; + } + final int maxY = Math.max(0, lat(maxLat)); + if (maxY < 0 || minY == maxY) { + return 
EMPTY; + } + commands[0] = encodeCommand(MOVETO, 1); + commands[1] = BitUtil.zigZagEncode(minX); + commands[2] = BitUtil.zigZagEncode(minY); + commands[3] = encodeCommand(LINETO, 3); + // 1 + commands[4] = BitUtil.zigZagEncode(maxX - minX); + commands[5] = BitUtil.zigZagEncode(0); + // 2 + commands[6] = BitUtil.zigZagEncode(0); + commands[7] = BitUtil.zigZagEncode(maxY - minY); + // 3 + commands[8] = BitUtil.zigZagEncode(minX - maxX); + commands[9] = BitUtil.zigZagEncode(0); + // close + commands[10] = encodeCommand(CLOSEPATH, 1); + return writeCommands(commands, 3, 11); + } + + private int lat(double lat) { + return (int) Math.round(pointYScale * SphericalMercatorUtils.latToSphericalMercator(lat) + pointYTranslate) + extent; + } + + private int lon(double lon) { + return (int) Math.round(pointXScale * SphericalMercatorUtils.lonToSphericalMercator(lon) + pointXTranslate); + } + + private static int encodeCommand(int id, int length) { + return (id & 0x7) | (length << 3); + } + + private static byte[] writeCommands(final int[] commands, final int type, final int length) throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + for (int i = 0; i < length; i++) { + output.writeVInt(commands[i]); + } + final int dataSize = output.size(); + output.reset(); + output.writeVInt(24); + output.writeVInt(type); + output.writeVInt(34); + output.writeVInt(dataSize); + for (int i = 0; i < length; i++) { + output.writeVInt(commands[i]); + } + return output.copyBytes().array(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/geo/SimpleVectorTileFormatter.java b/server/src/main/java/org/elasticsearch/common/geo/SimpleVectorTileFormatter.java new file mode 100644 index 0000000000000..450d7e5581ce4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/geo/SimpleVectorTileFormatter.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.geo; + +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; + +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +/** + * A facade for SimpleFeatureFactory that converts it into FormatterFactory for use in GeoPointFieldMapper + */ +public class SimpleVectorTileFormatter implements GeoFormatterFactory.FormatterFactory { + + public static final String MVT = "mvt"; + + @Override + public String getName() { + return MVT; + } + + @Override + public Function, List>> getFormatterBuilder() { + return params -> { + int[] parsed = parse(params); + final SimpleFeatureFactory featureFactory = new SimpleFeatureFactory(parsed[0], parsed[1], parsed[2], parsed[3]); + return points -> List.of(featureFactory.points(points)); + }; + } + + /** + * Parses string in the format we expect either z/x/y or z/x/y@extent to an array of integer parameters + */ + public static int[] parse(String param) { + // we expect either z/x/y or z/x/y@extent + final String[] parts = param.split("@", 3); + if (parts.length > 2) { + throw new IllegalArgumentException( + "Invalid mvt formatter parameter [" + param + "]. Must have the form \"zoom/x/y\" or \"zoom/x/y@extent\"." + ); + } + final int extent = parts.length == 2 ? 
Integer.parseInt(parts[1]) : 4096; + final String[] tileBits = parts[0].split("/", 4); + if (tileBits.length != 3) { + throw new IllegalArgumentException( + "Invalid tile string [" + parts[0] + "]. Must be three integers in a form \"zoom/x/y\"." + ); + } + final int z = GeoTileUtils.checkPrecisionRange(Integer.parseInt(tileBits[0])); + final int tiles = 1 << z; + final int x = Integer.parseInt(tileBits[1]); + final int y = Integer.parseInt(tileBits[2]); + if (x < 0 || y < 0 || x >= tiles || y >= tiles) { + throw new IllegalArgumentException(String.format(Locale.ROOT, "Zoom/X/Y combination is not valid: %d/%d/%d", z, x, y)); + } + return new int[]{z, x, y, extent}; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/geo/SphericalMercatorUtils.java b/server/src/main/java/org/elasticsearch/common/geo/SphericalMercatorUtils.java new file mode 100644 index 0000000000000..a406b86d01916 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/geo/SphericalMercatorUtils.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.geo; + +import org.elasticsearch.geometry.Rectangle; + +/** + * Utility functions to transforms WGS84 coordinates into spherical mercator. + */ +public class SphericalMercatorUtils { + + public static final double MERCATOR_BOUNDS = 20037508.34; + private static final double MERCATOR_FACTOR = MERCATOR_BOUNDS / 180.0; + + /** + * Transforms WGS84 longitude to a Spherical mercator longitude + */ + public static double lonToSphericalMercator(double lon) { + return lon * MERCATOR_FACTOR; + } + + /** + * Transforms WGS84 latitude to a Spherical mercator latitude + */ + public static double latToSphericalMercator(double lat) { + double y = Math.log(Math.tan((90 + lat) * Math.PI / 360)) / (Math.PI / 180); + return y * MERCATOR_FACTOR; + } + + /** + * Transforms WGS84 rectangle to a Spherical mercator rectangle + */ + public static Rectangle recToSphericalMercator(Rectangle r) { + return new Rectangle( + lonToSphericalMercator(r.getMinLon()), + lonToSphericalMercator(r.getMaxLon()), + latToSphericalMercator(r.getMaxLat()), + latToSphericalMercator(r.getMinLat()) + ); + + } + + private SphericalMercatorUtils() { + // no instances + } +} diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index ff4db16da9668..299735b786fbc 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -33,7 +33,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder shapes = new ArrayList<>(); + final List> shapes = new ArrayList<>(); /** * Build and empty GeometryCollectionBuilder. 
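SphericalMercatorUtils is a small pure helper, so its behavior is easy to sanity-check from the formulas above (MERCATOR_BOUNDS = 20037508.34, MERCATOR_FACTOR = MERCATOR_BOUNDS / 180). A minimal sketch:

    // Quick sanity checks for the WGS84 -> spherical mercator helpers above.
    double maxLon  = SphericalMercatorUtils.lonToSphericalMercator(180.0); // 20037508.34, i.e. MERCATOR_BOUNDS
    double equator = SphericalMercatorUtils.latToSphericalMercator(0.0);   // 0.0: tan(45 deg) = 1 and log(1) = 0
    // Latitudes grow without bound toward the poles, which is why vector-tile callers
    // clamp to the web-mercator latitude range before projecting.
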
@@ -54,12 +54,12 @@ public GeometryCollectionBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(shapes.size()); - for (ShapeBuilder shape : shapes) { + for (ShapeBuilder shape : shapes) { out.writeNamedWriteable(shape); } } - public GeometryCollectionBuilder shape(ShapeBuilder shape) { + public GeometryCollectionBuilder shape(ShapeBuilder shape) { this.shapes.add(shape); return this; } @@ -104,7 +104,7 @@ public GeometryCollectionBuilder circle(CircleBuilder circle) { return this; } - public ShapeBuilder getShapeAt(int i) { + public ShapeBuilder getShapeAt(int i) { if (i >= this.shapes.size() || i < 0) { throw new ElasticsearchException("GeometryCollection contains " + this.shapes.size() + " shapes. + " + "No shape found at index " + i); @@ -121,7 +121,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(ShapeParser.FIELD_TYPE.getPreferredName(), TYPE.shapeName()); builder.startArray(ShapeParser.FIELD_GEOMETRIES.getPreferredName()); - for (ShapeBuilder shape : shapes) { + for (ShapeBuilder shape : shapes) { shape.toXContent(builder, params); } builder.endArray(); @@ -164,7 +164,7 @@ public Shape buildS4J() { List shapes = new ArrayList<>(this.shapes.size()); - for (ShapeBuilder shape : this.shapes) { + for (ShapeBuilder shape : this.shapes) { shapes.add(shape.buildS4J()); } @@ -182,7 +182,7 @@ public GeometryCollection buildGeometry() { } List shapes = new ArrayList<>(this.shapes.size()); - for (ShapeBuilder shape : this.shapes) { + for (ShapeBuilder shape : this.shapes) { shapes.add(shape.buildGeometry()); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index d5117fd7e0aec..3608609212e16 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -30,7 +30,7 @@ * complies with geojson specification: https://tools.ietf.org/html/rfc7946 */ abstract class GeoJsonParser { - protected static ShapeBuilder parse(XContentParser parser, AbstractShapeGeometryFieldMapper shapeMapper) + protected static ShapeBuilder parse(XContentParser parser, AbstractShapeGeometryFieldMapper shapeMapper) throws IOException { GeoShapeType shapeType = null; DistanceUnit.Distance radius = null; @@ -80,7 +80,7 @@ protected static ShapeBuilder parse(XContentParser parser, AbstractShapeGeometry } else if (CircleBuilder.FIELD_RADIUS.match(fieldName, subParser.getDeprecationHandler())) { if (shapeType == null) { shapeType = GeoShapeType.CIRCLE; - } else if (shapeType != null && shapeType.equals(GeoShapeType.CIRCLE) == false) { + } else if (shapeType.equals(GeoShapeType.CIRCLE) == false) { malformedException = "cannot have [" + CircleBuilder.FIELD_RADIUS + "] with type set to [" + shapeType + "]"; } @@ -192,7 +192,7 @@ private static Coordinate parseCoordinate(XContentParser parser, boolean ignoreZ * @return Geometry[] geometries of the GeometryCollection * @throws IOException Thrown if an error occurs while reading from the XContentParser */ - static GeometryCollectionBuilder parseGeometries(XContentParser parser, AbstractShapeGeometryFieldMapper mapper) throws + static GeometryCollectionBuilder parseGeometries(XContentParser parser, AbstractShapeGeometryFieldMapper mapper) throws IOException { if (parser.currentToken() != 
XContentParser.Token.START_ARRAY) { throw new ElasticsearchParseException("geometries must be an array of geojson objects"); @@ -201,7 +201,7 @@ static GeometryCollectionBuilder parseGeometries(XContentParser parser, Abstract XContentParser.Token token = parser.nextToken(); GeometryCollectionBuilder geometryCollection = new GeometryCollectionBuilder(); while (token != XContentParser.Token.END_ARRAY) { - ShapeBuilder shapeBuilder = ShapeParser.parse(parser); + ShapeBuilder shapeBuilder = ShapeParser.parse(parser); geometryCollection.shape(shapeBuilder); token = parser.nextToken(); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index a87fefe478577..cf9e853add472 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -52,19 +52,19 @@ public class GeoWKTParser { // no instance private GeoWKTParser() {} - public static ShapeBuilder parse(XContentParser parser, final AbstractShapeGeometryFieldMapper shapeMapper) + public static ShapeBuilder parse(XContentParser parser, final AbstractShapeGeometryFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { return parseExpectedType(parser, null, shapeMapper); } - public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) throws IOException, ElasticsearchParseException { return parseExpectedType(parser, shapeType, null); } /** throws an exception if the parsed geometry type does not match the expected shape type */ - public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType, - final AbstractShapeGeometryFieldMapper shapeMapper) + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType, + final AbstractShapeGeometryFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { try (StringReader reader = new StringReader(parser.text())) { boolean coerce = shapeMapper != null && shapeMapper.coerce(); @@ -81,14 +81,14 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue, coerce); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue, coerce); checkEOF(tokenizer); return builder; } } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue, + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index cb2c267cc6645..56f2fe3ff5205 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -40,13 +40,13 @@ public interface ShapeParser { * if the parsers current token has been null * 
@throws IOException if the input could not be read */ - static ShapeBuilder parse(XContentParser parser, AbstractGeometryFieldMapper geometryMapper) throws IOException { - AbstractShapeGeometryFieldMapper shapeMapper = null; + static ShapeBuilder parse(XContentParser parser, AbstractGeometryFieldMapper geometryMapper) throws IOException { + AbstractShapeGeometryFieldMapper shapeMapper = null; if (geometryMapper != null) { if (geometryMapper instanceof AbstractShapeGeometryFieldMapper == false) { throw new IllegalArgumentException("geometry must be a shape type"); } - shapeMapper = (AbstractShapeGeometryFieldMapper) geometryMapper; + shapeMapper = (AbstractShapeGeometryFieldMapper) geometryMapper; } if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { return null; @@ -65,11 +65,11 @@ static ShapeBuilder parse(XContentParser parser, AbstractGeometryFieldMapper geo * if the parsers current token has been null * @throws IOException if the input could not be read */ - static ShapeBuilder parse(XContentParser parser) throws IOException { + static ShapeBuilder parse(XContentParser parser) throws IOException { return parse(parser, null); } - static ShapeBuilder parse(Object value) throws IOException { + static ShapeBuilder parse(Object value) throws IOException { try (XContentParser parser = new MapXContentParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Collections.singletonMap("value", value), null)) { parser.nextToken(); // start object diff --git a/server/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java b/server/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java index aa7029f2a9cae..2b92d1ad745bb 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ConstantFactory.java @@ -35,7 +35,7 @@ class ConstantFactory implements InternalFactory { } @Override - public T get(Errors errors, InternalContext context, Dependency dependency) + public T get(Errors errors, InternalContext context, Dependency dependency) throws ErrorsException { return initializable.get(errors); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/InheritingState.java b/server/src/main/java/org/elasticsearch/common/inject/InheritingState.java index eb05b1dbe6e4d..70ff107a41de8 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InheritingState.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InheritingState.java @@ -152,10 +152,11 @@ public void clearBlacklisted() { public void makeAllBindingsToEagerSingletons(Injector injector) { Map, Binding> x = new LinkedHashMap<>(); for (Map.Entry, Binding> entry : this.explicitBindingsMutable.entrySet()) { - Key key = entry.getKey(); + @SuppressWarnings("unchecked") + Key key = (Key) entry.getKey(); BindingImpl binding = (BindingImpl) entry.getValue(); Object value = binding.getProvider().get(); - x.put(key, new InstanceBindingImpl(injector, key, SourceProvider.UNKNOWN_SOURCE, new InternalFactory.Instance(value), + x.put(key, new InstanceBindingImpl<>(injector, key, SourceProvider.UNKNOWN_SOURCE, new InternalFactory.Instance<>(value), emptySet(), value)); } this.explicitBindingsMutable.clear(); diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java b/server/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java index 909779cb442a6..4b4e163ef400a 100644 --- 
a/server/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java @@ -51,7 +51,7 @@ public Boolean visit(StaticInjectionRequest request) { } @Override - public Boolean visit(InjectionRequest request) { + public Boolean visit(InjectionRequest request) { Set injectionPoints; try { injectionPoints = request.getInjectionPoints(); diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index e2ee9aa8eee34..fd37a52bead54 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -206,7 +206,7 @@ static InternalFactory> createInternalFactory(Binding provide final Provider provider = providedBinding.getProvider(); return new InternalFactory>() { @Override - public Provider get(Errors errors, InternalContext context, Dependency dependency) { + public Provider get(Errors errors, InternalContext context, Dependency dependency) { return provider; } }; @@ -473,7 +473,7 @@ BindingImpl createProvidedByBinding(Key key, Scoping scoping, InternalFactory internalFactory = new InternalFactory() { @Override - public T get(Errors errors, InternalContext context, Dependency dependency) + public T get(Errors errors, InternalContext context, Dependency dependency) throws ErrorsException { errors = errors.withSource(providerKey); Provider provider = providerBinding.getInternalFactory().get( @@ -585,7 +585,7 @@ BindingImpl createJustInTimeBinding(Key key, Errors errors) throws Err // These casts are safe. We know T extends Provider and that given Key>, // createProviderBinding() will return BindingImpl>. @SuppressWarnings("unchecked") - BindingImpl binding = createProviderBinding((Key) key, errors); + BindingImpl binding = (BindingImpl) createProviderBinding((Key>) key, errors); return binding; } @@ -594,7 +594,7 @@ BindingImpl createJustInTimeBinding(Key key, Errors errors) throws Err // These casts are safe. T extends MembersInjector and that given Key>, // createMembersInjectorBinding() will return BindingImpl>. @SuppressWarnings("unchecked") - BindingImpl binding = createMembersInjectorBinding((Key) key, errors); + BindingImpl binding = (BindingImpl) createMembersInjectorBinding((Key>) key, errors); return binding; } @@ -701,9 +701,9 @@ Object invoke(Object target, Object... 
parameters) MembersInjectorStore membersInjectorStore; @Override - @SuppressWarnings("unchecked") // the members injector type is consistent with instance's type public void injectMembers(Object instance) { - MembersInjector membersInjector = getMembersInjector(instance.getClass()); + @SuppressWarnings("unchecked") // the members injector type is consistent with instance's type + MembersInjector membersInjector = getMembersInjector((Class) instance.getClass()); membersInjector.injectMembers(instance); } @@ -733,9 +733,10 @@ Provider getProviderOrThrow(final Key key, Errors errors) throws Error if (factory instanceof InternalFactory.Instance) { return new Provider() { @Override + @SuppressWarnings("unchecked") public T get() { try { - return (T) ((InternalFactory.Instance) factory).get(null, null, null); + return (T) ((InternalFactory.Instance) factory).get(null, null, null); } catch (ErrorsException e) { // ignore } diff --git a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java index cc5e356aeec3f..4bbee4d860f90 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/PrivateBinder.java @@ -50,5 +50,5 @@ public interface PrivateBinder extends Binder { PrivateBinder withSource(Object source); @Override - PrivateBinder skipSources(Class... classesToSkip); + PrivateBinder skipSources(Class... classesToSkip); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java b/server/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java index f443756908867..2a710c6d24cdd 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ProviderToInternalFactoryAdapter.java @@ -43,7 +43,7 @@ public T get() { T t = injector.callInContext(new ContextualCallable() { @Override public T call(InternalContext context) throws ErrorsException { - Dependency dependency = context.getDependency(); + Dependency dependency = context.getDependency(); return internalFactory.get(errors, context, dependency); } }); diff --git a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index adf3687772abd..5ba47ed7483fa 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/server/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -149,7 +149,7 @@ public final int hashCode() { @Override public final boolean equals(Object o) { return o instanceof TypeLiteral - && MoreTypes.equals(type, ((TypeLiteral) o).type); + && MoreTypes.equals(type, ((TypeLiteral) o).type); } @Override @@ -194,7 +194,7 @@ Type resolveType(Type toResolve) { // this implementation is made a little more complicated in an attempt to avoid object-creation while (true) { if (toResolve instanceof TypeVariable) { - TypeVariable original = (TypeVariable) toResolve; + TypeVariable original = (TypeVariable) toResolve; toResolve = MoreTypes.resolveTypeVariable(type, rawType, original); if (toResolve == original) { return toResolve; @@ -299,7 +299,7 @@ public List> getParameterTypes(Member methodOrConstructor) { genericParameterTypes = method.getGenericParameterTypes(); } else if (methodOrConstructor instanceof Constructor) { - Constructor constructor = (Constructor) 
methodOrConstructor; + Constructor constructor = (Constructor) methodOrConstructor; if (constructor.getDeclaringClass().isAssignableFrom(rawType) == false) { throw new IllegalArgumentException(constructor + " does not construct a supertype of " + type); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java b/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java index b99e3ac75c30f..32347cdd44b36 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java +++ b/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java @@ -216,7 +216,7 @@ public Object[] gatherArgsForConstructor( @SuppressWarnings("unchecked") // we imprecisely treat the class literal of T as a Class Class factoryRawType = (Class) factoryType.getRawType(); return factoryRawType.cast(Proxy.newProxyInstance(factoryRawType.getClassLoader(), - new Class[]{factoryRawType}, invocationHandler)); + new Class[]{factoryRawType}, invocationHandler)); } private static ConfigurationException newConfigurationException(String format, Object... args) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java index 3127fb97efdcc..43124988ade2b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java @@ -82,7 +82,7 @@ public Object createProxy(Errors errors, Class expectedType) throws ErrorsExc ClassLoader classLoader = expectedType.getClassLoader() == null ? ClassLoader.getSystemClassLoader() : expectedType.getClassLoader(); return expectedType.cast(Proxy.newProxyInstance(classLoader, - new Class[]{expectedType}, invocationHandler)); + new Class[]{expectedType}, invocationHandler)); } public void setProxyDelegates(T delegate) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index dc60f15460ede..25f24202a20d1 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -65,6 +65,7 @@ * * @author jessewilson@google.com (Jesse Wilson) */ +@SuppressWarnings("rawtypes") public final class Errors { /** diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java b/server/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java index 1184a97edc293..6d53bb2c40da6 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/InternalContext.java @@ -30,7 +30,7 @@ public final class InternalContext { private Map> constructionContexts = new HashMap<>(); - private Dependency dependency; + private Dependency dependency; @SuppressWarnings("unchecked") public ConstructionContext getConstructionContext(Object key) { @@ -43,11 +43,11 @@ public ConstructionContext getConstructionContext(Object key) { return constructionContext; } - public Dependency getDependency() { + public Dependency getDependency() { return dependency; } - public void setDependency(Dependency dependency) { + public void setDependency(Dependency dependency) { this.dependency = dependency; } } diff 
--git a/server/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java b/server/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java index 96206e44600b7..6ed33af3529ad 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/MoreTypes.java @@ -213,8 +213,8 @@ public static boolean equals(Type a, Type b) { if ((b instanceof TypeVariable) == false) { return false; } - TypeVariable va = (TypeVariable) a; - TypeVariable vb = (TypeVariable) b; + TypeVariable va = (TypeVariable) a; + TypeVariable vb = (TypeVariable) b; return va.getGenericDeclaration() == vb.getGenericDeclaration() && va.getName().equals(vb.getName()); @@ -257,7 +257,7 @@ private static int hashCodeOrZero(Object o) { public static String toString(Type type) { if (type instanceof Class) { - return ((Class) type).getName(); + return ((Class) type).getName(); } else if (type instanceof ParameterizedType) { ParameterizedType parameterizedType = (ParameterizedType) type; @@ -366,7 +366,7 @@ public static Type getGenericSupertype(Type type, Class rawType, Class toR // we skip searching through interfaces if unknown is an interface if (toResolve.isInterface()) { - Class[] interfaces = rawType.getInterfaces(); + Class[] interfaces = rawType.getInterfaces(); for (int i = 0, length = interfaces.length; i < length; i++) { if (interfaces[i] == toResolve) { return rawType.getGenericInterfaces()[i]; @@ -393,7 +393,7 @@ public static Type getGenericSupertype(Type type, Class rawType, Class toR return toResolve; } - public static Type resolveTypeVariable(Type type, Class rawType, TypeVariable unknown) { + public static Type resolveTypeVariable(Type type, Class rawType, TypeVariable unknown) { Class declaredByRaw = declaringClassOf(unknown); // we can't reduce this further @@ -423,7 +423,7 @@ private static int indexOf(Object[] array, Object toFind) { * Returns the declaring class of {@code typeVariable}, or {@code null} if it was not declared by * a class. */ - private static Class declaringClassOf(TypeVariable typeVariable) { + private static Class declaringClassOf(TypeVariable typeVariable) { GenericDeclaration genericDeclaration = typeVariable.getGenericDeclaration(); return genericDeclaration instanceof Class ? (Class) genericDeclaration @@ -439,7 +439,7 @@ public static class ParameterizedTypeImpl public ParameterizedTypeImpl(Type ownerType, Type rawType, Type... 
typeArguments) { // require an owner type if the raw type needs it if (rawType instanceof Class) { - Class rawTypeAsClass = (Class) rawType; + Class rawTypeAsClass = (Class) rawType; if (ownerType == null && rawTypeAsClass.getEnclosingClass() != null) { throw new IllegalArgumentException("No owner type for enclosed " + rawType); } @@ -638,7 +638,7 @@ private MemberImpl(Member member) { } @Override - public Class getDeclaringClass() { + public Class getDeclaringClass() { return declaringClass; } diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java b/server/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java index 5589c7b3082a9..85542bdba90e0 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/StackTraceElements.java @@ -31,7 +31,7 @@ public static Object forMember(Member member) { return SourceProvider.UNKNOWN_SOURCE; } - Class declaringClass = member.getDeclaringClass(); + Class declaringClass = member.getDeclaringClass(); String fileName = null; int lineNumber = -1; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java index 4e975280cb768..d955d3be42053 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ToStringBuilder.java @@ -35,7 +35,7 @@ public ToStringBuilder(String name) { this.name = name; } - public ToStringBuilder(Class type) { + public ToStringBuilder(Class type) { this.name = type.getSimpleName(); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java index 4d6e0e28a5de0..7bce314a6b8e9 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java +++ b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java @@ -81,7 +81,7 @@ public boolean matches(T t) { @Override public boolean equals(Object other) { return other instanceof Not - && ((Not) other).delegate.equals(delegate); + && ((Not) other).delegate.equals(delegate); } @Override @@ -186,11 +186,11 @@ public String toString() { * Returns a matcher which matches subclasses of the given type (as well as * the given type). */ - public static Matcher subclassesOf(final Class superclass) { + public static Matcher> subclassesOf(final Class superclass) { return new SubclassesOf(superclass); } - private static class SubclassesOf extends AbstractMatcher { + private static class SubclassesOf extends AbstractMatcher> { private final Class superclass; SubclassesOf(Class superclass) { @@ -198,7 +198,7 @@ private static class SubclassesOf extends AbstractMatcher { } @Override - public boolean matches(Class subclass) { + public boolean matches(Class subclass) { return superclass.isAssignableFrom(subclass); } @@ -295,11 +295,11 @@ public String toString() { * Returns a matcher which matches classes in the given package. Packages are specific to their * classloader, so classes with the same package name may not have the same package at runtime. 
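// Illustrative only (not part of this change): with the raw types cleaned up, the matcher factories in
// org.elasticsearch.common.inject.matcher return Matcher<Class<?>>, so a call site reads roughly like this;
// the concrete classes used are arbitrary examples.
Matcher<Class<?>> numbers = Matchers.subclassesOf(Number.class);
boolean isNumber = numbers.matches(Integer.class);                         // true: Integer is assignable to Number
boolean inLang = Matchers.inSubpackage("java.lang").matches(String.class); // true by package prefix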
*/ - public static Matcher inPackage(final Package targetPackage) { + public static Matcher> inPackage(final Package targetPackage) { return new InPackage(targetPackage); } - private static class InPackage extends AbstractMatcher { + private static class InPackage extends AbstractMatcher> { private final transient Package targetPackage; private final String packageName; @@ -309,7 +309,7 @@ private static class InPackage extends AbstractMatcher { } @Override - public boolean matches(Class c) { + public boolean matches(Class c) { return c.getPackage().equals(targetPackage); } @@ -342,11 +342,11 @@ public Object readResolve() { * * @since 2.0 */ - public static Matcher inSubpackage(final String targetPackageName) { + public static Matcher> inSubpackage(final String targetPackageName) { return new InSubpackage(targetPackageName); } - private static class InSubpackage extends AbstractMatcher { + private static class InSubpackage extends AbstractMatcher> { private final String targetPackageName; InSubpackage(String targetPackageName) { @@ -354,7 +354,7 @@ private static class InSubpackage extends AbstractMatcher { } @Override - public boolean matches(Class c) { + public boolean matches(Class c) { String classPackageName = c.getPackage().getName(); return classPackageName.equals(targetPackageName) || classPackageName.startsWith(targetPackageName + "."); diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 63ac838d87a4b..1002de8683269 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -286,7 +286,7 @@ public RecordingBinder withSource(final Object source) { } @Override - public RecordingBinder skipSources(Class... classesToSkip) { + public RecordingBinder skipSources(Class... 
classesToSkip) { // if a source is specified explicitly, we don't need to skip sources if (source != null) { return this; diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index f85b8d7000bab..94711e152a769 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -53,6 +53,7 @@ * @author crazybob@google.com (Bob Lee) * @since 2.0 */ +@SuppressWarnings("rawtypes") public final class InjectionPoint { private final boolean optional; diff --git a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java b/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java index 5341a6be255c9..b9d48d1d67e86 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java +++ b/server/src/main/java/org/elasticsearch/common/inject/util/Modules.java @@ -152,7 +152,7 @@ public void configure() { final List elements = Elements.getElements(baseModules); final List overrideElements = Elements.getElements(overrides); - final Set overriddenKeys = new HashSet<>(); + final Set> overriddenKeys = new HashSet<>(); final Set> overridesScopeAnnotations = new HashSet<>(); // execute the overrides module, keeping track of which keys and scopes are bound diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index dd6f7f8c2bccb..3a1138fdc38b7 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -170,7 +170,7 @@ public long ramBytesUsed() { return bytes.ramBytesUsed(); } - void ensureCapacity(long offset) { + protected void ensureCapacity(long offset) { if (offset > Integer.MAX_VALUE) { throw new IllegalArgumentException(getClass().getSimpleName() + " cannot hold more than 2GB of data"); } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 1db03fc6c6b92..20168ef9296f8 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -844,7 +844,7 @@ private Object[] readArray() throws IOException { return list8; } - private Map readLinkedHashMap() throws IOException { + private Map readLinkedHashMap() throws IOException { int size9 = readArraySize(); if (size9 == 0) { return Collections.emptyMap(); @@ -856,7 +856,7 @@ private Map readLinkedHashMap() throws IOException { return map9; } - private Map readHashMap() throws IOException { + private Map readHashMap() throws IOException { int size10 = readArraySize(); if (size10 == 0) { return Collections.emptyMap(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 7bf0edff6e9bf..61cf0ad0424ad 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -629,7 +629,7 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep } } - private static final Map, Writer> WRITERS = Map.ofEntries( + private 
static final Map, Writer> WRITERS = Map.ofEntries( entry( String.class, (o, v) -> { @@ -678,7 +678,7 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep List.class, (o, v) -> { o.writeByte((byte) 7); - final List list = (List) v; + final List list = (List) v; o.writeVInt(list.size()); for (Object item : list) { o.writeGenericValue(item); @@ -859,7 +859,8 @@ public void writeGenericValue(@Nullable Object value) throws IOException { return; } final Class type = getGenericType(value); - final Writer writer = WRITERS.get(type); + @SuppressWarnings("unchecked") + final Writer writer = (Writer) WRITERS.get(type); if (writer != null) { writer.write(this, value); } else { diff --git a/server/src/main/java/org/elasticsearch/common/joda/Joda.java b/server/src/main/java/org/elasticsearch/common/joda/Joda.java index 04e2993fa088c..7835ca8bec826 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/server/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -47,7 +47,7 @@ public class Joda { // it results in errors sent to status logger and startup to fail. // Hence a lazy initialization. private static final LazyInitializable deprecationLogger - = new LazyInitializable(() -> DeprecationLogger.getLogger(FormatNames.class)); + = new LazyInitializable<>(() -> DeprecationLogger.getLogger(FormatNames.class)); /** * Parses a joda based pattern, including some named ones (similar to the built in Joda ISO ones). */ diff --git a/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java index 96597ec98d7eb..849bf96a5b13d 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java @@ -55,11 +55,12 @@ public EcsLayout build() { private KeyValuePair[] additionalFields() { return new KeyValuePair[] { new KeyValuePair("event.dataset", dataset), + new KeyValuePair("trace.id", "%trace_id"), new KeyValuePair("elasticsearch.cluster.uuid", "%cluster_id"), new KeyValuePair("elasticsearch.node.id", "%node_id"), new KeyValuePair("elasticsearch.node.name", "%ESnode_name"), new KeyValuePair("elasticsearch.cluster.name", "${sys:es.logs.cluster_name}"), }; - } + } public String getDataset() { return dataset; diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java b/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java index 694810510c177..f0f6ed81dabfd 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESLogMessage.java @@ -77,7 +77,8 @@ protected void addJsonNoBrackets(StringBuilder sb) { StringBuilders.escapeJson(sb, start); sb.append(Chars.DQUOTE).append(':').append(Chars.DQUOTE); start = sb.length(); - sb.append((Object) getIndexedReadOnlyStringMap().getValueAt(i)); + Object value = getIndexedReadOnlyStringMap().getValueAt(i); + sb.append(value); StringBuilders.escapeJson(sb, start); sb.append(Chars.DQUOTE); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java index 1b8c32d2a2fac..59ae5a5dc42dd 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -20,9 +20,9 @@ import org.elasticsearch.index.Index; import 
org.elasticsearch.index.shard.ShardId; +import java.util.Arrays; import java.util.Map; - -import static org.elasticsearch.common.util.CollectionUtils.asArrayList; +import java.util.stream.Stream; /** * A set of utilities around Logging. @@ -38,7 +38,8 @@ public class Loggers { Setting.Property.NodeScope)); public static Logger getLogger(Class clazz, ShardId shardId, String... prefixes) { - return getLogger(clazz, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); + return getLogger(clazz, shardId.getIndex(), Stream.concat(Stream.of(Integer.toString(shardId.id())), + Arrays.stream(prefixes)).toArray(String[]::new)); } /** @@ -51,7 +52,7 @@ public static Logger getLogger(String loggerName, ShardId shardId) { } public static Logger getLogger(Class clazz, Index index, String... prefixes) { - return getLogger(clazz, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0])); + return getLogger(clazz, Stream.concat(Stream.of(Loggers.SPACE, index.getName()), Arrays.stream(prefixes)).toArray(String[]::new)); } public static Logger getLogger(Class clazz, String... prefixes) { diff --git a/server/src/main/java/org/elasticsearch/common/logging/TraceIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/TraceIdConverter.java new file mode 100644 index 0000000000000..d57fa4a0ae1bd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/TraceIdConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.elasticsearch.tasks.Task; + +import java.util.Objects; + +/** + * Pattern converter to format the trace id provided in the traceparent header into JSON fields trace.id. + */ +@Plugin(category = PatternConverter.CATEGORY, name = "TraceIdConverter") +@ConverterKeys({"trace_id"}) +public final class TraceIdConverter extends LogEventPatternConverter { + /** + * Called by log4j2 to initialize this converter. + */ + public static TraceIdConverter newInstance(@SuppressWarnings("unused") final String[] options) { + return new TraceIdConverter(); + } + + public TraceIdConverter() { + super("trace_id", "trace_id"); + } + + public static String getTraceId() { + return HeaderWarning.THREAD_CONTEXT.stream() + .map(t -> t.getHeader(Task.TRACE_ID)) + .filter(Objects::nonNull) + .findFirst() + .orElse(null); + } + + /** + * Formats the trace.id into json fields. 
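// Sketch of how the new converter is consumed (the surrounding pattern is an assumption, not taken from this
// diff): ECSJsonLayout above wires "trace.id" to "%trace_id", and a plain PatternLayout could reference the
// same key, e.g. pattern = "[%d][%trace_id] %m%n". Programmatically the lookup reduces to:
String traceId = TraceIdConverter.getTraceId();    // null unless request handling stored Task.TRACE_ID in the thread context
StringBuilder line = new StringBuilder();
new TraceIdConverter().format(null, line);          // appends the id only when one is in scope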
+ * + * @param event - a log event is ignored in this method as it uses the clusterId value + * from NodeAndClusterIdStateListener to format + */ + @Override + public void format(LogEvent event, StringBuilder toAppendTo) { + String traceId = getTraceId(); + if (traceId != null) { + toAppendTo.append(traceId); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/FilterIndexCommit.java b/server/src/main/java/org/elasticsearch/common/lucene/FilterIndexCommit.java new file mode 100644 index 0000000000000..93d801be305b8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/lucene/FilterIndexCommit.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.store.Directory; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +public abstract class FilterIndexCommit extends IndexCommit { + protected final IndexCommit in; + + public FilterIndexCommit(IndexCommit in) { + this.in = in; + } + + public IndexCommit getIndexCommit() { + return in; + } + + @Override + public String getSegmentsFileName() { + return in.getSegmentsFileName(); + } + + @Override + public Collection getFileNames() throws IOException { + return in.getFileNames(); + } + + @Override + public Directory getDirectory() { + return in.getDirectory(); + } + + @Override + public void delete() { + in.delete(); + } + + @Override + public boolean isDeleted() { + return in.isDeleted(); + } + + @Override + public int getSegmentCount() { + return in.getSegmentCount(); + } + + @Override + public long getGeneration() { + return in.getGeneration(); + } + + @Override + public Map getUserData() throws IOException { + return in.getUserData(); + } + + @Override + public String toString() { + return "FilterIndexCommit{" + "in=" + in + '}'; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 1573f11b88eca..f180dbf22a1e0 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -11,9 +11,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Scorable; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ExplainableScoreScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; +import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Objects; @@ -38,21 +40,23 @@ public float score() { private final Script sScript; private final ScoreScript.LeafFactory script; + private final SearchLookup lookup; private final int shardId; private final String indexName; - public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script, String indexName, int shardId) { + public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script, SearchLookup lookup, String indexName, int shardId) { super(CombineFunction.REPLACE); this.sScript = sScript; this.script = script; + this.lookup = lookup; this.indexName = indexName; this.shardId = shardId; } @Override public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException { - final ScoreScript leafScript = script.newInstance(ctx); + final ScoreScript leafScript = script.newInstance(new DocValuesDocReader(lookup, ctx)); final CannedScorer scorer = new CannedScorer(); leafScript.setScorer(scorer); leafScript._setIndexName(indexName); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java index cfbe9440998e3..71dfe2690215b 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java @@ -26,9 +26,11 @@ import 
org.apache.lucene.search.BulkScorer; import org.apache.lucene.util.Bits; import org.elasticsearch.Version; +import org.elasticsearch.script.DocValuesDocReader; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScoreScript.ExplanationHolder; import org.elasticsearch.script.Script; +import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Objects; @@ -41,16 +43,18 @@ public class ScriptScoreQuery extends Query { private final Query subQuery; private final Script script; private final ScoreScript.LeafFactory scriptBuilder; + private final SearchLookup lookup; private final Float minScore; private final String indexName; private final int shardId; private final Version indexVersion; - public ScriptScoreQuery(Query subQuery, Script script, ScoreScript.LeafFactory scriptBuilder, + public ScriptScoreQuery(Query subQuery, Script script, ScoreScript.LeafFactory scriptBuilder, SearchLookup lookup, Float minScore, String indexName, int shardId, Version indexVersion) { this.subQuery = subQuery; this.script = script; this.scriptBuilder = scriptBuilder; + this.lookup = lookup; this.minScore = minScore; this.indexName = indexName; this.shardId = shardId; @@ -61,7 +65,7 @@ public ScriptScoreQuery(Query subQuery, Script script, ScoreScript.LeafFactory s public Query rewrite(IndexReader reader) throws IOException { Query newQ = subQuery.rewrite(reader); if (newQ != subQuery) { - return new ScriptScoreQuery(newQ, script, scriptBuilder, minScore, indexName, shardId, indexVersion); + return new ScriptScoreQuery(newQ, script, scriptBuilder, lookup, minScore, indexName, shardId, indexVersion); } return super.rewrite(reader); } @@ -143,7 +147,7 @@ public Explanation explain(LeafReaderContext context, int doc) throws IOExceptio } private ScoreScript makeScoreScript(LeafReaderContext context) throws IOException { - final ScoreScript scoreScript = scriptBuilder.newInstance(context); + final ScoreScript scoreScript = scriptBuilder.newInstance(new DocValuesDocReader(lookup, context)); scoreScript._setIndexName(indexName); scoreScript._setShard(shardId); return scoreScript; @@ -174,7 +178,7 @@ public String toString(String field) { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (sameClassAs(o) == false) return false; ScriptScoreQuery that = (ScriptScoreQuery) o; return shardId == that.shardId && subQuery.equals(that.subQuery) && @@ -186,7 +190,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(subQuery, script, minScore, indexName, shardId, indexVersion); + return Objects.hash(classHash(), subQuery, script, minScore, indexName, shardId, indexVersion); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index 222e934d96a73..40c8896aaabeb 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -49,7 +49,7 @@ public double score(int docId, float subQueryScore) throws IOException { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { Explanation functionExplanation = leafFunction.explainScore(docId, subQueryScore); return Explanation.match( - 
functionExplanation.getValue().floatValue() * (float) getWeight(), "product of:", + functionExplanation.getValue().floatValue() * getWeight(), "product of:", functionExplanation, explainWeight()); } }; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index eb86c41ec7c12..ea50e3db5196d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -56,7 +56,7 @@ final class PerThreadIDVersionAndSeqNoLookup { /** * Initialize lookup for the provided segment */ - PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { + PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField, boolean trackReaderKey) throws IOException { this.uidField = uidField; final Terms terms = reader.terms(uidField); if (terms == null) { @@ -77,10 +77,14 @@ final class PerThreadIDVersionAndSeqNoLookup { throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field; _uid terms [" + terms + "]"); } Object readerKey = null; - assert (readerKey = reader.getCoreCacheHelper().getKey()) != null; + assert trackReaderKey ? (readerKey = reader.getCoreCacheHelper().getKey()) != null : readerKey == null; this.readerKey = readerKey; } + PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { + this(reader, uidField, true); + } + /** Return null if id is not found. * We pass the {@link LeafReaderContext} as an argument so that things * still work with reader wrappers that hide some documents while still @@ -89,7 +93,7 @@ final class PerThreadIDVersionAndSeqNoLookup { */ public DocIdAndVersion lookupVersion(BytesRef id, boolean loadSeqNo, LeafReaderContext context) throws IOException { - assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) : + assert readerKey == null || context.reader().getCoreCacheHelper().getKey().equals(readerKey) : "context's reader is not the same as the reader class was initialized on."; int docID = getDocID(id, context); @@ -144,7 +148,7 @@ private static long readNumericDocValues(LeafReader reader, String field, int do /** Return null if id is not found. 
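// Hedged usage sketch for the uncached lookup added to VersionsAndSeqNoResolver below: it builds a throwaway
// PerThreadIDVersionAndSeqNoLookup (trackReaderKey=false) per leaf instead of going through the per-thread
// cache. The reader and document id here are placeholders.
Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId("doc-id"));
VersionsAndSeqNoResolver.DocIdAndVersion hit =
    VersionsAndSeqNoResolver.loadDocIdAndVersionUncached(indexReader, uid, true);
long version = hit == null ? Versions.NOT_FOUND : hit.version;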
*/ DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOException { - assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) : + assert readerKey == null || context.reader().getCoreCacheHelper().getKey().equals(readerKey) : "context's reader is not the same as the reader class was initialized on."; final int docID = getDocID(id, context); if (docID != DocIdSetIterator.NO_MORE_DOCS) { diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 3aeb5c4d18cb7..a5e9fbbb96155 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -134,6 +134,19 @@ public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term, return null; } + public static DocIdAndVersion loadDocIdAndVersionUncached(IndexReader reader, Term term, boolean loadSeqNo) throws IOException { + List leaves = reader.leaves(); + for (int i = leaves.size() - 1; i >= 0; i--) { + final LeafReaderContext leaf = leaves.get(i); + PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), term.field(), false); + DocIdAndVersion result = lookup.lookupVersion(term.bytes(), loadSeqNo, leaf); + if (result != null) { + return result; + } + } + return null; + } + /** * Loads the internal docId and sequence number of the latest copy for a given uid from the provided reader. * The result is either null or the live and latest version of the given uid. diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index c897379974488..6108c8cdb5a74 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -111,7 +111,7 @@ public static Recycler concurrent(final Recycler.Factory factory, fina private final Recycler[] recyclers; { - @SuppressWarnings("unchecked") + @SuppressWarnings({"rawtypes", "unchecked"}) final Recycler[] recyclers = new Recycler[concurrencyLevel]; this.recyclers = recyclers; for (int i = 0; i < concurrencyLevel; ++i) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 0861e483d52b7..cf223cd383030 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -213,6 +213,8 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, + RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, @@ -281,12 +283,11 @@ public void apply(Settings value, Settings current, 
Settings previous) { HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING, IndexModule.NODE_STORE_ALLOW_MMAP, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + MasterService.MASTER_SERVICE_STARVATION_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, @@ -296,6 +297,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterService.REMOTE_NODE_ATTRIBUTE, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, + RemoteClusterService.REMOTE_CLUSTER_COMPRESSION_SCHEME, RemoteConnectionStrategy.REMOTE_CONNECTION_MODE, ProxyConnectionStrategy.PROXY_ADDRESS, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, @@ -320,6 +322,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSettings.PUBLISH_PORT, TransportSettings.PUBLISH_PORT_PROFILE, TransportSettings.TRANSPORT_COMPRESS, + TransportSettings.TRANSPORT_COMPRESSION_SCHEME, TransportSettings.PING_SCHEDULE, TransportSettings.CONNECT_TIMEOUT, TransportSettings.DEFAULT_FEATURES_SETTING, @@ -398,6 +401,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.LOW_LEVEL_CANCELLATION_SETTING, SearchService.MAX_OPEN_SCROLL_CONTEXT, SearchService.ENABLE_REWRITE_AGGS_TO_FILTER_BY_FILTER, + SearchService.MAX_ASYNC_SEARCH_RESPONSE_SIZE_SETTING, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, Node.NODE_ATTRIBUTES, diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 9745ba80ddc6a..8e0f5de3d9e64 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -144,6 +144,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING, MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING, + MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING, MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 75d08a3427192..83a959ce97733 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -13,13 +13,15 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.BufferedChecksumIndexInput; import org.apache.lucene.store.ChecksumIndexInput; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; 
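// Rough usage sketch for the bootstrap() helper added to this class further below: it loads the keystore from
// the config directory, creates an empty one when none exists, and otherwise decrypts/upgrades it with the
// supplied password. The environment/terminal objects here are assumptions for illustration.
KeyStoreWrapper keystore = KeyStoreWrapper.bootstrap(environment.configFile(),
    () -> new SecureString(terminal.readSecret("Keystore password: ")));
try (SecureString secret = keystore.getString("bootstrap.password")) {
    // use the secure setting, then let try-with-resources clear it
}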
import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.util.SetOnce; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.hash.MessageDigests; @@ -44,6 +46,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.AccessDeniedException; import java.nio.file.Files; +import java.nio.file.LinkOption; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.attribute.PosixFileAttributeView; @@ -71,6 +74,9 @@ */ public class KeyStoreWrapper implements SecureSettings { + /** Arbitrarily chosen maximum passphrase length */ + public static final int MAX_PASSPHRASE_LENGTH = 128; + /** An identifier for the type of data that may be stored in a keystore entry. */ private enum EntryType { STRING, @@ -100,7 +106,7 @@ private static class Entry { "~!@#$%^&*-_=+?").toCharArray(); /** The name of the keystore file to read and write. */ - private static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; + public static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; /** The version of the metadata written before the keystore data. */ static final int FORMAT_VERSION = 4; @@ -193,6 +199,29 @@ public static void addBootstrapSeed(KeyStoreWrapper wrapper) { Arrays.fill(characters, (char)0); } + public static KeyStoreWrapper bootstrap(Path configDir, CheckedSupplier passwordSupplier) throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); + + SecureString password; + if (keystore != null && keystore.hasPassword()) { + password = passwordSupplier.get(); + } else { + password = new SecureString(new char[0]); + } + + try (password) { + if (keystore == null) { + final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); + keyStoreWrapper.save(configDir, new char[0]); + return keyStoreWrapper; + } else { + keystore.decrypt(password.getChars()); + KeyStoreWrapper.upgrade(keystore, configDir, password.getChars()); + } + } + return keystore; + } + /** * Loads information about the Elasticsearch keystore from the provided config directory. * @@ -205,7 +234,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { return null; } - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + Directory directory = new NIOFSDirectory(configDir); try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) { ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput); final int formatVersion; @@ -476,11 +505,16 @@ private void decryptLegacyEntries() throws GeneralSecurityException, IOException /** Write the keystore to the given config directory. 
*/ public synchronized void save(Path configDir, char[] password) throws Exception { + save(configDir, password, true); + } + + public synchronized void save(Path configDir, char[] password, boolean preservePermissions) throws Exception { ensureOpen(); - SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + Directory directory = new NIOFSDirectory(configDir); // write to tmp file first, then overwrite String tmpFile = KEYSTORE_FILENAME + ".tmp"; + Path keystoreTempFile = configDir.resolve(tmpFile); try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) { CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION); output.writeByte(password.length == 0 ? (byte)0 : (byte)1); @@ -514,17 +548,55 @@ public synchronized void save(Path configDir, char[] password) throws Exception final String message = String.format( Locale.ROOT, "unable to create temporary keystore at [%s], write permissions required for [%s] or run [elasticsearch-keystore upgrade]", - configDir.resolve(tmpFile), + keystoreTempFile, configDir); throw new UserException(ExitCodes.CONFIG, message, e); + } catch (final Exception e) { + try { + Files.deleteIfExists(keystoreTempFile); + } catch (Exception ex) { + e.addSuppressed(e); + } + throw e; } Path keystoreFile = keystorePath(configDir); - Files.move(configDir.resolve(tmpFile), keystoreFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); - PosixFileAttributeView attrs = Files.getFileAttributeView(keystoreFile, PosixFileAttributeView.class); - if (attrs != null) { - // don't rely on umask: ensure the keystore has minimal permissions - attrs.setPermissions(PosixFilePermissions.fromString("rw-rw----")); + if (preservePermissions) { + try { + // check that replace doesn't change the owner + if (Files.exists(keystoreFile, LinkOption.NOFOLLOW_LINKS) && + false == Files.getOwner(keystoreTempFile, LinkOption.NOFOLLOW_LINKS).equals(Files.getOwner(keystoreFile, + LinkOption.NOFOLLOW_LINKS))) { + String message = String.format( + Locale.ROOT, + "will not overwrite keystore at [%s], because this incurs changing the file owner", + keystoreFile); + throw new UserException(ExitCodes.CONFIG, message); + } + PosixFileAttributeView attrs = Files.getFileAttributeView(keystoreTempFile, PosixFileAttributeView.class); + if (attrs != null) { + // don't rely on umask: ensure the keystore has minimal permissions + attrs.setPermissions(PosixFilePermissions.fromString("rw-rw----")); + } + } catch (Exception e) { + try { + Files.deleteIfExists(keystoreTempFile); + } catch (Exception ex) { + e.addSuppressed(ex); + } + throw e; + } + } + + try { + Files.move(keystoreTempFile, keystoreFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); + } catch (Exception e) { + try { + Files.deleteIfExists(keystoreTempFile); + } catch (Exception ex) { + e.addSuppressed(ex); + } + throw e; } } @@ -583,7 +655,7 @@ public static void validateSettingName(String setting) { /** * Set a string setting. 
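// Since setString(...) becomes public (see the visibility change just below), tooling can seed a secure
// setting and persist it without going through the CLI; a sketch, with a made-up setting name:
KeyStoreWrapper ks = KeyStoreWrapper.create();
ks.setString("example.secure_token", "s3cr3t".toCharArray());
ks.save(configDir, new char[0]);   // unencrypted, matching the default CLI behaviour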
*/ - synchronized void setString(String setting, char[] value) { + public synchronized void setString(String setting, char[] value) { ensureOpen(); validateSettingName(setting); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index d888f25231131..487bacf1dd821 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -615,7 +615,7 @@ public interface SettingDependency { * * @return the setting */ - Setting getSetting(); + Setting getSetting(); /** * Validates the dependent setting value. @@ -749,7 +749,7 @@ public String toString() { public interface AffixSettingDependency extends SettingDependency { @Override - AffixSetting getSetting(); + AffixSetting getSetting(); } @@ -793,7 +793,7 @@ public Set getSettingsDependencies(String settingsKey) { .map(s -> new SettingDependency() { @Override - public Setting getSetting() { + public Setting getSetting() { return s.getSetting().getConcreteSettingForNamespace(namespace); } @@ -1405,6 +1405,23 @@ public static > Setting enumSetting(Class clazz, String return new Setting<>(key, defaultValue.toString(), e -> Enum.valueOf(clazz, e.toUpperCase(Locale.ROOT)), properties); } + /** + * Creates a setting where the allowed values are defined as enum constants. All enum constants must be uppercase. + * + * @param clazz the enum class + * @param key the key for the setting + * @param fallbackSetting the fallback setting for this setting + * @param validator validator for this setting + * @param properties properties for this setting like scope, filtering... + * @param the generics type parameter reflecting the actual type of the enum + * @return the setting object + */ + public static > Setting enumSetting(Class clazz, String key, Setting fallbackSetting, + Validator validator, Property... properties) { + return new Setting<>(new SimpleKey(key), fallbackSetting, fallbackSetting::getRaw, + e -> Enum.valueOf(clazz, e.toUpperCase(Locale.ROOT)), validator, properties); + } + /** * Creates a setting which specifies a memory size. 
This can either be * specified as an absolute bytes value or as a percentage of the heap @@ -1589,7 +1606,7 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett } } - static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { + static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { if (logger.isInfoEnabled()) { if (setting.isFiltered()) { logger.info("updating [{}]", setting.key); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 0139776885751..b190c0899adf0 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -386,7 +386,9 @@ public List getAsList(String key, List defaultValue, Boolean com final Object valueFromPrefix = settings.get(key); if (valueFromPrefix != null) { if (valueFromPrefix instanceof List) { - return Collections.unmodifiableList((List) valueFromPrefix); + @SuppressWarnings("unchecked") + final List valuesAsList = (List) valueFromPrefix; + return Collections.unmodifiableList(valuesAsList); } else if (commaDelimited) { String[] strings = Strings.splitStringByCommaToArray(get(key)); if (strings.length > 0) { @@ -524,7 +526,9 @@ public static Settings readSettingsFromStream(StreamInput in) throws IOException if (value == null) { builder.putNull(key); } else if (value instanceof List) { - builder.putList(key, (List) value); + @SuppressWarnings("unchecked") + List stringList = (List) value; + builder.putList(key, stringList); } else { builder.put(key, value.toString()); } @@ -832,7 +836,9 @@ public Builder copy(String key, String sourceKey, Settings source) { } final Object value = source.settings.get(sourceKey); if (value instanceof List) { - return putList(key, (List)value); + @SuppressWarnings("unchecked") + final List stringList = (List) value; + return putList(key, stringList); } else if (value == null) { return putNull(key); } else { @@ -1124,6 +1130,7 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { continue; } if (entry.getValue() instanceof List) { + @SuppressWarnings("unchecked") final ListIterator li = ((List) entry.getValue()).listIterator(); while (li.hasNext()) { final String settingValueRaw = li.next(); diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 761c3883fb386..f326010ed2582 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -48,7 +48,7 @@ public class DateFormatters { // it results in errors sent to status logger and startup to fail. // Hence a lazy initialization. 
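// A hedged example of the enum-setting overload with a fallback and validator added to Setting above; the
// setting keys and enum below are illustrative, not the real transport settings.
enum Scheme { DEFLATE, LZ4 }
static final Setting<Scheme> DEFAULT_SCHEME =
    Setting.enumSetting(Scheme.class, "example.compression_scheme", Scheme.DEFLATE, Setting.Property.NodeScope);
static final Setting<Scheme> REMOTE_SCHEME =
    Setting.enumSetting(Scheme.class, "example.remote.compression_scheme", DEFAULT_SCHEME, v -> {}, Setting.Property.NodeScope);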
private static final LazyInitializable deprecationLogger - = new LazyInitializable(() -> DeprecationLogger.getLogger(FormatNames.class)); + = new LazyInitializable<>(() -> DeprecationLogger.getLogger(FormatNames.class)); public static final WeekFields WEEK_FIELDS_ROOT = WeekFields.of(Locale.ROOT); diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index 3d195301ecfe6..08bb0ef5a2da8 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -204,6 +204,8 @@ public static ZoneId of(String zoneId) { static final long MAX_NANOSECOND_IN_MILLIS = MAX_NANOSECOND_INSTANT.toEpochMilli(); + public static final long MAX_NANOSECOND = toLong(MAX_NANOSECOND_INSTANT); + /** * convert a java time instant to a long value which is stored in lucene * the long value resembles the nanoseconds since the epoch diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index f1ae098238922..d0debefac403b 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -224,7 +224,7 @@ public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue def } else if (lowerSValue.endsWith("pb")) { return parse(sValue, lowerSValue, "pb", ByteSizeUnit.PB, settingName); } else if (lowerSValue.endsWith("b")) { - return new ByteSizeValue(Long.parseLong(lowerSValue.substring(0, lowerSValue.length() - 1).trim()), ByteSizeUnit.BYTES); + return parseBytes(lowerSValue, settingName, sValue); } else if (lowerSValue.equals("-1")) { // Allow this special value to be unit-less: return new ByteSizeValue(-1, ByteSizeUnit.BYTES); @@ -239,6 +239,18 @@ public static ByteSizeValue parseBytesSizeValue(String sValue, ByteSizeValue def } } + private static ByteSizeValue parseBytes(String lowerSValue, String settingName, String initialInput) { + String s = lowerSValue.substring(0, lowerSValue.length() - 1).trim(); + try { + return new ByteSizeValue(Long.parseLong(s), ByteSizeUnit.BYTES); + } catch (NumberFormatException e) { + throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", e, settingName, initialInput); + } catch (IllegalArgumentException e) { + throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}] as a size in bytes", e, settingName, + initialInput); + } + } + private static ByteSizeValue parse(final String initialInput, final String normalized, final String suffix, ByteSizeUnit unit, final String settingName) { final String s = normalized.substring(0, normalized.length() - suffix.length()).trim(); diff --git a/server/src/main/java/org/elasticsearch/common/unit/RelativeByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/RelativeByteSizeValue.java index bbb28958b9d3a..b637d195f66d5 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/RelativeByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/RelativeByteSizeValue.java @@ -84,8 +84,7 @@ public static RelativeByteSizeValue parseRelativeByteSizeValue(String value, Str } try { return new RelativeByteSizeValue(ByteSizeValue.parseBytesSizeValue(value, settingName)); - // todo: fix NumberFormatException case in ByteSizeValue. 
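// Behaviour this change standardises (sketch): a malformed "<number>b" value now fails with
// ElasticsearchParseException instead of leaking a NumberFormatException, which is why the extra catch in
// RelativeByteSizeValue below can be dropped.
ByteSizeValue ok = ByteSizeValue.parseBytesSizeValue("512b", "example.buffer.size");
try {
    ByteSizeValue.parseBytesSizeValue("twelveb", "example.buffer.size");
} catch (ElasticsearchParseException e) {
    // "failed to parse setting [example.buffer.size] with value [twelveb]"
}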
- } catch (NumberFormatException | ElasticsearchParseException e) { + } catch (ElasticsearchParseException e) { throw new ElasticsearchParseException("unable to parse [{}={}] as either percentage or bytes", e, settingName, value); } diff --git a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java index 239fb7522e5f7..fe0bda7c74e95 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java @@ -68,6 +68,7 @@ public static String[] concat(String[] one, String[] other) { * Concatenates 2 arrays */ public static T[] concat(T[] one, T[] other, Class clazz) { + @SuppressWarnings("unchecked") T[] target = (T[]) Array.newInstance(clazz, one.length + other.length); System.arraycopy(one, 0, target, 0, one.length); System.arraycopy(other, 0, target, one.length, other.length); diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 62ae3a026003c..5cf52c47fcabd 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -281,31 +281,6 @@ public static ArrayList arrayAsArrayList(E... elements) { return new ArrayList<>(Arrays.asList(elements)); } - @SafeVarargs - @SuppressWarnings("varargs") - public static ArrayList asArrayList(E first, E... other) { - if (other == null) { - throw new NullPointerException("other"); - } - ArrayList list = new ArrayList<>(1 + other.length); - list.add(first); - list.addAll(Arrays.asList(other)); - return list; - } - - @SafeVarargs - @SuppressWarnings("varargs") - public static ArrayList asArrayList(E first, E second, E... other) { - if (other == null) { - throw new NullPointerException("other"); - } - ArrayList list = new ArrayList<>(1 + 1 + other.length); - list.add(first); - list.add(second); - list.addAll(Arrays.asList(other)); - return list; - } - /** * Creates a copy of the given collection with the given element appended. 
* @@ -350,4 +325,11 @@ public static List> eagerPartition(List list, int size) { return result; } + + public static List concatLists(List listA, List listB) { + List concatList = new ArrayList<>(listA.size() + listB.size()); + concatList.addAll(listA); + concatList.addAll(listB); + return concatList; + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/CuckooFilter.java b/server/src/main/java/org/elasticsearch/common/util/CuckooFilter.java index a7c9673582f02..bdd9f83dbdcdd 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CuckooFilter.java +++ b/server/src/main/java/org/elasticsearch/common/util/CuckooFilter.java @@ -7,8 +7,7 @@ */ package org.elasticsearch.common.util; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; +import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -56,7 +55,7 @@ public class CuckooFilter implements Writeable { private static final int MAX_EVICTIONS = 500; static final int EMPTY = 0; - private final PackedInts.Mutable data; + private final PackedArray data; private final int numBuckets; private final int bitsPerEntry; private final int fingerprintMask; @@ -82,7 +81,7 @@ public class CuckooFilter implements Writeable { throw new IllegalArgumentException("Attempted to create [" + numBuckets * entriesPerBucket + "] entries which is > Integer.MAX_VALUE"); } - this.data = PackedInts.getMutable(numBuckets * entriesPerBucket, bitsPerEntry, PackedInts.COMPACT); + this.data = new PackedArray(numBuckets * entriesPerBucket, bitsPerEntry); // puts the bits at the right side of the mask, e.g. `0000000000001111` for bitsPerEntry = 4 this.fingerprintMask = (0x80000000 >> (bitsPerEntry - 1)) >>> (Integer.SIZE - bitsPerEntry); @@ -106,7 +105,7 @@ public class CuckooFilter implements Writeable { + "] entries which is > Integer.MAX_VALUE"); } // TODO this is probably super slow, but just used for testing atm - this.data = PackedInts.getMutable(numBuckets * entriesPerBucket, bitsPerEntry, PackedInts.COMPACT); + this.data = new PackedArray(numBuckets * entriesPerBucket, bitsPerEntry); for (int i = 0; i < other.data.size(); i++) { data.set(i, other.data.get(i)); } @@ -121,18 +120,7 @@ public class CuckooFilter implements Writeable { this.rng = rng; this.fingerprintMask = (0x80000000 >> (bitsPerEntry - 1)) >>> (Integer.SIZE - bitsPerEntry); - - data = (PackedInts.Mutable) PackedInts.getReader(new DataInput() { - @Override - public byte readByte() throws IOException { - return in.readByte(); - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - in.readBytes(b, offset, len); - } - }); + this.data = new PackedArray(in); } @Override @@ -142,18 +130,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(entriesPerBucket); out.writeVInt(count); out.writeVInt(evictedFingerprint); - - data.save(new DataOutput() { - @Override - public void writeByte(byte b) throws IOException { - out.writeByte(b); - } - - @Override - public void writeBytes(byte[] b, int offset, int length) throws IOException { - out.writeBytes(b, offset, length); - } - }); + this.data.save(out); } /** @@ -507,4 +484,201 @@ public boolean equals(Object other) { && Objects.equals(this.count, that.count) && Objects.equals(this.evictedFingerprint, that.evictedFingerprint); } + + /** + * Forked from Lucene's Packed64 class. 
The main difference is that this version + * can be read from / write to Elasticsearch streams. + */ + private static class PackedArray { + private static final int BLOCK_SIZE = 64; // 32 = int, 64 = long + private static final int BLOCK_BITS = 6; // The #bits representing BLOCK_SIZE + private static final int MOD_MASK = BLOCK_SIZE - 1; // x % BLOCK_SIZE + + /** + * Values are stores contiguously in the blocks array. + */ + private final long[] blocks; + /** + * A right-aligned mask of width BitsPerValue used by {@link #get(int)}. + */ + private final long maskRight; + /** + * Optimization: Saves one lookup in {@link #get(int)}. + */ + private final int bpvMinusBlockSize; + + private final int bitsPerValue; + private final int valueCount; + + PackedArray(int valueCount, int bitsPerValue) { + this.bitsPerValue = bitsPerValue; + this.valueCount = valueCount; + final int longCount = PackedInts.Format.PACKED.longCount(PackedInts.VERSION_CURRENT, valueCount, bitsPerValue); + this.blocks = new long[longCount]; + maskRight = ~0L << (BLOCK_SIZE-bitsPerValue) >>> (BLOCK_SIZE-bitsPerValue); + bpvMinusBlockSize = bitsPerValue - BLOCK_SIZE; + } + + PackedArray(StreamInput in) + throws IOException { + this.bitsPerValue = in.readVInt(); + this.valueCount = in.readVInt(); + this.blocks = in.readLongArray(); + maskRight = ~0L << (BLOCK_SIZE - bitsPerValue) >>> (BLOCK_SIZE - bitsPerValue); + bpvMinusBlockSize = bitsPerValue - BLOCK_SIZE; + } + + public void save(StreamOutput out) throws IOException { + out.writeVInt(bitsPerValue); + out.writeVInt(valueCount); + out.writeLongArray(blocks); + } + + public int size() { + return valueCount; + } + + public long get(final int index) { + // The abstract index in a bit stream + final long majorBitPos = (long)index * bitsPerValue; + // The index in the backing long-array + final int elementPos = (int)(majorBitPos >>> BLOCK_BITS); + // The number of value-bits in the second long + final long endBits = (majorBitPos & MOD_MASK) + bpvMinusBlockSize; + + if (endBits <= 0) { // Single block + return (blocks[elementPos] >>> -endBits) & maskRight; + } + // Two blocks + return ((blocks[elementPos] << endBits) + | (blocks[elementPos+1] >>> (BLOCK_SIZE - endBits))) + & maskRight; + } + + public int get(int index, long[] arr, int off, int len) { + assert len > 0 : "len must be > 0 (got " + len + ")"; + assert index >= 0 && index < valueCount; + len = Math.min(len, valueCount - index); + assert off + len <= arr.length; + + final int originalIndex = index; + final PackedInts.Decoder decoder = PackedInts.getDecoder(PackedInts.Format.PACKED, PackedInts.VERSION_CURRENT, bitsPerValue); + // go to the next block where the value does not span across two blocks + final int offsetInBlocks = index % decoder.longValueCount(); + if (offsetInBlocks != 0) { + for (int i = offsetInBlocks; i < decoder.longValueCount() && len > 0; ++i) { + arr[off++] = get(index++); + --len; + } + if (len == 0) { + return index - originalIndex; + } + } + + // bulk get + assert index % decoder.longValueCount() == 0; + int blockIndex = (int) (((long) index * bitsPerValue) >>> BLOCK_BITS); + assert (((long)index * bitsPerValue) & MOD_MASK) == 0; + final int iterations = len / decoder.longValueCount(); + decoder.decode(blocks, blockIndex, arr, off, iterations); + final int gotValues = iterations * decoder.longValueCount(); + index += gotValues; + len -= gotValues; + assert len >= 0; + + if (index > originalIndex) { + // stay at the block boundary + return index - originalIndex; + } else { + // no progress so far => 
already at a block boundary but no full block to get + assert index == originalIndex; + assert len > 0 : "len must be > 0 (got " + len + ")"; + assert index >= 0 && index < size(); + assert off + len <= arr.length; + + final int gets = Math.min(size() - index, len); + for (int i = index, o = off, end = index + gets; i < end; ++i, ++o) { + arr[o] = get(i); + } + return gets; + } + } + + public void set(final int index, final long value) { + // The abstract index in a contiguous bit stream + final long majorBitPos = (long)index * bitsPerValue; + // The index in the backing long-array + final int elementPos = (int)(majorBitPos >>> BLOCK_BITS); // / BLOCK_SIZE + // The number of value-bits in the second long + final long endBits = (majorBitPos & MOD_MASK) + bpvMinusBlockSize; + + if (endBits <= 0) { // Single block + blocks[elementPos] = blocks[elementPos] & ~(maskRight << -endBits) + | (value << -endBits); + return; + } + // Two blocks + blocks[elementPos] = blocks[elementPos] & ~(maskRight >>> endBits) + | (value >>> endBits); + blocks[elementPos+1] = blocks[elementPos+1] & (~0L >>> endBits) + | (value << (BLOCK_SIZE - endBits)); + } + + public int set(int index, long[] arr, int off, int len) { + assert len > 0 : "len must be > 0 (got " + len + ")"; + assert index >= 0 && index < valueCount; + len = Math.min(len, valueCount - index); + assert off + len <= arr.length; + + final int originalIndex = index; + final PackedInts.Encoder encoder = PackedInts.getEncoder(PackedInts.Format.PACKED, PackedInts.VERSION_CURRENT, bitsPerValue); + + // go to the next block where the value does not span across two blocks + final int offsetInBlocks = index % encoder.longValueCount(); + if (offsetInBlocks != 0) { + for (int i = offsetInBlocks; i < encoder.longValueCount() && len > 0; ++i) { + set(index++, arr[off++]); + --len; + } + if (len == 0) { + return index - originalIndex; + } + } + + // bulk set + assert index % encoder.longValueCount() == 0; + int blockIndex = (int) (((long) index * bitsPerValue) >>> BLOCK_BITS); + assert (((long)index * bitsPerValue) & MOD_MASK) == 0; + final int iterations = len / encoder.longValueCount(); + encoder.encode(arr, off, blocks, blockIndex, iterations); + final int setValues = iterations * encoder.longValueCount(); + index += setValues; + len -= setValues; + assert len >= 0; + + if (index > originalIndex) { + // stay at the block boundary + return index - originalIndex; + } else { + // no progress so far => already at a block boundary but no full block to get + assert index == originalIndex; + len = Math.min(len, size() - index); + assert off + len <= arr.length; + + for (int i = index, o = off, end = index + len; i < end; ++i, ++o) { + set(i, arr[o]); + } + return len; + } + } + + public long ramBytesUsed() { + return RamUsageEstimator.alignObjectSize( + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + + 3 * Integer.BYTES // bpvMinusBlockSize,valueCount,bitsPerValue + + Long.BYTES // maskRight + + RamUsageEstimator.NUM_BYTES_OBJECT_REF) // blocks ref + + RamUsageEstimator.sizeOf(blocks); + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/Maps.java b/server/src/main/java/org/elasticsearch/common/util/Maps.java index 02a041fdc9ff7..bed67feaae09e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/Maps.java +++ b/server/src/main/java/org/elasticsearch/common/util/Maps.java @@ -41,6 +41,7 @@ public static Map copyMapWithAddedEntry(final Map map, final Objects.requireNonNull(value); assert checkIsImmutableMap(map, key, value); assert 
map.containsKey(key) == false : "expected entry [" + key + "] to not already be present in map"; + @SuppressWarnings("rawtypes") final Map.Entry[] entries = new Map.Entry[map.size() + 1]; map.entrySet().toArray(entries); entries[entries.length - 1] = Map.entry(key, value); diff --git a/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java b/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java index ce3b0cb4705fb..b3d0951cf2135 100644 --- a/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java +++ b/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java @@ -103,23 +103,6 @@ public SetBackedScalingCuckooFilter(int threshold, Random rng, double fpp) { this.fpp = fpp; } - public SetBackedScalingCuckooFilter(SetBackedScalingCuckooFilter other) { - this.threshold = other.threshold; - this.isSetMode = other.isSetMode; - this.rng = other.rng; - this.breaker = other.breaker; - this.capacity = other.capacity; - this.fpp = other.fpp; - if (isSetMode) { - this.hashes = new HashSet<>(other.hashes); - } else { - this.filters = new ArrayList<>(other.filters); - this.numBuckets = filters.get(0).getNumBuckets(); - this.fingerprintMask = filters.get(0).getFingerprintMask(); - this.bitsPerEntry = filters.get(0).getBitsPerEntry(); - } - } - public SetBackedScalingCuckooFilter(StreamInput in, Random rng) throws IOException { this.threshold = in.readVInt(); this.isSetMode = in.readBoolean(); @@ -150,6 +133,27 @@ public void writeTo(StreamOutput out) throws IOException { } } + /** + * Returns the number of distinct values that are tracked before converting to an approximate representation. + * */ + public int getThreshold() { + return threshold; + } + + /** + * Returns the random number generator used for the cuckoo hashing process. + * */ + public Random getRng() { + return rng; + } + + /** + * Returns the false-positive rate used for the cuckoo filters. + * */ + public double getFpp() { + return fpp; + } + /** * Registers a circuit breaker with the datastructure. * diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java index c33fbc864f13b..ab6de57cc2b9e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java @@ -46,17 +46,6 @@ public static ConcurrentMap newConcurrentMap() { return new ConcurrentHashMap<>(); } - /** - * Creates a new CHM with an aggressive concurrency level, aimed at highly updateable long living maps. 
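
The surrounding hunks delete the ConcurrentMapLong factory methods and (next) the delegating ConcurrentHashMapLong wrapper. A minimal sketch of the likely replacement pattern, assuming call sites simply key a plain ConcurrentMap by boxed Long, which is all the removed wrapper delegated to anyway:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class LongKeyedMapDemo {
        public static void main(String[] args) {
            // what ConcurrentCollections.newConcurrentMap() returns under the hood
            ConcurrentMap<Long, String> byId = new ConcurrentHashMap<>();
            byId.put(42L, "task-42");            // long keys are autoboxed
            byId.putIfAbsent(42L, "ignored");    // same ConcurrentMap API the removed wrapper exposed
            System.out.println(byId.get(42L));   // -> task-42
        }
    }
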
- */ - public static ConcurrentMapLong newConcurrentMapLongWithAggressiveConcurrency() { - return new ConcurrentHashMapLong<>(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()); - } - - public static ConcurrentMapLong newConcurrentMapLong() { - return new ConcurrentHashMapLong<>(ConcurrentCollections.newConcurrentMap()); - } - public static Set newConcurrentSet() { return Collections.newSetFromMap(ConcurrentCollections.newConcurrentMap()); } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java deleted file mode 100644 index 6112b214a4952..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentHashMapLong.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.util.concurrent; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; - -public class ConcurrentHashMapLong implements ConcurrentMapLong { - - private final ConcurrentMap map; - - public ConcurrentHashMapLong(ConcurrentMap map) { - this.map = map; - } - - @Override - public T get(long key) { - return map.get(key); - } - - @Override - public T remove(long key) { - return map.remove(key); - } - - @Override - public T put(long key, T value) { - return map.put(key, value); - } - - // MAP DELEGATION - - @Override - public boolean isEmpty() { - return map.isEmpty(); - } - - @Override - public int size() { - return map.size(); - } - - @Override - public T get(Object key) { - return map.get(key); - } - - @Override - public boolean containsKey(Object key) { - return map.containsKey(key); - } - - @Override - public boolean containsValue(Object value) { - return map.containsValue(value); - } - - @Override - public T put(Long key, T value) { - return map.put(key, value); - } - - @Override - public T putIfAbsent(Long key, T value) { - return map.putIfAbsent(key, value); - } - - @Override - public void putAll(Map m) { - map.putAll(m); - } - - @Override - public T remove(Object key) { - return map.remove(key); - } - - @Override - public boolean remove(Object key, Object value) { - return map.remove(key, value); - } - - @Override - public boolean replace(Long key, T oldValue, T newValue) { - return map.replace(key, oldValue, newValue); - } - - @Override - public T replace(Long key, T value) { - return map.replace(key, value); - } - - @Override - public void clear() { - map.clear(); - } - - @Override - public Set keySet() { - return map.keySet(); - } - - @Override - public Collection values() { - return map.values(); - } - - @Override - public Set> entrySet() { - return map.entrySet(); - } - - @Override - public boolean equals(Object o) { - return map.equals(o); - } - - @Override - public int hashCode() { - return map.hashCode(); - } - - @Override - public String toString() { - return map.toString(); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index 7dcd681ac27ce..d770e5547b604 100644 
--- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -25,7 +25,7 @@ public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { throw new IllegalStateException("forced execution, but expected a size queue"); } try { - ((SizeBlockingQueue) queue).forcePut(r); + ((SizeBlockingQueue) queue).forcePut(r); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IllegalStateException("forced execution, but got interrupted", e); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index b547eed083144..ef429699a0233 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -55,9 +55,22 @@ public static int allocatedProcessors(final Settings settings) { return NODE_PROCESSORS_SETTING.get(settings); } - public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, - ThreadContext contextHolder, ScheduledExecutorService timer) { - return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer); + public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing( + String name, + ThreadFactory threadFactory, + ThreadContext contextHolder, + ScheduledExecutorService timer, + PrioritizedEsThreadPoolExecutor.StarvationWatcher starvationWatcher) { + return new PrioritizedEsThreadPoolExecutor( + name, + 1, + 1, + 0L, + TimeUnit.MILLISECONDS, + threadFactory, + contextHolder, + timer, + starvationWatcher); } public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java index 28e31306e126a..75fecae571040 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedCallable.java @@ -11,7 +11,7 @@ import java.util.concurrent.Callable; -public abstract class PrioritizedCallable implements Callable, Comparable { +public abstract class PrioritizedCallable implements Callable, Comparable> { private final Priority priority; @@ -24,7 +24,7 @@ protected PrioritizedCallable(Priority priority) { } @Override - public int compareTo(PrioritizedCallable pc) { + public int compareTo(PrioritizedCallable pc) { return priority.compareTo(pc.priority); } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java index bafc98e07d81c..8399dc8c66168 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedEsThreadPoolExecutor.java @@ -36,11 +36,21 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor { private final AtomicLong insertionOrder = new AtomicLong(); private final Queue current = ConcurrentCollections.newQueue(); private final ScheduledExecutorService timer; - - 
public PrioritizedEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, - ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) { + private final StarvationWatcher starvationWatcher; + + public PrioritizedEsThreadPoolExecutor( + String name, + int corePoolSize, + int maximumPoolSize, + long keepAliveTime, + TimeUnit unit, + ThreadFactory threadFactory, + ThreadContext contextHolder, + ScheduledExecutorService timer, + StarvationWatcher starvationWatcher) { super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<>(), threadFactory, contextHolder); this.timer = timer; + this.starvationWatcher = starvationWatcher; } public Pending[] getPending() { @@ -88,7 +98,7 @@ private void addPending(List runnables, List pending, boolean pending.add(new Pending(super.unwrap(innerRunnable), t.priority(), t.insertionOrder, executing)); } } else if (runnable instanceof PrioritizedFutureTask) { - PrioritizedFutureTask t = (PrioritizedFutureTask) runnable; + PrioritizedFutureTask t = (PrioritizedFutureTask) runnable; Object task = t.task; if (t.task instanceof Runnable) { task = super.unwrap((Runnable) t.task); @@ -101,12 +111,20 @@ private void addPending(List runnables, List pending, boolean @Override protected void beforeExecute(Thread t, Runnable r) { current.add(r); + if (getQueue().isEmpty()) { + starvationWatcher.onEmptyQueue(); + } } @Override protected void afterExecute(Runnable r, Throwable t) { super.afterExecute(r, t); current.remove(r); + if (getQueue().isEmpty()) { + starvationWatcher.onEmptyQueue(); + } else { + starvationWatcher.onNonemptyQueue(); + } } public void execute(Runnable command, final TimeValue timeout, final Runnable timeoutCallback) { @@ -161,7 +179,7 @@ protected RunnableFuture newTaskFor(Callable callable) { if ((callable instanceof PrioritizedCallable) == false) { callable = PrioritizedCallable.wrap(callable, Priority.NORMAL); } - return new PrioritizedFutureTask<>((PrioritizedCallable)callable, insertionOrder.incrementAndGet()); + return new PrioritizedFutureTask((PrioritizedCallable)callable, insertionOrder.incrementAndGet()); } public static class Pending { @@ -252,7 +270,7 @@ public Runnable unwrap() { } - private static final class PrioritizedFutureTask extends FutureTask implements Comparable { + private static final class PrioritizedFutureTask extends FutureTask implements Comparable> { final Object task; final Priority priority; @@ -273,7 +291,7 @@ private static final class PrioritizedFutureTask extends FutureTask implem } @Override - public int compareTo(PrioritizedFutureTask pft) { + public int compareTo(PrioritizedFutureTask pft) { int res = priority.compareTo(pft.priority); if (res != 0) { return res; @@ -282,4 +300,33 @@ public int compareTo(PrioritizedFutureTask pft) { } } + /** + * We expect the work queue to be empty fairly frequently; if the queue remains nonempty for sufficiently long then there's a risk that + * some lower-priority tasks are being starved of access to the executor. Implementations of this interface are notified whether the + * work queue is empty or not before and after execution of each task, so that we can warn the user of this possible starvation. 
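
As a sketch of how the new hook might be consumed (a hypothetical watcher, not part of this patch; only the StarvationWatcher interface and the executor constructor parameter come from the change): warn once if the queue has been observed nonempty for many consecutive task completions.

    import java.util.concurrent.atomic.AtomicLong;

    import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;

    public class LoggingStarvationWatcher implements PrioritizedEsThreadPoolExecutor.StarvationWatcher {

        private static final int WARN_AFTER = 1_000;          // illustrative threshold only
        private final AtomicLong nonemptyObservations = new AtomicLong();

        @Override
        public void onEmptyQueue() {
            nonemptyObservations.set(0);                      // queue drained, so no starvation concern
        }

        @Override
        public void onNonemptyQueue() {
            if (nonemptyObservations.incrementAndGet() == WARN_AFTER) {
                System.err.println("prioritized executor queue has stayed nonempty for " + WARN_AFTER + " tasks");
            }
        }
    }
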
+ */ + public interface StarvationWatcher { + + /** + * Called before and after the execution of each task if the queue is empty (excluding the task being executed) + */ + void onEmptyQueue(); + + /** + * Called after the execution of each task if the queue is nonempty (excluding the task being executed) + */ + void onNonemptyQueue(); + + StarvationWatcher NOOP_STARVATION_WATCHER = new StarvationWatcher() { + @Override + public void onEmptyQueue() { + } + + @Override + public void onNonemptyQueue() { + } + }; + + } + } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java index c844f95234b46..760574a013e42 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java @@ -185,7 +185,7 @@ public Object[] toArray() { @Override public T[] toArray(T[] a) { - return (T[]) queue.toArray(a); + return queue.toArray(a); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index a2e2a3faf8aa0..400abbfa461c8 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -107,13 +107,22 @@ public StoredContext stashContext() { /** * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + * The same is applied to Task.TRACE_ID. * Otherwise when context is stash, it should be empty. */ - if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { - ThreadContextStruct threadContextStruct = - DEFAULT_CONTEXT.putHeaders(Map.of(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID))); + + if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID) || context.requestHeaders.containsKey(Task.TRACE_ID)) { + Map map = new HashMap<>(2, 1); + if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { + map.put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)); + } + if (context.requestHeaders.containsKey(Task.TRACE_ID)) { + map.put(Task.TRACE_ID, context.requestHeaders.get(Task.TRACE_ID)); + } + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders(map); threadLocal.set(threadContextStruct); - } else { + } + else { threadLocal.set(DEFAULT_CONTEXT); } return () -> { diff --git a/server/src/main/java/org/elasticsearch/common/util/iterable/Iterables.java b/server/src/main/java/org/elasticsearch/common/util/iterable/Iterables.java index 550361ed15717..27f2f5d6f21eb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/iterable/Iterables.java +++ b/server/src/main/java/org/elasticsearch/common/util/iterable/Iterables.java @@ -19,6 +19,8 @@ public class Iterables { + @SafeVarargs + @SuppressWarnings("varargs") public static Iterable concat(Iterable... 
inputs) { Objects.requireNonNull(inputs); return new ConcatenatedIterable<>(inputs); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index 16179741e25c1..67cc87ee93ef8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java @@ -54,7 +54,7 @@ public static List extractRawValues(String path, Map map } @SuppressWarnings({"unchecked"}) - private static void extractRawValues(List values, Map part, String[] pathElements, int index) { + private static void extractRawValues(List values, Map part, String[] pathElements, int index) { if (index == pathElements.length) { return; } @@ -69,9 +69,11 @@ private static void extractRawValues(List values, Map part, Stri if (currentValue instanceof Map) { extractRawValues(values, (Map) currentValue, pathElements, nextIndex); } else if (currentValue instanceof List) { - extractRawValues(values, (List) currentValue, pathElements, nextIndex); + extractRawValues(values, (List) currentValue, pathElements, nextIndex); } else { - values.add(currentValue); + if (nextIndex == pathElements.length) { + values.add(currentValue); + } } } if (nextIndex == pathElements.length) { @@ -83,7 +85,7 @@ private static void extractRawValues(List values, Map part, Stri } @SuppressWarnings({"unchecked"}) - private static void extractRawValues(List values, List part, String[] pathElements, int index) { + private static void extractRawValues(List values, List part, String[] pathElements, int index) { for (Object value : part) { if (value == null) { continue; @@ -91,7 +93,7 @@ private static void extractRawValues(List values, List part, String[] pa if (value instanceof Map) { extractRawValues(values, (Map) value, pathElements, index); } else if (value instanceof List) { - extractRawValues(values, (List) value, pathElements, index); + extractRawValues(values, (List) value, pathElements, index); } else { if (index == pathElements.length) { values.add(value); @@ -360,6 +362,7 @@ private static Map filter(Map map, excludeState = excludeAutomaton.step(excludeState, '.'); } + @SuppressWarnings("unchecked") Map valueAsMap = (Map) value; Map filteredValue = filter(valueAsMap, subIncludeAutomaton, subIncludeState, excludeAutomaton, excludeState, matchAllAutomaton); @@ -402,7 +405,8 @@ private static List filter(Iterable iterable, if (excludeState != -1) { excludeState = excludeAutomaton.step(excludeState, '.'); } - Map filteredValue = filter((Map)value, + @SuppressWarnings("unchecked") + Map filteredValue = filter((Map) value, includeAutomaton, includeState, excludeAutomaton, excludeState, matchAllAutomaton); if (filteredValue.isEmpty() == false) { filtered.add(filteredValue); @@ -569,6 +573,7 @@ public static TimeValue nodeTimeValue(Object node) { return TimeValue.parseTimeValue(node.toString(), null, XContentMapValues.class.getSimpleName() + ".nodeTimeValue"); } + @SuppressWarnings("unchecked") public static Map nodeMapValue(Object node, String desc) { if (node instanceof Map) { return (Map) node; @@ -585,7 +590,7 @@ public static Map nodeMapValue(Object node, String desc) { */ public static String[] nodeStringArrayValue(Object node) { if (isArray(node)) { - List list = (List) node; + List list = (List) node; String[] arr = new String[list.size()]; for (int i = 0; i < arr.length; i++) { arr[i] = nodeStringValue(list.get(i), 
null); diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index 2270656124d88..c7a4b51c5a9ec 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -74,7 +74,7 @@ protected void doRun() { logger.trace("[{}] opening probe connection", thisConnectionAttempt); transportService.openConnection(targetNode, ConnectionProfile.buildSingleChannelProfile(Type.REG, probeConnectTimeout, probeHandshakeTimeout, - TimeValue.MINUS_ONE, null), listener.delegateFailure((l, connection) -> { + TimeValue.MINUS_ONE, null, null), listener.delegateFailure((l, connection) -> { logger.trace("[{}] opened probe connection", thisConnectionAttempt); // use NotifyOnceListener to make sure the following line does not result in onFailure being called when diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 748813fb356ae..9bfe598dc0566 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -18,8 +18,8 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.NativeFSLockFactory; -import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -365,6 +365,13 @@ private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings SNAPSHOT_CACHE_FOLDER )); + final Set ignoredFileNames = new HashSet<>(Arrays.asList( + NODE_LOCK_FILENAME, + TEMP_FILE_NAME, + TEMP_FILE_NAME + ".tmp", + TEMP_FILE_NAME + ".final" + )); + try (DirectoryStream stream = Files.newDirectoryStream(legacyNodePath.path)) { for (Path subFolderPath : stream) { final String fileName = subFolderPath.getFileName().toString(); @@ -381,8 +388,7 @@ private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings targetSubFolderPath); } folderNames.add(fileName); - } else if (fileName.equals(NODE_LOCK_FILENAME) == false && - fileName.equals(TEMP_FILE_NAME) == false) { + } else if (ignoredFileNames.contains(fileName) == false) { throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " + subFolderPath); } @@ -534,7 +540,7 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... 
sh // resolve the directory the shard actually lives in Path p = shardPaths[i].resolve("index"); // open a directory (will be immediately closed) on the shard's location - dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING)); + dirs[i] = new NIOFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING)); // create a lock for the "write.lock" file try { locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME); diff --git a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index fe48f2d44be06..3a7cdfee84eef 100644 --- a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -64,6 +64,7 @@ public interface Lister, N private final AtomicLong round = new AtomicLong(); private boolean closed; + @SuppressWarnings("unchecked") protected AsyncShardFetch(Logger logger, String type, ShardId shardId, String customDataPath, Lister, T> action) { this.logger = logger; diff --git a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java index 3f483aa778a29..e4133a15b5b4d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java @@ -18,7 +18,7 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.NIOFSDirectory; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.core.Tuple; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; @@ -292,7 +292,7 @@ public final T read(NamedXContentRegistry namedXContentRegistry, Path file) thro } protected Directory newDirectory(Path dir) throws IOException { - return new SimpleFSDirectory(dir); + return new NIOFSDirectory(dir); } diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 2108585a54c5b..43adcf9ef70e1 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -9,6 +9,7 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -23,10 +24,10 @@ import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult.ShardStoreInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.store.StoreFileMetadata; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetadata; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; @@ -103,7 +104,7 @@ && canPerformOperationBasedRecovery(primaryStore, 
shardStores, currentNode) == f "existing allocation of replica to [" + currentNode + "] cancelled, can perform a noop recovery on ["+ nodeWithHighestMatch + "]", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, - UnassignedInfo.AllocationStatus.NO_ATTEMPT, failedNodeIds); + UnassignedInfo.AllocationStatus.NO_ATTEMPT, failedNodeIds, null); // don't cancel shard in the loop as it will cause a ConcurrentModificationException shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo, metadata.getIndexSafe(shard.index()), allocation.changes())); @@ -213,7 +214,11 @@ public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unas Metadata metadata = allocation.metadata(); IndexMetadata indexMetadata = metadata.index(unassignedShard.index()); totalDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetadata.getSettings()).getMillis(); - long remainingDelayNanos = unassignedInfo.getRemainingDelay(System.nanoTime(), indexMetadata.getSettings()); + long remainingDelayNanos = unassignedInfo.getRemainingDelay( + System.nanoTime(), + indexMetadata.getSettings(), + metadata.nodeShutdowns() + ); remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis(); } return AllocateUnassignedDecision.delayed(remainingDelayMillis, totalDelayMillis, nodeDecisions); diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index dc00c0b8fae18..dc15232b184a5 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -15,6 +15,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.network.CloseableChannel; @@ -30,6 +31,8 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; @@ -45,10 +48,8 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -62,9 +63,6 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private static final Logger logger = LogManager.getLogger(AbstractHttpServerTransport.class); private static final ActionListener NO_OP = ActionListener.wrap(() -> {}); - private static final long PRUNE_THROTTLE_INTERVAL = TimeUnit.SECONDS.toMillis(60); - private static final long MAX_CLIENT_STATS_AGE = TimeUnit.MINUTES.toMillis(5); - protected final Settings settings; public final HttpHandlingSettings handlingSettings; protected final NetworkService networkService; @@ -82,14 +80,19 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo 
private volatile BoundTransportAddress boundAddress; private final AtomicLong totalChannelsAccepted = new AtomicLong(); private final Set httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); + private final RefCounted refCounted = new AbstractRefCounted("abstract-http-server-transport") { + @Override + protected void closeInternal() { + allClientsClosedListener.onResponse(null); + } + }; private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final Map httpChannelStats = new ConcurrentHashMap<>(); + private final HttpClientStatsTracker httpClientStatsTracker; private final HttpTracer tracer; private volatile long slowLogThresholdMs; - protected volatile long lastClientStatsPruneTime; - private volatile boolean clientStatsEnabled; protected AbstractHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, ClusterSettings clusterSettings) { @@ -118,8 +121,7 @@ protected AbstractHttpServerTransport(Settings settings, NetworkService networkS clusterSettings.addSettingsUpdateConsumer(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING, slowLogThreshold -> this.slowLogThresholdMs = slowLogThreshold.getMillis()); slowLogThresholdMs = TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings).getMillis(); - clusterSettings.addSettingsUpdateConsumer(HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_ENABLED, this::enableClientStats); - clientStatsEnabled = HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_ENABLED.get(settings); + httpClientStatsTracker = new HttpClientStatsTracker(settings, clusterSettings, threadPool); } @Override @@ -138,43 +140,12 @@ public HttpInfo info() { @Override public HttpStats stats() { - pruneClientStats(false); - return new HttpStats(new ArrayList<>(httpChannelStats.values()), httpChannels.size(), totalChannelsAccepted.get()); - } - - /** - * Prunes client stats of entries that have been disconnected for more than five minutes. - * - * @param throttled When true, executes the prune process only if more than 60 seconds has elapsed since the last execution. - */ - void pruneClientStats(boolean throttled) { - if (clientStatsEnabled && throttled == false || - (threadPool.relativeTimeInMillis() - lastClientStatsPruneTime > PRUNE_THROTTLE_INTERVAL)) { - long nowMillis = threadPool.absoluteTimeInMillis(); - for (var statsEntry : httpChannelStats.entrySet()) { - long closedTimeMillis = statsEntry.getValue().closedTimeMillis; - if (closedTimeMillis > 0 && (nowMillis - closedTimeMillis > MAX_CLIENT_STATS_AGE)) { - httpChannelStats.remove(statsEntry.getKey()); - } - } - lastClientStatsPruneTime = threadPool.relativeTimeInMillis(); - } - } - - /** - * Enables or disables collection of HTTP client stats. - */ - void enableClientStats(boolean enabled) { - this.clientStatsEnabled = enabled; - if (enabled == false) { - // when disabling, immediately clear client stats - httpChannelStats.clear(); - } + return new HttpStats(httpClientStatsTracker.getClientStats(), httpChannels.size(), totalChannelsAccepted.get()); } protected void bindServer() { // Bind and start to accept incoming connections. 
- InetAddress hostAddresses[]; + final InetAddress[] hostAddresses; try { hostAddresses = networkService.resolveBindHostAddresses(bindHosts); } catch (IOException e) { @@ -243,14 +214,19 @@ protected void doStop() { } } } - try { + refCounted.decRef(); CloseableChannel.closeChannels(new ArrayList<>(httpChannels), true); } catch (Exception e) { logger.warn("unexpected exception while closing http channels", e); } - httpChannels.clear(); + try { + allClientsClosedListener.get(); + } catch (Exception e) { + assert false : e; + logger.warn("unexpected exception while waiting for http channels to close", e); + } stopInternal(); } @@ -331,40 +307,16 @@ protected void onServerException(HttpServerChannel channel, Exception e) { protected void serverAcceptedChannel(HttpChannel httpChannel) { boolean addedOnThisCall = httpChannels.add(httpChannel); assert addedOnThisCall : "Channel should only be added to http channel set once"; + refCounted.incRef(); + httpChannel.addCloseListener(ActionListener.wrap(() -> { + httpChannels.remove(httpChannel); + refCounted.decRef(); + })); totalChannelsAccepted.incrementAndGet(); - addClientStats(httpChannel); + httpClientStatsTracker.addClientStats(httpChannel); logger.trace(() -> new ParameterizedMessage("Http channel accepted: {}", httpChannel)); } - private HttpStats.ClientStats addClientStats(final HttpChannel httpChannel) { - if (clientStatsEnabled) { - final HttpStats.ClientStats clientStats; - if (httpChannel != null) { - clientStats = new HttpStats.ClientStats(threadPool.absoluteTimeInMillis()); - httpChannelStats.put(HttpStats.ClientStats.getChannelKey(httpChannel), clientStats); - httpChannel.addCloseListener(ActionListener.wrap(() -> { - try { - httpChannels.remove(httpChannel); - HttpStats.ClientStats disconnectedClientStats = - httpChannelStats.get(HttpStats.ClientStats.getChannelKey(httpChannel)); - if (disconnectedClientStats != null) { - disconnectedClientStats.closedTimeMillis = threadPool.absoluteTimeInMillis(); - } - } catch (Exception e) { - // the listener code above should never throw - logger.trace("error removing HTTP channel listener", e); - } - })); - } else { - clientStats = null; - } - pruneClientStats(true); - return clientStats; - } else { - return null; - } - } - /** * This method handles an incoming http request. 
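
A plain-JDK sketch (not part of the patch) of the shutdown pattern introduced above: the transport holds one reference for itself plus one per accepted channel, and doStop() only proceeds past closing the channels once the last reference has been released.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicInteger;

    public class RefCountedShutdownDemo {
        private final AtomicInteger refs = new AtomicInteger(1);              // the transport's own ref
        private final CompletableFuture<Void> allClientsClosed = new CompletableFuture<>();

        void channelAccepted() { refs.incrementAndGet(); }                    // serverAcceptedChannel()
        void channelClosed()   { if (refs.decrementAndGet() == 0) allClientsClosed.complete(null); }

        void stop() throws Exception {
            channelClosed();                   // drop the transport's own ref, mirrors refCounted.decRef()
            allClientsClosed.get();            // block until every accepted channel has closed
            System.out.println("all clients closed, safe to stop internals");
        }

        public static void main(String[] args) throws Exception {
            RefCountedShutdownDemo demo = new RefCountedShutdownDemo();
            demo.channelAccepted();
            demo.channelClosed();              // the channel closes before stop, so stop() returns immediately
            demo.stop();
        }
    }
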
* @@ -372,7 +324,7 @@ private HttpStats.ClientStats addClientStats(final HttpChannel httpChannel) { * @param httpChannel that received the http request */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { - updateClientStats(httpRequest, httpChannel); + httpClientStatsTracker.updateClientStats(httpRequest, httpChannel); final long startTime = threadPool.relativeTimeInMillis(); try { handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); @@ -386,61 +338,6 @@ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel htt } } - void updateClientStats(final HttpRequest httpRequest, final HttpChannel httpChannel) { - if (clientStatsEnabled && httpChannel != null) { - HttpStats.ClientStats clientStats = httpChannelStats.get(HttpStats.ClientStats.getChannelKey(httpChannel)); - if (clientStats == null) { - // will always return a non-null value when httpChannel is non-null - clientStats = addClientStats(httpChannel); - } - - if (clientStats.agent == null) { - final String elasticProductOrigin = getFirstValueForHeader(httpRequest, "x-elastic-product-origin"); - if (elasticProductOrigin != null) { - clientStats.agent = elasticProductOrigin; - } else { - final String userAgent = getFirstValueForHeader(httpRequest, "User-Agent"); - if (userAgent != null) { - clientStats.agent = userAgent; - } - } - } - if (clientStats.localAddress == null) { - clientStats.localAddress = - httpChannel.getLocalAddress() == null ? null : NetworkAddress.format(httpChannel.getLocalAddress()); - clientStats.remoteAddress = - httpChannel.getRemoteAddress() == null ? null : NetworkAddress.format(httpChannel.getRemoteAddress()); - } - if (clientStats.forwardedFor == null) { - final String forwardedFor = getFirstValueForHeader(httpRequest, "x-forwarded-for"); - if (forwardedFor != null) { - clientStats.forwardedFor = forwardedFor; - } - } - if (clientStats.opaqueId == null) { - final String opaqueId = getFirstValueForHeader(httpRequest, "x-opaque-id"); - if (opaqueId != null) { - clientStats.opaqueId = opaqueId; - } - } - clientStats.lastRequestTimeMillis = threadPool.absoluteTimeInMillis(); - clientStats.lastUri = httpRequest.uri(); - clientStats.requestCount.increment(); - clientStats.requestSizeBytes.add(httpRequest.content().length()); - } - } - - private static String getFirstValueForHeader(final HttpRequest request, final String header) { - for (Map.Entry> entry : request.getHeaders().entrySet()) { - if (entry.getKey().equalsIgnoreCase(header)) { - if (entry.getValue().size() > 0) { - return entry.getValue().get(0); - } - } - } - return null; - } - // Visible for testing void dispatchRequest(final RestRequest restRequest, final RestChannel channel, final Throwable badRequestCause) { final ThreadContext threadContext = threadPool.getThreadContext(); diff --git a/server/src/main/java/org/elasticsearch/http/HttpClientStatsTracker.java b/server/src/main/java/org/elasticsearch/http/HttpClientStatsTracker.java new file mode 100644 index 0000000000000..1a3c2345e9280 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpClientStatsTracker.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.http; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +/** + * Tracks a collection of {@link org.elasticsearch.http.HttpStats.ClientStats} for current and recently-closed HTTP connections. + */ +public class HttpClientStatsTracker { + + private static final Logger logger = LogManager.getLogger(); + + private static final long PRUNE_THROTTLE_INTERVAL = TimeUnit.SECONDS.toMillis(60); + private static final long MAX_CLIENT_STATS_AGE = TimeUnit.MINUTES.toMillis(5); + + private final Map httpChannelStats = new ConcurrentHashMap<>(); + private final ThreadPool threadPool; + + private volatile long lastClientStatsPruneTime; + private volatile boolean clientStatsEnabled; + + HttpClientStatsTracker(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this.threadPool = threadPool; + clientStatsEnabled = HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_ENABLED.get(settings); + clusterSettings.addSettingsUpdateConsumer(HttpTransportSettings.SETTING_HTTP_CLIENT_STATS_ENABLED, this::enableClientStats); + } + + /** + * Prunes client stats of entries that have been disconnected for more than {@link #MAX_CLIENT_STATS_AGE} (i.e. 5 minutes). + * + * @param throttled When true, executes the prune process only if more than {@link #PRUNE_THROTTLE_INTERVAL} (i.e. 60 seconds) has + * elapsed since the last execution. + */ + private void pruneClientStats(boolean throttled) { + if (clientStatsEnabled && throttled == false || + (threadPool.relativeTimeInMillis() - lastClientStatsPruneTime > PRUNE_THROTTLE_INTERVAL)) { + long nowMillis = threadPool.absoluteTimeInMillis(); + for (var statsEntry : httpChannelStats.entrySet()) { + long closedTimeMillis = statsEntry.getValue().closedTimeMillis; + if (closedTimeMillis > 0 && (nowMillis - closedTimeMillis > MAX_CLIENT_STATS_AGE)) { + httpChannelStats.remove(statsEntry.getKey()); + } + } + lastClientStatsPruneTime = threadPool.relativeTimeInMillis(); + } + } + + /** + * Enables or disables collection of HTTP client stats. + */ + private void enableClientStats(boolean enabled) { + this.clientStatsEnabled = enabled; + if (enabled == false) { + // when disabling, immediately clear client stats + httpChannelStats.clear(); + } + } + + /** + * Register the given channel with this tracker. + * + * @return the corresponding newly-created stats object, or {@code null} if disabled. 
+ */ + HttpStats.ClientStats addClientStats(final HttpChannel httpChannel) { + if (clientStatsEnabled) { + final HttpStats.ClientStats clientStats; + if (httpChannel != null) { + clientStats = new HttpStats.ClientStats(threadPool.absoluteTimeInMillis()); + httpChannelStats.put(getChannelKey(httpChannel), clientStats); + httpChannel.addCloseListener(ActionListener.wrap(() -> { + try { + HttpStats.ClientStats disconnectedClientStats = + httpChannelStats.get(getChannelKey(httpChannel)); + if (disconnectedClientStats != null) { + disconnectedClientStats.closedTimeMillis = threadPool.absoluteTimeInMillis(); + } + } catch (Exception e) { + assert false : e; // the listener code above should never throw + logger.warn("error removing HTTP channel listener", e); + } + })); + } else { + clientStats = null; + } + pruneClientStats(true); + return clientStats; + } else { + return null; + } + } + + private static String getFirstValueForHeader(final HttpRequest request, final String header) { + for (Map.Entry> entry : request.getHeaders().entrySet()) { + if (entry.getKey().equalsIgnoreCase(header)) { + if (entry.getValue().size() > 0) { + return entry.getValue().get(0); + } + } + } + return null; + } + + /** + * Adjust the stats for the given channel to reflect the latest request received. + */ + void updateClientStats(final HttpRequest httpRequest, final HttpChannel httpChannel) { + if (clientStatsEnabled && httpChannel != null) { + HttpStats.ClientStats clientStats = httpChannelStats.get(getChannelKey(httpChannel)); + if (clientStats == null) { + // will always return a non-null value when httpChannel is non-null + clientStats = addClientStats(httpChannel); + } + + if (clientStats.agent == null) { + final String elasticProductOrigin = getFirstValueForHeader(httpRequest, "x-elastic-product-origin"); + if (elasticProductOrigin != null) { + clientStats.agent = elasticProductOrigin; + } else { + final String userAgent = getFirstValueForHeader(httpRequest, "User-Agent"); + if (userAgent != null) { + clientStats.agent = userAgent; + } + } + } + if (clientStats.localAddress == null) { + clientStats.localAddress = + httpChannel.getLocalAddress() == null ? null : NetworkAddress.format(httpChannel.getLocalAddress()); + clientStats.remoteAddress = + httpChannel.getRemoteAddress() == null ? null : NetworkAddress.format(httpChannel.getRemoteAddress()); + } + if (clientStats.forwardedFor == null) { + final String forwardedFor = getFirstValueForHeader(httpRequest, "x-forwarded-for"); + if (forwardedFor != null) { + clientStats.forwardedFor = forwardedFor; + } + } + if (clientStats.opaqueId == null) { + final String opaqueId = getFirstValueForHeader(httpRequest, "x-opaque-id"); + if (opaqueId != null) { + clientStats.opaqueId = opaqueId; + } + } + clientStats.lastRequestTimeMillis = threadPool.absoluteTimeInMillis(); + clientStats.lastUri = httpRequest.uri(); + clientStats.requestCount.increment(); + clientStats.requestSizeBytes.add(httpRequest.content().length()); + } + } + + /** + * @return a list of the stats for the channels that are currently being tracked. 
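
The tracker keys its per-channel map by System.identityHashCode(channel) (see getChannelKey below). A small demonstration of the point made in that comment, namely that an identity-based key is independent of the object's state, unlike a state-based hashCode; the MutableKey class is purely illustrative and stands in for an HttpChannel.

    import java.util.Objects;

    public class IdentityKeyDemo {
        static final class MutableKey {
            int state;
            @Override public int hashCode() { return Objects.hash(state); }   // state-based, changes with state
        }

        public static void main(String[] args) {
            MutableKey channel = new MutableKey();
            int stateBased = channel.hashCode();
            int identityBased = System.identityHashCode(channel);
            channel.state = 42;                                               // mutate the object
            System.out.println(stateBased == channel.hashCode());             // false: the state-based code moved
            System.out.println(identityBased == System.identityHashCode(channel)); // true: identity key is stable
        }
    }
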
+ */ + List getClientStats() { + pruneClientStats(false); + return new ArrayList<>(httpChannelStats.values()); + } + + /** + * Returns a key suitable for use in a hash table for the specified HttpChannel + */ + private static int getChannelKey(HttpChannel channel) { + // always use an identity-based hash code rather than one based on object state + return System.identityHashCode(channel); + } + +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpStats.java b/server/src/main/java/org/elasticsearch/http/HttpStats.java index 6401f5f43d822..ccf47c73dce76 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpStats.java +++ b/server/src/main/java/org/elasticsearch/http/HttpStats.java @@ -190,13 +190,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(requestCount.longValue()); out.writeLong(requestSizeBytes.longValue()); } - - /** - * Returns a key suitable for use in a hash table for the specified HttpChannel - */ - public static int getChannelKey(HttpChannel channel) { - // always use an identity-based hash code rather than one based on object state - return System.identityHashCode(channel); - } } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 5d5c338c27526..2b5d1ac865267 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -380,8 +380,6 @@ public boolean match(String setting) { public static Type defaultStoreType(final boolean allowMmap) { if (allowMmap && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { return Type.HYBRIDFS; - } else if (Constants.WINDOWS) { - return Type.SIMPLEFS; } else { return Type.NIOFS; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index f19941cf6e378..cac67a18ef04a 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -23,18 +23,18 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -77,7 +77,6 @@ import java.io.Closeable; import java.io.IOException; -import java.nio.file.Path; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -421,22 +420,7 @@ public synchronized IndexShard createShard( } if (path == null) { - // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate 
this shard will be, e.g. for a shard - // that's being relocated/replicated we know how large it will become once it's done copying: - // Count up how many shards are currently on each data path: - Map dataPathToShardCount = new HashMap<>(); - for (IndexShard shard : this) { - Path dataPath = shard.shardPath().getRootStatePath(); - Integer curCount = dataPathToShardCount.get(dataPath); - if (curCount == null) { - curCount = 0; - } - dataPathToShardCount.put(dataPath, curCount + 1); - } - path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, - routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE - ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), - dataPathToShardCount); + path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings); logger.debug("{} creating using a new path [{}]", shardId, path); } else { logger.debug("{} creating using an existing path [{}]", shardId, path); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index f866bb60f883a..c36db7e5a385d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -31,6 +31,7 @@ import java.util.function.Function; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; +import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; @@ -379,6 +380,7 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile long mappingTotalFieldsLimit; private volatile long mappingDepthLimit; private volatile long mappingFieldNameLengthLimit; + private volatile long mappingDimensionFieldsLimit; /** * The maximum number of refresh listeners allows on this shard. 
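
The IndexSettings hunks here and just below wire the new dimension-fields mapping limit up as a dynamic per-index setting: it is read once at construction and kept in a volatile field that a registered settings-update consumer can overwrite later. A stripped-down, plain-Java sketch of that pattern (the class, the consumer list, and the initial value are illustrative; they stand in for scopedSettings.get and addSettingsUpdateConsumer, not the real setting machinery):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.LongConsumer;

    public class DynamicLimitDemo {
        private volatile long mappingDimensionFieldsLimit;

        DynamicLimitDemo(long initialValue, List<LongConsumer> updateConsumers) {
            this.mappingDimensionFieldsLimit = initialValue;                 // scopedSettings.get(...)
            updateConsumers.add(v -> this.mappingDimensionFieldsLimit = v);  // addSettingsUpdateConsumer(...)
        }

        long getMappingDimensionFieldsLimit() { return mappingDimensionFieldsLimit; }

        public static void main(String[] args) {
            List<LongConsumer> consumers = new ArrayList<>();
            DynamicLimitDemo settings = new DynamicLimitDemo(16, consumers);
            System.out.println(settings.getMappingDimensionFieldsLimit());   // 16
            consumers.forEach(c -> c.accept(32));                            // simulate a settings update
            System.out.println(settings.getMappingDimensionFieldsLimit());   // 32
        }
    }
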
@@ -503,6 +505,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mappingTotalFieldsLimit = scopedSettings.get(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING); mappingDepthLimit = scopedSettings.get(INDEX_MAPPING_DEPTH_LIMIT_SETTING); mappingFieldNameLengthLimit = scopedSettings.get(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING); + mappingDimensionFieldsLimit = scopedSettings.get(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, @@ -558,6 +561,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, this::setMappingTotalFieldsLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DEPTH_LIMIT_SETTING, this::setMappingDepthLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, this::setMappingFieldNameLengthLimit); + scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING, this::setMappingDimensionFieldsLimit); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { this.searchIdleAfter = searchIdleAfter; } @@ -1021,4 +1025,12 @@ public long getMappingFieldNameLengthLimit() { private void setMappingFieldNameLengthLimit(long value) { this.mappingFieldNameLengthLimit = value; } + + public long getMappingDimensionFieldsLimit() { + return mappingDimensionFieldsLimit; + } + + private void setMappingDimensionFieldsLimit(long value) { + this.mappingDimensionFieldsLimit = value; + } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 2f4392c7c22cb..f4b3a78dc0667 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -155,12 +155,12 @@ public static ESLogMessage of( private static Map prepareMap(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { Map map = new HashMap<>(); - map.put("message", index); - map.put("took", TimeValue.timeValueNanos(tookInNanos)); - map.put("took_millis", ""+TimeUnit.NANOSECONDS.toMillis(tookInNanos)); - map.put("id", doc.id()); + map.put("elasticsearch.slowlog.message", index); + map.put("elasticsearch.slowlog.took", TimeValue.timeValueNanos(tookInNanos).toString()); + map.put("elasticsearch.slowlog.took_millis", String.valueOf(TimeUnit.NANOSECONDS.toMillis(tookInNanos))); + map.put("elasticsearch.slowlog.id", doc.id()); if (doc.routing() != null) { - map.put("routing", doc.routing()); + map.put("elasticsearch.slowlog.routing", doc.routing()); } if (maxSourceCharsToLog == 0 || doc.source() == null || doc.source().length() == 0) { @@ -171,11 +171,11 @@ private static Map prepareMap(Index index, ParsedDocument doc, l String trim = Strings.cleanTruncate(source, maxSourceCharsToLog).trim(); StringBuilder sb = new StringBuilder(trim); StringBuilders.escapeJson(sb,0); - map.put("source", sb.toString()); + map.put("elasticsearch.slowlog.source", sb.toString()); } catch (IOException e) { StringBuilder sb = new StringBuilder("_failed_to_convert_[" + e.getMessage()+"]"); StringBuilders.escapeJson(sb,0); - map.put("source", sb.toString()); + map.put("elasticsearch.slowlog.source", 
sb.toString()); /* * We choose to fail to write to the slow log and instead let this percolate up to the post index listener loop where this * will be logged at the warn level. diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index 3577f8003ac6d..c38b0f01600db 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -141,28 +141,28 @@ public static ESLogMessage of(SearchContext context, long tookInNanos) { private static Map prepareMap(SearchContext context, long tookInNanos) { Map messageFields = new HashMap<>(); - messageFields.put("message", context.indexShard().shardId()); - messageFields.put("took", TimeValue.timeValueNanos(tookInNanos)); - messageFields.put("took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + messageFields.put("elasticsearch.slowlog.message", context.indexShard().shardId()); + messageFields.put("elasticsearch.slowlog.took", TimeValue.timeValueNanos(tookInNanos).toString()); + messageFields.put("elasticsearch.slowlog.took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos)); if (context.queryResult().getTotalHits() != null) { - messageFields.put("total_hits", context.queryResult().getTotalHits()); + messageFields.put("elasticsearch.slowlog.total_hits", context.queryResult().getTotalHits()); } else { - messageFields.put("total_hits", "-1"); + messageFields.put("elasticsearch.slowlog.total_hits", "-1"); } - messageFields.put("stats", escapeJson(ESLogMessage.asJsonArray( + messageFields.put("elasticsearch.slowlog.stats", escapeJson(ESLogMessage.asJsonArray( context.groupStats() != null ? context.groupStats().stream() : Stream.empty()))); - messageFields.put("search_type", context.searchType()); - messageFields.put("total_shards", context.numberOfShards()); + messageFields.put("elasticsearch.slowlog.search_type", context.searchType()); + messageFields.put("elasticsearch.slowlog.total_shards", context.numberOfShards()); if (context.request().source() != null) { String source = escapeJson(context.request().source().toString(FORMAT_PARAMS)); - messageFields.put("source", source); + messageFields.put("elasticsearch.slowlog.source", source); } else { - messageFields.put("source", "{}"); + messageFields.put("elasticsearch.slowlog.source", "{}"); } - messageFields.put("id", context.getTask().getHeader(Task.X_OPAQUE_ID)); + messageFields.put("elasticsearch.slowlog.id", context.getTask().getHeader(Task.X_OPAQUE_ID)); return messageFields; } diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 157804a0f8004..a3eedb088e6f2 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.IndexWarmer.TerminationHandle; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; @@ -232,7 +232,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin MappingLookup lookup = 
mapperService.mappingLookup(); if (lookup.hasNested()) { warmUp.add(Queries.newNonNestedFilter()); - lookup.getNestedParentMappers().stream().map(ObjectMapper::nestedTypeFilter).forEach(warmUp::add); + lookup.getNestedParentMappers().stream().map(NestedObjectMapper::nestedTypeFilter).forEach(warmUp::add); } final CountDownLatch latch = new CountDownLatch(reader.leaves().size() * warmUp.size()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 3a0abdd6752f5..5dacf4219b9f5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -13,14 +13,13 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.store.Directory; +import org.elasticsearch.common.lucene.FilterIndexCommit; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogDeletionPolicy; import java.io.IOException; import java.nio.file.Path; -import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; @@ -144,7 +143,7 @@ synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) { * @return true if the snapshotting commit can be clean up. */ synchronized boolean releaseCommit(final IndexCommit snapshotCommit) { - final IndexCommit releasingCommit = ((SnapshotIndexCommit) snapshotCommit).delegate; + final IndexCommit releasingCommit = ((SnapshotIndexCommit) snapshotCommit).getIndexCommit(); assert snapshottedCommits.containsKey(releasingCommit) : "Release non-snapshotted commit;" + "snapshotted commits [" + snapshottedCommits + "], releasing commit [" + releasingCommit + "]"; final int refCount = snapshottedCommits.addTo(releasingCommit, -1); // release refCount @@ -222,56 +221,14 @@ public static String commitDescription(IndexCommit commit) throws IOException { /** * A wrapper of an index commit that prevents it from being deleted. 
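
[Editor's note, not part of the diff: the rewritten SnapshotIndexCommit above relies on a delegating base class (Elasticsearch's FilterIndexCommit) so that only the one method whose behaviour must change is overridden, which is why all the hand-written forwarding methods could be deleted. A minimal, self-contained sketch of that pattern follows; the types here are hypothetical stand-ins, not the real Lucene/Elasticsearch classes.]

```java
// Hypothetical stand-ins; the real classes are Lucene's IndexCommit and
// Elasticsearch's FilterIndexCommit.
interface Commit {
    String segmentsFileName();
    void delete();
}

// Forwards every call to the wrapped commit, so subclasses inherit the delegation.
class ForwardingCommit implements Commit {
    protected final Commit in;
    ForwardingCommit(Commit in) { this.in = in; }
    @Override public String segmentsFileName() { return in.segmentsFileName(); }
    @Override public void delete() { in.delete(); }
}

// Only the behaviour that must differ is overridden: deletion is forbidden while the
// commit is snapshotted. Every other method falls through to the delegate.
class SnapshottedCommit extends ForwardingCommit {
    SnapshottedCommit(Commit in) { super(in); }
    @Override public void delete() {
        throw new UnsupportedOperationException("a snapshotted commit must not be deleted");
    }
}
```
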
*/ - private static class SnapshotIndexCommit extends IndexCommit { - private final IndexCommit delegate; - + private static class SnapshotIndexCommit extends FilterIndexCommit { SnapshotIndexCommit(IndexCommit delegate) { - this.delegate = delegate; - } - - @Override - public String getSegmentsFileName() { - return delegate.getSegmentsFileName(); - } - - @Override - public Collection getFileNames() throws IOException { - return delegate.getFileNames(); - } - - @Override - public Directory getDirectory() { - return delegate.getDirectory(); + super(delegate); } @Override public void delete() { throw new UnsupportedOperationException("A snapshot commit does not support deletion"); } - - @Override - public boolean isDeleted() { - return delegate.isDeleted(); - } - - @Override - public int getSegmentCount() { - return delegate.getSegmentCount(); - } - - @Override - public long getGeneration() { - return delegate.getGeneration(); - } - - @Override - public Map getUserData() throws IOException { - return delegate.getUserData(); - } - - @Override - public String toString() { - return "SnapshotIndexCommit{" + delegate + "}"; - } } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java index 27123711c812c..aed572c5396dd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.engine; import java.io.IOException; -import java.util.function.BiConsumer; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.search.ReferenceManager; @@ -28,7 +27,6 @@ */ @SuppressForbidden(reason = "reference counting is required here") class ElasticsearchReaderManager extends ReferenceManager { - private final BiConsumer refreshListener; /** * Creates and returns a new ElasticsearchReaderManager from the given @@ -36,13 +34,9 @@ class ElasticsearchReaderManager extends ReferenceManager refreshListener) { + ElasticsearchReaderManager(ElasticsearchDirectoryReader reader) { this.current = reader; - this.refreshListener = refreshListener; - refreshListener.accept(current, null); } @Override @@ -52,11 +46,7 @@ protected void decRef(ElasticsearchDirectoryReader reference) throws IOException @Override protected ElasticsearchDirectoryReader refreshIfNeeded(ElasticsearchDirectoryReader referenceToRefresh) throws IOException { - final ElasticsearchDirectoryReader reader = (ElasticsearchDirectoryReader) DirectoryReader.openIfChanged(referenceToRefresh); - if (reader != null) { - refreshListener.accept(reader, referenceToRefresh); - } - return reader; + return (ElasticsearchDirectoryReader) DirectoryReader.openIfChanged(referenceToRefresh); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 58bf6b9a29076..57ba0ac48f5b4 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -25,17 +25,12 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.SetOnce; import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.core.CheckedRunnable; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -43,14 +38,18 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParser; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; @@ -137,14 +136,6 @@ protected Engine(EngineConfig engineConfig) { this.eventListener = engineConfig.getEventListener(); } - /** Returns 0 in the case where accountable is null, otherwise returns {@code ramBytesUsed()} */ - protected static long guardedRamBytesUsed(Accountable a) { - if (a == null) { - return 0; - } - return a.ramBytesUsed(); - } - public final EngineConfig config() { return engineConfig; } @@ -548,10 +539,14 @@ public static class NoOpResult extends Result { } - protected final GetResult getFromSearcher(Get get, Engine.Searcher searcher) throws EngineException { + protected final GetResult getFromSearcher(Get get, Engine.Searcher searcher, boolean uncachedLookup) throws EngineException { final DocIdAndVersion docIdAndVersion; try { - docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.getIndexReader(), get.uid(), true); + if (uncachedLookup) { + docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersionUncached(searcher.getIndexReader(), get.uid(), true); + } else { + docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.getIndexReader(), get.uid(), true); + } } catch (Exception e) { Releasables.closeWhileHandlingException(searcher); //TODO: A better exception goes here @@ -576,7 +571,7 @@ protected final GetResult getFromSearcher(Get get, Engine.Searcher searcher) thr if (docIdAndVersion != null) { // don't release the searcher on this path, it is the // responsibility of the caller to call GetResult.release - return new GetResult(searcher, docIdAndVersion, false); + return new GetResult(searcher, docIdAndVersion); } else { Releasables.close(searcher); return GetResult.NOT_EXISTS; @@ -779,13 +774,7 @@ public SegmentsStats segmentsStats(boolean includeSegmentFileSizes, boolean incl } protected void fillSegmentStats(SegmentReader segmentReader, boolean includeSegmentFileSizes, SegmentsStats stats) { - stats.add(1, segmentReader.ramBytesUsed()); - 
stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader())); - stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader())); - stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader())); - stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader())); - stats.addPointsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPointsReader())); - stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader())); + stats.add(1); if (includeSegmentFileSizes) { stats.addFiles(getSegmentFileSizes(segmentReader)); } @@ -893,7 +882,6 @@ private void fillSegmentInfo(SegmentReader segmentReader, boolean verbose, boole } catch (IOException e) { logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } - segment.memoryInBytes = segmentReader.ramBytesUsed(); segment.segmentSort = info.info.getIndexSort(); if (verbose) { segment.ramTree = Accountables.namedAccountable("root", segmentReader); @@ -1346,7 +1334,7 @@ public String routing() { return this.doc.routing(); } - public List docs() { + public List docs() { return this.doc.docs(); } @@ -1561,21 +1549,18 @@ public static class GetResult implements Releasable { private final long version; private final DocIdAndVersion docIdAndVersion; private final Engine.Searcher searcher; - private final boolean fromTranslog; - public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null, null, false); + public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null, null); - private GetResult(boolean exists, long version, DocIdAndVersion docIdAndVersion, Engine.Searcher searcher, boolean fromTranslog) { + private GetResult(boolean exists, long version, DocIdAndVersion docIdAndVersion, Engine.Searcher searcher) { this.exists = exists; this.version = version; this.docIdAndVersion = docIdAndVersion; this.searcher = searcher; - this.fromTranslog = fromTranslog; - assert fromTranslog == false || searcher.getIndexReader() instanceof TranslogLeafReader; } - public GetResult(Engine.Searcher searcher, DocIdAndVersion docIdAndVersion, boolean fromTranslog) { - this(true, docIdAndVersion.version, docIdAndVersion, searcher, fromTranslog); + public GetResult(Engine.Searcher searcher, DocIdAndVersion docIdAndVersion) { + this(true, docIdAndVersion.version, docIdAndVersion, searcher); } public boolean exists() { @@ -1586,14 +1571,6 @@ public long version() { return this.version; } - /** - * Returns {@code true} iff the get was performed from a translog operation. Notes that this returns {@code false} - * if the get was performed on an in-memory Lucene segment created from the corresponding translog operation. 
- */ - public boolean isFromTranslog() { - return fromTranslog; - } - public Engine.Searcher searcher() { return this.searcher; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1113c05bc2e25..629e6bd68a9b5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -43,10 +44,6 @@ import org.elasticsearch.Assertions; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.core.Booleans; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.Releasable; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -58,13 +55,17 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParser; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -585,8 +586,7 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external try { final ElasticsearchDirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); - internalReaderManager = new ElasticsearchReaderManager(directoryReader, - new RamAccountingRefreshListener(engineConfig.getCircuitBreakerService())); + internalReaderManager = new ElasticsearchReaderManager(directoryReader); lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); success = true; @@ -607,26 +607,31 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external } } + private static final QueryCachingPolicy NEVER_CACHE_POLICY = new QueryCachingPolicy() { + @Override + public void onUse(Query query) { + + } + + @Override + public boolean shouldCache(Query query) { + return false; + } + }; + + public final AtomicLong translogGetCount = new AtomicLong(); // number of times realtime get was done on translog + public final AtomicLong translogInMemorySegmentsCount = new AtomicLong(); // number of times in-memory index needed to be created + private 
GetResult getFromTranslog(Get get, Translog.Index index, MappingLookup mappingLookup, DocumentParser documentParser, Function searcherWrapper) throws IOException { assert get.isReadFromTranslog(); - final SingleDocDirectoryReader inMemoryReader = new SingleDocDirectoryReader(shardId, index, mappingLookup, documentParser, - config().getAnalyzer()); + translogGetCount.incrementAndGet(); + final TranslogDirectoryReader inMemoryReader = new TranslogDirectoryReader(shardId, index, mappingLookup, documentParser, + config().getAnalyzer(), translogInMemorySegmentsCount::incrementAndGet); final Engine.Searcher searcher = new Engine.Searcher("realtime_get", ElasticsearchDirectoryReader.wrap(inMemoryReader, shardId), - config().getSimilarity(), config().getQueryCache(), config().getQueryCachingPolicy(), inMemoryReader); + config().getSimilarity(), null /*query cache disabled*/, NEVER_CACHE_POLICY, inMemoryReader); final Searcher wrappedSearcher = searcherWrapper.apply(searcher); - if (wrappedSearcher == searcher) { - searcher.close(); - assert inMemoryReader.assertMemorySegmentStatus(false); - final TranslogLeafReader translogLeafReader = new TranslogLeafReader(index); - return new GetResult(new Engine.Searcher("realtime_get", translogLeafReader, - IndexSearcher.getDefaultSimilarity(), null, IndexSearcher.getDefaultQueryCachingPolicy(), translogLeafReader), - new VersionsAndSeqNoResolver.DocIdAndVersion( - 0, index.version(), index.seqNo(), index.primaryTerm(), translogLeafReader, 0), true); - } else { - assert inMemoryReader.assertMemorySegmentStatus(true); - return getFromSearcher(get, wrappedSearcher); - } + return getFromSearcher(get, wrappedSearcher, true); } @Override @@ -675,10 +680,10 @@ public GetResult get(Get get, MappingLookup mappingLookup, DocumentParser docume assert versionValue.seqNo >= 0 : versionValue; refreshIfNeeded("realtime_get", versionValue.seqNo); } - return getFromSearcher(get, acquireSearcher("realtime_get", SearcherScope.INTERNAL, searcherWrapper)); + return getFromSearcher(get, acquireSearcher("realtime_get", SearcherScope.INTERNAL, searcherWrapper), false); } else { // we expose what has been externally expose in a point in time snapshot via an explicit refresh - return getFromSearcher(get, acquireSearcher("get", SearcherScope.EXTERNAL, searcherWrapper)); + return getFromSearcher(get, acquireSearcher("get", SearcherScope.EXTERNAL, searcherWrapper), false); } } } @@ -1136,7 +1141,7 @@ private boolean mayHaveBeenIndexedBefore(Index index) { return mayHaveBeenIndexBefore; } - private void addDocs(final List docs, final IndexWriter indexWriter) throws IOException { + private void addDocs(final List docs, final IndexWriter indexWriter) throws IOException { if (docs.size() > 1) { indexWriter.addDocuments(docs); } else { @@ -1145,8 +1150,8 @@ private void addDocs(final List docs, final IndexWriter i numDocAppends.inc(docs.size()); } - private void addStaleDocs(final List docs, final IndexWriter indexWriter) throws IOException { - for (ParseContext.Document doc : docs) { + private void addStaleDocs(final List docs, final IndexWriter indexWriter) throws IOException { + for (LuceneDocument doc : docs) { doc.add(softDeletesField); // soft-deleted every document before adding to Lucene } if (docs.size() > 1) { @@ -1239,7 +1244,7 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele return true; } - private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + private void updateDocs(final Term uid, final 
List docs, final IndexWriter indexWriter) throws IOException { if (docs.size() > 1) { indexWriter.softUpdateDocuments(uid, docs, softDeletesField); } else { @@ -1424,7 +1429,7 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) throws assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; tombstone.updateSeqID(delete.seqNo(), delete.primaryTerm()); tombstone.version().setLongValue(plan.versionOfDeletion); - final ParseContext.Document doc = tombstone.docs().get(0); + final LuceneDocument doc = tombstone.docs().get(0); assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + doc + " ]"; doc.add(softDeletesField); @@ -1553,7 +1558,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { // version field. tombstone.version().setLongValue(1L); assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]"; - final ParseContext.Document doc = tombstone.docs().get(0); + final LuceneDocument doc = tombstone.docs().get(0); assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; doc.add(softDeletesField); @@ -1913,6 +1918,14 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu } if (flush) { flush(false, true); + + // If any merges happened then we need to release the unmerged input segments so they can be deleted. A periodic refresh + // will do this eventually unless the user has disabled refreshes or isn't searching this shard frequently, in which + // case we should do something here to ensure a timely refresh occurs. However there's no real need to defer it nor to + // have any should-we-actually-refresh-here logic: we're already doing an expensive force-merge operation at the user's + // request and therefore don't expect any further writes so we may as well do the final refresh immediately and get it + // out of the way. + refresh("force-merge"); } } finally { store.decRef(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/RamAccountingRefreshListener.java b/server/src/main/java/org/elasticsearch/index/engine/RamAccountingRefreshListener.java deleted file mode 100644 index 0d5c89a7479a2..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/engine/RamAccountingRefreshListener.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SegmentReader; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.indices.breaker.CircuitBreakerService; - -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.function.BiConsumer; - -/** - * A refresh listener that tracks the amount of memory used by segments in the accounting circuit breaker. 
- */ -final class RamAccountingRefreshListener implements BiConsumer { - - private final CircuitBreakerService breakerService; - - RamAccountingRefreshListener(CircuitBreakerService breakerService) { - this.breakerService = breakerService; - } - - @Override - public void accept(ElasticsearchDirectoryReader reader, ElasticsearchDirectoryReader previousReader) { - final CircuitBreaker breaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - - // Construct a list of the previous segment readers, we only want to track memory used - // by new readers, so these will be exempted from the circuit breaking accounting. - // - // The Core CacheKey is used as the key for the set so that deletions still keep the correct - // accounting, as using the Reader or Reader's CacheKey causes incorrect accounting. - final Set prevReaders; - if (previousReader == null) { - prevReaders = Collections.emptySet(); - } else { - final List previousReaderLeaves = previousReader.leaves(); - prevReaders = new HashSet<>(previousReaderLeaves.size()); - for (LeafReaderContext lrc : previousReaderLeaves) { - prevReaders.add(Lucene.segmentReader(lrc.reader()).getCoreCacheHelper().getKey()); - } - } - - for (LeafReaderContext lrc : reader.leaves()) { - final SegmentReader segmentReader = Lucene.segmentReader(lrc.reader()); - // don't add the segment's memory unless it is not referenced by the previous reader - // (only new segments) - if (prevReaders.contains(segmentReader.getCoreCacheHelper().getKey()) == false) { - final long ramBytesUsed = segmentReader.ramBytesUsed(); - // add the segment memory to the breaker (non-breaking) - breaker.addWithoutBreaking(ramBytesUsed); - // and register a listener for when the segment is closed to decrement the - // breaker accounting - segmentReader.getCoreCacheHelper().addClosedListener(k -> breaker.addWithoutBreaking(-ramBytesUsed)); - } - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 2f9bb8acf8f43..54c10dbef9449 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -69,7 +69,6 @@ public class ReadOnlyEngine extends Engine { private final ElasticsearchReaderManager readerManager; private final IndexCommit indexCommit; private final Lock indexWriterLock; - private final RamAccountingRefreshListener refreshListener; private final SafeCommitInfo safeCommitInfo; private final CompletionStatsCache completionStatsCache; private final boolean requireCompleteHistory; @@ -96,7 +95,6 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats Function readerWrapperFunction, boolean requireCompleteHistory, boolean lazilyLoadSoftDeletes) { super(config); - this.refreshListener = new RamAccountingRefreshListener(engineConfig.getCircuitBreakerService()); this.requireCompleteHistory = requireCompleteHistory; try { Store store = config.getStore(); @@ -119,7 +117,7 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory); this.lazilyLoadSoftDeletes = lazilyLoadSoftDeletes; reader = wrapReader(open(indexCommit), readerWrapperFunction); - readerManager = new ElasticsearchReaderManager(reader, refreshListener); + readerManager = new ElasticsearchReaderManager(reader); assert translogStats != null || obtainLock : "mutiple translogs 
instances should not be opened at the same time"; this.translogStats = translogStats != null ? translogStats : translogStats(config, lastCommittedSegmentInfos); this.indexWriterLock = indexWriterLock; @@ -249,7 +247,7 @@ private static TranslogStats translogStats(final EngineConfig config, final Segm @Override public GetResult get(Get get, MappingLookup mappingLookup, DocumentParser documentParser, Function searcherWrapper) { - return getFromSearcher(get, acquireSearcher("get", SearcherScope.EXTERNAL, searcherWrapper)); + return getFromSearcher(get, acquireSearcher("get", SearcherScope.EXTERNAL, searcherWrapper), false); } @Override @@ -487,10 +485,6 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { } - protected void processReader(ElasticsearchDirectoryReader reader) { - refreshListener.accept(reader, null); - } - @Override public boolean refreshNeeded() { return false; @@ -566,7 +560,7 @@ public ShardLongFieldRange getRawFieldRange(String field) throws IOException { @Override public SearcherSupplier acquireSearcherSupplier(Function wrapper, SearcherScope scope) throws EngineException { final SearcherSupplier delegate = super.acquireSearcherSupplier(wrapper, scope); - return new SearcherSupplier(Function.identity()) { + return new SearcherSupplier(wrapper) { @Override protected void doClose() { delegate.close(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index 750dabf187bf1..d4efb5eacb1ab 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; +import org.elasticsearch.search.internal.FilterStoredFieldVisitor; import java.io.IOException; import java.util.Arrays; @@ -257,47 +258,6 @@ public StoredFieldsReader clone() { } - private static class FilterStoredFieldVisitor extends StoredFieldVisitor { - private final StoredFieldVisitor visitor; - - FilterStoredFieldVisitor(StoredFieldVisitor visitor) { - this.visitor = visitor; - } - - @Override - public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { - visitor.binaryField(fieldInfo, value); - } - - @Override - public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException { - visitor.stringField(fieldInfo, value); - } - - @Override - public void intField(FieldInfo fieldInfo, int value) throws IOException { - visitor.intField(fieldInfo, value); - } - - @Override - public void longField(FieldInfo fieldInfo, long value) throws IOException { - visitor.longField(fieldInfo, value); - } - - @Override - public void floatField(FieldInfo fieldInfo, float value) throws IOException { - visitor.floatField(fieldInfo, value); - } - - @Override - public void doubleField(FieldInfo fieldInfo, double value) throws IOException { - visitor.doubleField(fieldInfo, value); - } - - @Override - public Status needsField(FieldInfo fieldInfo) throws IOException { - return visitor.needsField(fieldInfo); - } - } } + } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java index ae00dbb8eaffa..d04bf41aa0971 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java +++ 
b/server/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -17,6 +17,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.elasticsearch.core.Nullable; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -42,7 +43,6 @@ public class Segment implements Writeable { public org.apache.lucene.util.Version version = null; public Boolean compound = null; public String mergeId; - public long memoryInBytes; public Sort segmentSort; public Accountable ramTree = null; public Map attributes; @@ -58,7 +58,9 @@ public Segment(StreamInput in) throws IOException { version = Lucene.parseVersionLenient(in.readOptionalString(), null); compound = in.readOptionalBoolean(); mergeId = in.readOptionalString(); - memoryInBytes = in.readLong(); + if (in.getVersion().before(Version.V_8_0_0)) { + in.readLong(); // memoryInBytes + } if (in.readBoolean()) { // verbose mode ramTree = readRamTree(in); @@ -122,13 +124,6 @@ public String getMergeId() { return this.mergeId; } - /** - * Estimation of the memory usage used by a segment. - */ - public long getMemoryInBytes() { - return this.memoryInBytes; - } - /** * Return the sort order of this segment, or null if the segment has no sort. */ @@ -171,7 +166,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(version.toString()); out.writeOptionalBoolean(compound); out.writeOptionalString(mergeId); - out.writeLong(memoryInBytes); + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeLong(0); // memoryInBytes + } boolean verbose = ramTree != null; out.writeBoolean(verbose); @@ -315,7 +312,6 @@ public String toString() { ", version='" + version + '\'' + ", compound=" + compound + ", mergeId='" + mergeId + '\'' + - ", memoryInBytes=" + memoryInBytes + (segmentSort != null ? 
", sort=" + segmentSort : "") + ", attributes=" + attributes + '}'; diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 624fcd16a062b..75a292790b1cf 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -26,13 +26,6 @@ public class SegmentsStats implements Writeable, ToXContentFragment { private long count; - private long memoryInBytes; - private long termsMemoryInBytes; - private long storedFieldsMemoryInBytes; - private long termVectorsMemoryInBytes; - private long normsMemoryInBytes; - private long pointsMemoryInBytes; - private long docValuesMemoryInBytes; private long indexWriterMemoryInBytes; private long versionMapMemoryInBytes; private long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE; @@ -44,13 +37,15 @@ public SegmentsStats() { public SegmentsStats(StreamInput in) throws IOException { count = in.readVLong(); - memoryInBytes = in.readLong(); - termsMemoryInBytes = in.readLong(); - storedFieldsMemoryInBytes = in.readLong(); - termVectorsMemoryInBytes = in.readLong(); - normsMemoryInBytes = in.readLong(); - pointsMemoryInBytes = in.readLong(); - docValuesMemoryInBytes = in.readLong(); + if (in.getVersion().before(Version.V_8_0_0)) { + in.readLong(); // memoryInBytes + in.readLong(); // termsMemoryInBytes + in.readLong(); // storedFieldsMemoryInBytes + in.readLong(); // termVectorsMemoryInBytes + in.readLong(); // normsMemoryInBytes + in.readLong(); // pointsMemoryInBytes + in.readLong(); // docValuesMemoryInBytes + } indexWriterMemoryInBytes = in.readLong(); versionMapMemoryInBytes = in.readLong(); bitsetMemoryInBytes = in.readLong(); @@ -65,33 +60,8 @@ public SegmentsStats(StreamInput in) throws IOException { this.files = files.build(); } - public void add(long count, long memoryInBytes) { + public void add(long count) { this.count += count; - this.memoryInBytes += memoryInBytes; - } - - public void addTermsMemoryInBytes(long termsMemoryInBytes) { - this.termsMemoryInBytes += termsMemoryInBytes; - } - - public void addStoredFieldsMemoryInBytes(long storedFieldsMemoryInBytes) { - this.storedFieldsMemoryInBytes += storedFieldsMemoryInBytes; - } - - public void addTermVectorsMemoryInBytes(long termVectorsMemoryInBytes) { - this.termVectorsMemoryInBytes += termVectorsMemoryInBytes; - } - - public void addNormsMemoryInBytes(long normsMemoryInBytes) { - this.normsMemoryInBytes += normsMemoryInBytes; - } - - public void addPointsMemoryInBytes(long pointsMemoryInBytes) { - this.pointsMemoryInBytes += pointsMemoryInBytes; - } - - public void addDocValuesMemoryInBytes(long docValuesMemoryInBytes) { - this.docValuesMemoryInBytes += docValuesMemoryInBytes; } public void addIndexWriterMemoryInBytes(long indexWriterMemoryInBytes) { @@ -129,13 +99,7 @@ public void add(SegmentsStats mergeStats) { return; } updateMaxUnsafeAutoIdTimestamp(mergeStats.maxUnsafeAutoIdTimestamp); - add(mergeStats.count, mergeStats.memoryInBytes); - addTermsMemoryInBytes(mergeStats.termsMemoryInBytes); - addStoredFieldsMemoryInBytes(mergeStats.storedFieldsMemoryInBytes); - addTermVectorsMemoryInBytes(mergeStats.termVectorsMemoryInBytes); - addNormsMemoryInBytes(mergeStats.normsMemoryInBytes); - addPointsMemoryInBytes(mergeStats.pointsMemoryInBytes); - addDocValuesMemoryInBytes(mergeStats.docValuesMemoryInBytes); + add(mergeStats.count); addIndexWriterMemoryInBytes(mergeStats.indexWriterMemoryInBytes); 
addVersionMapMemoryInBytes(mergeStats.versionMapMemoryInBytes); addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes); @@ -149,83 +113,6 @@ public long getCount() { return this.count; } - /** - * Estimation of the memory usage used by a segment. - */ - public long getMemoryInBytes() { - return this.memoryInBytes; - } - - public ByteSizeValue getMemory() { - return new ByteSizeValue(memoryInBytes); - } - - /** - * Estimation of the terms dictionary memory usage by a segment. - */ - public long getTermsMemoryInBytes() { - return this.termsMemoryInBytes; - } - - private ByteSizeValue getTermsMemory() { - return new ByteSizeValue(termsMemoryInBytes); - } - - /** - * Estimation of the stored fields memory usage by a segment. - */ - public long getStoredFieldsMemoryInBytes() { - return this.storedFieldsMemoryInBytes; - } - - private ByteSizeValue getStoredFieldsMemory() { - return new ByteSizeValue(storedFieldsMemoryInBytes); - } - - /** - * Estimation of the term vectors memory usage by a segment. - */ - public long getTermVectorsMemoryInBytes() { - return this.termVectorsMemoryInBytes; - } - - private ByteSizeValue getTermVectorsMemory() { - return new ByteSizeValue(termVectorsMemoryInBytes); - } - - /** - * Estimation of the norms memory usage by a segment. - */ - public long getNormsMemoryInBytes() { - return this.normsMemoryInBytes; - } - - private ByteSizeValue getNormsMemory() { - return new ByteSizeValue(normsMemoryInBytes); - } - - /** - * Estimation of the points memory usage by a segment. - */ - public long getPointsMemoryInBytes() { - return this.pointsMemoryInBytes; - } - - private ByteSizeValue getPointsMemory() { - return new ByteSizeValue(pointsMemoryInBytes); - } - - /** - * Estimation of the doc values memory usage by a segment. - */ - public long getDocValuesMemoryInBytes() { - return this.docValuesMemoryInBytes; - } - - private ByteSizeValue getDocValuesMemory() { - return new ByteSizeValue(docValuesMemoryInBytes); - } - /** * Estimation of the memory usage by index writer */ @@ -275,13 +162,14 @@ public long getMaxUnsafeAutoIdTimestamp() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.SEGMENTS); builder.field(Fields.COUNT, count); - builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, getMemory()); - builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, getTermsMemory()); - builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, getStoredFieldsMemory()); - builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, getTermVectorsMemory()); - builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, getNormsMemory()); - builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, getPointsMemory()); - builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, getDocValuesMemory()); + final ByteSizeValue zeroBytes = new ByteSizeValue(0); + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, zeroBytes); + builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, zeroBytes); + builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, zeroBytes); + builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, zeroBytes); + builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, zeroBytes); + 
builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, zeroBytes); + builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, zeroBytes); builder.humanReadableField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, getIndexWriterMemory()); builder.humanReadableField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, getVersionMapMemory()); builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); @@ -325,13 +213,15 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(count); - out.writeLong(memoryInBytes); - out.writeLong(termsMemoryInBytes); - out.writeLong(storedFieldsMemoryInBytes); - out.writeLong(termVectorsMemoryInBytes); - out.writeLong(normsMemoryInBytes); - out.writeLong(pointsMemoryInBytes); - out.writeLong(docValuesMemoryInBytes); + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeLong(0L); // memoryInBytes + out.writeLong(0L); // termsMemoryInBytes + out.writeLong(0L); // storedFieldsMemoryInBytes + out.writeLong(0L); // termVectorsMemoryInBytes + out.writeLong(0L); // normsMemoryInBytes + out.writeLong(0L); // pointsMemoryInBytes + out.writeLong(0L); // docValuesMemoryInBytes + } out.writeLong(indexWriterMemoryInBytes); out.writeLong(versionMapMemoryInBytes); out.writeLong(bitsetMemoryInBytes); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SingleDocDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/SingleDocDirectoryReader.java deleted file mode 100644 index 00262b9684e71..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/engine/SingleDocDirectoryReader.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.index.BinaryDocValues; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.LeafMetaData; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PointValues; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.index.Terms; -import org.apache.lucene.store.ByteBuffersDirectory; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.index.mapper.DocumentParser; -import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; - -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; - -/** - * A {@link DirectoryReader} contains a single leaf reader delegating to an in-memory Lucene segment that is lazily created from - * a single document. - */ -final class SingleDocDirectoryReader extends DirectoryReader { - private final SingleDocLeafReader leafReader; - - SingleDocDirectoryReader(ShardId shardId, Translog.Index operation, MappingLookup mappingLookup, DocumentParser documentParser, - Analyzer analyzer) throws IOException { - this(new SingleDocLeafReader(shardId, operation, mappingLookup, documentParser, analyzer)); - } - - private SingleDocDirectoryReader(SingleDocLeafReader leafReader) throws IOException { - super(leafReader.directory, new LeafReader[]{leafReader}, null); - this.leafReader = leafReader; - } - - boolean assertMemorySegmentStatus(boolean loaded) { - return leafReader.assertMemorySegmentStatus(loaded); - } - - private static UnsupportedOperationException unsupported() { - assert false : "unsupported operation"; - return new UnsupportedOperationException(); - } - - @Override - protected DirectoryReader doOpenIfChanged() { - throw unsupported(); - } - - @Override - protected DirectoryReader doOpenIfChanged(IndexCommit commit) { - throw unsupported(); - } - - @Override - protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) { - throw unsupported(); - } - - @Override - public long getVersion() { - throw unsupported(); - } - - @Override - public boolean isCurrent() { - throw unsupported(); - } - - @Override - public IndexCommit getIndexCommit() { - throw unsupported(); - } - - @Override - protected void doClose() throws IOException { - leafReader.close(); - } - - @Override - public CacheHelper getReaderCacheHelper() { - return leafReader.getReaderCacheHelper(); - } - - private static class SingleDocLeafReader extends LeafReader { - - private final ShardId shardId; - private final Translog.Index operation; - private final MappingLookup mappingLookup; - private final DocumentParser documentParser; - private final Analyzer analyzer; - 
private final Directory directory; - private final AtomicReference delegate = new AtomicReference<>(); - - SingleDocLeafReader(ShardId shardId, Translog.Index operation, MappingLookup mappingLookup, DocumentParser documentParser, - Analyzer analyzer) { - this.shardId = shardId; - this.operation = operation; - this.mappingLookup = mappingLookup; - this.documentParser = documentParser; - this.analyzer = analyzer; - this.directory = new ByteBuffersDirectory(); - } - - private LeafReader getDelegate() { - ensureOpen(); - LeafReader reader = delegate.get(); - if (reader == null) { - synchronized (this) { - reader = delegate.get(); - if (reader == null) { - reader = createInMemoryLeafReader(); - final LeafReader existing = delegate.getAndSet(reader); - assert existing == null; - } - } - } - return reader; - } - - private LeafReader createInMemoryLeafReader() { - assert Thread.holdsLock(this); - final ParsedDocument parsedDocs = documentParser.parseDocument(new SourceToParse(shardId.getIndexName(), operation.id(), - operation.source(), XContentHelper.xContentType(operation.source()), operation.routing(), Map.of()), mappingLookup); - - parsedDocs.updateSeqID(operation.seqNo(), operation.primaryTerm()); - parsedDocs.version().setLongValue(operation.version()); - final IndexWriterConfig writeConfig = new IndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE); - try (IndexWriter writer = new IndexWriter(directory, writeConfig)) { - writer.addDocument(parsedDocs.rootDoc()); - final DirectoryReader reader = open(writer); - if (reader.leaves().size() != 1 || reader.leaves().get(0).reader().numDocs() != 1) { - reader.close(); - throw new IllegalStateException("Expected a single document segment; " + - "but [" + reader.leaves().size() + " segments with " + reader.leaves().get(0).reader().numDocs() + " documents"); - } - return reader.leaves().get(0).reader(); - } catch (IOException e) { - throw new EngineException(shardId, "failed to create an in-memory segment for get [" + operation.id() + "]", e); - } - } - - @Override - public CacheHelper getCoreCacheHelper() { - return getDelegate().getCoreCacheHelper(); - } - - @Override - public CacheHelper getReaderCacheHelper() { - return getDelegate().getReaderCacheHelper(); - } - - @Override - public Terms terms(String field) throws IOException { - return getDelegate().terms(field); - } - - @Override - public NumericDocValues getNumericDocValues(String field) throws IOException { - return getDelegate().getNumericDocValues(field); - } - - @Override - public BinaryDocValues getBinaryDocValues(String field) throws IOException { - return getDelegate().getBinaryDocValues(field); - } - - @Override - public SortedDocValues getSortedDocValues(String field) throws IOException { - return getDelegate().getSortedDocValues(field); - } - - @Override - public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException { - return getDelegate().getSortedNumericDocValues(field); - } - - @Override - public SortedSetDocValues getSortedSetDocValues(String field) throws IOException { - return getDelegate().getSortedSetDocValues(field); - } - - @Override - public NumericDocValues getNormValues(String field) throws IOException { - return getDelegate().getNormValues(field); - } - - @Override - public FieldInfos getFieldInfos() { - return getDelegate().getFieldInfos(); - } - - @Override - public Bits getLiveDocs() { - return getDelegate().getLiveDocs(); - } - - @Override - public PointValues getPointValues(String field) throws IOException { - 
return getDelegate().getPointValues(field); - } - - @Override - public void checkIntegrity() throws IOException { - } - - @Override - public LeafMetaData getMetaData() { - return getDelegate().getMetaData(); - } - - @Override - public Fields getTermVectors(int docID) throws IOException { - return getDelegate().getTermVectors(docID); - } - - @Override - public int numDocs() { - return 1; - } - - @Override - public int maxDoc() { - return 1; - } - - synchronized boolean assertMemorySegmentStatus(boolean loaded) { - if (loaded) { - assert delegate.get() != null : - "Expected an in memory segment was loaded; but it wasn't. Please check the reader wrapper implementation"; - } else { - assert delegate.get() == null : - "Expected an in memory segment wasn't loaded; but it was. Please check the reader wrapper implementation"; - } - return true; - } - - @Override - public void document(int docID, StoredFieldVisitor visitor) throws IOException { - assert assertMemorySegmentStatus(true); - getDelegate().document(docID, visitor); - } - - @Override - protected void doClose() throws IOException { - IOUtils.close(delegate.get(), directory); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java new file mode 100644 index 0000000000000..d14af5a264d37 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java @@ -0,0 +1,579 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.index.BaseTermsEnum; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafMetaData; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.fieldvisitor.FieldNamesProvidingStoredFieldsVisitor; +import org.elasticsearch.index.mapper.DocumentParser; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * A {@link DirectoryReader} that contains a single translog indexing operation. + * This can be used during a realtime get to access documents that haven't been refreshed yet. + * In the normal case, all information relevant to resolve the realtime get is mocked out + * to provide fast access to _id and _source. In case where more values are requested + * (e.g. access to other stored fields) etc., this reader will index the document + * into an in-memory Lucene segment that is created on-demand. 
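
[Editor's note, not part of the diff: the javadoc above describes the key design of TranslogDirectoryReader, namely that a realtime get touching only fields already carried by the translog operation never pays for building an in-memory segment, while any other stored-field access triggers a one-time lazy indexing step. The following is a hedged, self-contained sketch of that behaviour; every name in it is a hypothetical stand-in, not the real Elasticsearch or Lucene API.]

```java
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for a lazy translog-backed document reader.
final class TranslogBackedDoc {
    private static final Set<String> FAST_PATH_FIELDS = Set.of("_id", "_source", "_routing");

    private final Map<String, String> operation;          // values carried by the translog op
    private final AtomicInteger segmentsBuilt = new AtomicInteger();
    private volatile Map<String, String> segment;         // stands in for the in-memory segment

    TranslogBackedDoc(Map<String, String> operation) {
        this.operation = operation;
    }

    String storedField(String name) {
        if (FAST_PATH_FIELDS.contains(name)) {
            return operation.get(name);                   // fast path: no segment is ever built
        }
        Map<String, String> s = segment;
        if (s == null) {
            synchronized (this) {
                if ((s = segment) == null) {
                    segmentsBuilt.incrementAndGet();
                    segment = s = Map.copyOf(operation);  // stands in for indexing the document
                }
            }
        }
        return s.get(name);                               // slow path: served from the lazy segment
    }

    int segmentsBuilt() {
        return segmentsBuilt.get();
    }

    public static void main(String[] args) {
        TranslogBackedDoc doc =
            new TranslogBackedDoc(Map.of("_id", "1", "_source", "{\"f\":1}", "other", "x"));
        doc.storedField("_source");                       // stays on the fast path
        assert doc.segmentsBuilt() == 0;
        doc.storedField("other");                         // forces the one-time segment build
        assert doc.segmentsBuilt() == 1;
    }
}
```
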
+ */ +final class TranslogDirectoryReader extends DirectoryReader { + private final TranslogLeafReader leafReader; + + TranslogDirectoryReader(ShardId shardId, Translog.Index operation, MappingLookup mappingLookup, DocumentParser documentParser, + Analyzer analyzer, Runnable onSegmentCreated) throws IOException { + this(new TranslogLeafReader(shardId, operation, mappingLookup, documentParser, analyzer, onSegmentCreated)); + } + + private TranslogDirectoryReader(TranslogLeafReader leafReader) throws IOException { + super(leafReader.directory, new LeafReader[]{leafReader}, null); + this.leafReader = leafReader; + } + + private static UnsupportedOperationException unsupported() { + assert false : "unsupported operation"; + return new UnsupportedOperationException(); + } + + public TranslogLeafReader getLeafReader() { + return leafReader; + } + + @Override + protected DirectoryReader doOpenIfChanged() { + throw unsupported(); + } + + @Override + protected DirectoryReader doOpenIfChanged(IndexCommit commit) { + throw unsupported(); + } + + @Override + protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) { + throw unsupported(); + } + + @Override + public long getVersion() { + throw unsupported(); + } + + @Override + public boolean isCurrent() { + throw unsupported(); + } + + @Override + public IndexCommit getIndexCommit() { + throw unsupported(); + } + + @Override + protected void doClose() throws IOException { + leafReader.close(); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return leafReader.getReaderCacheHelper(); + } + + private static class TranslogLeafReader extends LeafReader { + + private static final FieldInfo FAKE_SOURCE_FIELD + = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, + DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); + private static final FieldInfo FAKE_ROUTING_FIELD + = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, + DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); + private static final FieldInfo FAKE_ID_FIELD + = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.DOCS, + DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); + private static Set TRANSLOG_FIELD_NAMES = + Sets.newHashSet(SourceFieldMapper.NAME, RoutingFieldMapper.NAME, IdFieldMapper.NAME); + + + private final ShardId shardId; + private final Translog.Index operation; + private final MappingLookup mappingLookup; + private final DocumentParser documentParser; + private final Analyzer analyzer; + private final Directory directory; + private final Runnable onSegmentCreated; + + private final AtomicReference delegate = new AtomicReference<>(); + private final BytesRef uid; + + TranslogLeafReader(ShardId shardId, Translog.Index operation, MappingLookup mappingLookup, DocumentParser documentParser, + Analyzer analyzer, Runnable onSegmentCreated) { + this.shardId = shardId; + this.operation = operation; + this.mappingLookup = mappingLookup; + this.documentParser = documentParser; + this.analyzer = analyzer; + this.onSegmentCreated = onSegmentCreated; + this.directory = new ByteBuffersDirectory(); + this.uid = Uid.encodeId(operation.id()); + } + + private LeafReader getDelegate() { + ensureOpen(); + LeafReader reader = delegate.get(); + if (reader == null) { + synchronized (this) { + ensureOpen(); + reader = delegate.get(); + if (reader == null) { + reader = createInMemoryLeafReader(); + final LeafReader existing = 
delegate.getAndSet(reader); + assert existing == null; + onSegmentCreated.run(); + } + } + } + return reader; + } + + private LeafReader createInMemoryLeafReader() { + assert Thread.holdsLock(this); + final ParsedDocument parsedDocs = documentParser.parseDocument(new SourceToParse(shardId.getIndexName(), operation.id(), + operation.source(), XContentHelper.xContentType(operation.source()), operation.routing(), Map.of()), mappingLookup); + + parsedDocs.updateSeqID(operation.seqNo(), operation.primaryTerm()); + parsedDocs.version().setLongValue(operation.version()); + final IndexWriterConfig writeConfig = new IndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE); + try (IndexWriter writer = new IndexWriter(directory, writeConfig)) { + writer.addDocument(parsedDocs.rootDoc()); + final DirectoryReader reader = open(writer); + if (reader.leaves().size() != 1 || reader.leaves().get(0).reader().numDocs() != 1) { + reader.close(); + throw new IllegalStateException("Expected a single document segment; " + + "but [" + reader.leaves().size() + " segments with " + reader.leaves().get(0).reader().numDocs() + " documents"); + } + return reader.leaves().get(0).reader(); + } catch (IOException e) { + throw new EngineException(shardId, "failed to create an in-memory segment for get [" + operation.id() + "]", e); + } + } + + @Override + public CacheHelper getCoreCacheHelper() { + return getDelegate().getCoreCacheHelper(); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return null; + } + + @Override + public Terms terms(String field) throws IOException { + if (delegate.get() == null) { + // override this for VersionsAndSeqNoResolver + if (field.equals(IdFieldMapper.NAME)) { + return new FakeTerms(uid); + } + } + return getDelegate().terms(field); + } + + @Override + public NumericDocValues getNumericDocValues(String field) throws IOException { + if (delegate.get() == null) { + // override this for VersionsAndSeqNoResolver + if (field.equals(VersionFieldMapper.NAME)) { + return new FakeNumericDocValues(operation.version()); + } + if (field.equals(SeqNoFieldMapper.NAME)) { + return new FakeNumericDocValues(operation.seqNo()); + } + if (field.equals(SeqNoFieldMapper.PRIMARY_TERM_NAME)) { + return new FakeNumericDocValues(operation.primaryTerm()); + } + } + return getDelegate().getNumericDocValues(field); + } + + @Override + public BinaryDocValues getBinaryDocValues(String field) throws IOException { + return getDelegate().getBinaryDocValues(field); + } + + @Override + public SortedDocValues getSortedDocValues(String field) throws IOException { + return getDelegate().getSortedDocValues(field); + } + + @Override + public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException { + return getDelegate().getSortedNumericDocValues(field); + } + + @Override + public SortedSetDocValues getSortedSetDocValues(String field) throws IOException { + return getDelegate().getSortedSetDocValues(field); + } + + @Override + public NumericDocValues getNormValues(String field) throws IOException { + return getDelegate().getNormValues(field); + } + + @Override + public FieldInfos getFieldInfos() { + return getDelegate().getFieldInfos(); + } + + @Override + public Bits getLiveDocs() { + return null; + } + + @Override + public PointValues getPointValues(String field) throws IOException { + return getDelegate().getPointValues(field); + } + + @Override + public void checkIntegrity() throws IOException { + } + + @Override + public LeafMetaData getMetaData() { + return 
getDelegate().getMetaData(); + } + + @Override + public Fields getTermVectors(int docID) throws IOException { + return getDelegate().getTermVectors(docID); + } + + @Override + public int numDocs() { + return 1; + } + + @Override + public int maxDoc() { + return 1; + } + + @Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + assert docID == 0; + if (docID != 0) { + throw new IllegalArgumentException("no such doc ID " + docID); + } + if (delegate.get() == null) { + if (visitor instanceof FieldNamesProvidingStoredFieldsVisitor) { + // override this for ShardGetService + if (TRANSLOG_FIELD_NAMES.containsAll(((FieldNamesProvidingStoredFieldsVisitor) visitor).getFieldNames())) { + readStoredFieldsDirectly(visitor); + return; + } + } + } + + getDelegate().document(docID, visitor); + } + + private void readStoredFieldsDirectly(StoredFieldVisitor visitor) throws IOException { + if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) { + BytesReference sourceBytes = operation.source(); + assert BytesReference.toBytes(sourceBytes) == sourceBytes.toBytesRef().bytes; + SourceFieldMapper mapper = mappingLookup.getMapping().getMetadataMapperByClass(SourceFieldMapper.class); + if (mapper != null) { + try { + sourceBytes = mapper.applyFilters(sourceBytes, null); + } catch (IOException e) { + throw new IOException("Failed to reapply filters after reading from translog", e); + } + } + if (sourceBytes != null) { + visitor.binaryField(FAKE_SOURCE_FIELD, BytesReference.toBytes(sourceBytes)); + } + } + if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8)); + } + if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) { + final byte[] id = new byte[uid.length]; + System.arraycopy(uid.bytes, uid.offset, id, 0, uid.length); + visitor.binaryField(FAKE_ID_FIELD, id); + } + } + + @Override + protected synchronized void doClose() throws IOException { + IOUtils.close(delegate.get(), directory); + } + } + + private static class FakeTerms extends Terms { + private final BytesRef uid; + + FakeTerms(BytesRef uid) { + this.uid = uid; + } + + @Override + public TermsEnum iterator() throws IOException { + return new FakeTermsEnum(uid); + } + + @Override + public long size() throws IOException { + return 1; + } + + @Override + public long getSumTotalTermFreq() throws IOException { + return 1; + } + + @Override + public long getSumDocFreq() throws IOException { + return 1; + } + + @Override + public int getDocCount() throws IOException { + return 1; + } + + @Override + public boolean hasFreqs() { + return false; + } + + @Override + public boolean hasOffsets() { + return false; + } + + @Override + public boolean hasPositions() { + return false; + } + + @Override + public boolean hasPayloads() { + return false; + } + } + + private static class FakeTermsEnum extends BaseTermsEnum { + private final BytesRef term; + private long position = -1; + + FakeTermsEnum(BytesRef term) { + this.term = term; + } + + @Override + public SeekStatus seekCeil(BytesRef text) throws IOException { + int cmp = text.compareTo(term); + if (cmp == 0) { + position = 0; + return SeekStatus.FOUND; + } else if (cmp < 0) { + position = 0; + return SeekStatus.NOT_FOUND; + } + position = Long.MAX_VALUE; + return SeekStatus.END; + } + + @Override + public void seekExact(long ord) throws IOException { + position = ord; + } + + @Override + 
public BytesRef term() throws IOException { + assert position == 0; + return term; + } + + @Override + public long ord() throws IOException { + return position; + } + + @Override + public int docFreq() throws IOException { + return 1; + } + + @Override + public long totalTermFreq() throws IOException { + return 1; + } + + @Override + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return new FakePostingsEnum(term); + } + + @Override + public ImpactsEnum impacts(int flags) throws IOException { + throw unsupported(); + } + + @Override + public BytesRef next() throws IOException { + return ++position == 0 ? term : null; + } + } + + private static class FakePostingsEnum extends PostingsEnum { + private final BytesRef term; + + private int iter = -1; + + private FakePostingsEnum(BytesRef term) { + this.term = term; + } + + @Override + public int freq() { + return 1; + } + + @Override + public int nextPosition() { + return 0; + } + + @Override + public int startOffset() { + return 0; + } + + @Override + public int endOffset() { + return term.length; + } + + @Override + public BytesRef getPayload() { + return null; + } + + @Override + public int docID() { + return iter > 0 ? NO_MORE_DOCS : iter; + } + + @Override + public int nextDoc() { + return ++iter == 0 ? 0 : NO_MORE_DOCS; + } + + @Override + public int advance(int target) { + int doc; + while ((doc = nextDoc()) < target) { + } + return doc; + } + + @Override + public long cost() { + return 0; + } + } + + private static class FakeNumericDocValues extends NumericDocValues { + private final long value; + private final DocIdSetIterator disi = DocIdSetIterator.all(1); + + FakeNumericDocValues(long value) { + this.value = value; + } + + @Override + public long longValue() { + return value; + } + + @Override + public boolean advanceExact(int target) throws IOException { + return disi.advance(target) == target; + } + + @Override + public int docID() { + return disi.docID(); + } + + @Override + public int nextDoc() throws IOException { + return disi.nextDoc(); + } + + @Override + public int advance(int target) throws IOException { + return disi.advance(target); + } + + @Override + public long cost() { + return disi.cost(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java deleted file mode 100644 index 548b25b29e4cd..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.index.engine; - -import org.apache.lucene.index.BinaryDocValues; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.LeafMetaData; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PointValues; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.index.Terms; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.index.mapper.RoutingFieldMapper; -import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.translog.Translog; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.Set; - -/** - * Internal class that mocks a single doc read from the transaction log as a leaf reader. - */ -public final class TranslogLeafReader extends LeafReader { - - private final Translog.Index operation; - private static final FieldInfo FAKE_SOURCE_FIELD - = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0, 0, 0, false); - private static final FieldInfo FAKE_ROUTING_FIELD - = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0, 0, 0, false); - private static final FieldInfo FAKE_ID_FIELD - = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0, 0, 0, false); - public static Set ALL_FIELD_NAMES = Sets.newHashSet(FAKE_SOURCE_FIELD.name, FAKE_ROUTING_FIELD.name, FAKE_ID_FIELD.name); - - TranslogLeafReader(Translog.Index operation) { - this.operation = operation; - } - @Override - public CacheHelper getCoreCacheHelper() { - throw new UnsupportedOperationException(); - } - - @Override - public Terms terms(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public NumericDocValues getNumericDocValues(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public BinaryDocValues getBinaryDocValues(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public SortedDocValues getSortedDocValues(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public SortedNumericDocValues getSortedNumericDocValues(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public SortedSetDocValues getSortedSetDocValues(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public NumericDocValues getNormValues(String field) { - throw new UnsupportedOperationException(); - } - - @Override - public FieldInfos getFieldInfos() { - throw new UnsupportedOperationException(); - } - - @Override - public Bits getLiveDocs() { - throw new UnsupportedOperationException(); - } - - @Override - public PointValues getPointValues(String field) { - throw new UnsupportedOperationException(); 
- } - - @Override - public void checkIntegrity() { - - } - - @Override - public LeafMetaData getMetaData() { - throw new UnsupportedOperationException(); - } - - @Override - public Fields getTermVectors(int docID) { - throw new UnsupportedOperationException(); - } - - @Override - public int numDocs() { - return 1; - } - - @Override - public int maxDoc() { - return 1; - } - - @Override - public void document(int docID, StoredFieldVisitor visitor) throws IOException { - if (docID != 0) { - throw new IllegalArgumentException("no such doc ID " + docID); - } - if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) { - assert operation.source().toBytesRef().offset == 0; - assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length; - visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes); - } - if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) { - visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8)); - } - if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) { - BytesRef bytesRef = Uid.encodeId(operation.id()); - final byte[] id = new byte[bytesRef.length]; - System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length); - visitor.binaryField(FAKE_ID_FIELD, id); - } - } - - @Override - protected void doClose() { - - } - - @Override - public CacheHelper getReaderCacheHelper() { - throw new UnsupportedOperationException(); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index f50fe5f023290..4c3a8fbc5fdbf 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -23,8 +23,8 @@ import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.DocValueFormat; @@ -142,17 +142,17 @@ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { } /** Whether missing values should be sorted first. */ - public final boolean sortMissingFirst(Object missingValue) { + public static final boolean sortMissingFirst(Object missingValue) { return "_first".equals(missingValue); } /** Whether missing values should be sorted last, this is the default. */ - public final boolean sortMissingLast(Object missingValue) { + public static final boolean sortMissingLast(Object missingValue) { return missingValue == null || "_last".equals(missingValue); } /** Return the missing object value according to the reduced type of the comparator. 
*/ - public final Object missingObject(Object missingValue, boolean reversed) { + public Object missingObject(Object missingValue, boolean reversed) { if (sortMissingFirst(missingValue) || sortMissingLast(missingValue)) { final boolean min = sortMissingFirst(missingValue) ^ reversed; switch (reducedType()) { @@ -199,7 +199,7 @@ public final Object missingObject(Object missingValue, boolean reversed) { case STRING: case STRING_VAL: if (missingValue instanceof BytesRef) { - return (BytesRef) missingValue; + return missingValue; } else if (missingValue instanceof byte[]) { return new BytesRef((byte[]) missingValue); } else { diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java index 72b69215508a3..8dfa8a1e14502 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java @@ -12,9 +12,9 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.search.SortedNumericSortField; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource; @@ -156,16 +156,31 @@ private XFieldComparatorSource comparatorSource( return dateNanosComparatorSource(missingValue, sortMode, nested); default: assert targetNumericType.isFloatingPoint() == false; - return new LongValuesComparatorSource(this, missingValue, sortMode, nested); + return new LongValuesComparatorSource(this, missingValue, sortMode, nested, targetNumericType); } } - protected XFieldComparatorSource dateComparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { - return new LongValuesComparatorSource(this, missingValue, sortMode, nested); + protected XFieldComparatorSource dateComparatorSource( + @Nullable Object missingValue, + MultiValueMode sortMode, + Nested nested + ) { + return new LongValuesComparatorSource(this, missingValue, sortMode, nested, NumericType.DATE); } - protected XFieldComparatorSource dateNanosComparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { - return new LongValuesComparatorSource(this, missingValue, sortMode, nested, dvs -> convertNumeric(dvs, DateUtils::toNanoSeconds)); + protected XFieldComparatorSource dateNanosComparatorSource( + @Nullable Object missingValue, + MultiValueMode sortMode, + Nested nested + ) { + return new LongValuesComparatorSource( + this, + missingValue, + sortMode, + nested, + dvs -> convertNumeric(dvs, DateUtils::toNanoSeconds), + NumericType.DATE_NANOSECONDS + ); } /** diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 3f35a3f8e9609..86d007072197e 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -255,6 +255,10 @@ public abstract static class Geometry extends ScriptDocValues { public abstract GeoBoundingBox 
getBoundingBox(); /** Returns the centroid of this geometry */ public abstract GeoPoint getCentroid(); + /** Returns the width of the bounding box diagonal in the spherical Mercator projection (meters) */ + public abstract double getMercatorWidth(); + /** Returns the height of the bounding box diagonal in the spherical Mercator projection (meters) */ + public abstract double getMercatorHeight(); } public static final class GeoPoints extends Geometry { @@ -418,6 +422,16 @@ public GeoPoint getCentroid() { return size() == 0 ? null : centroid; } + @Override + public double getMercatorWidth() { + return 0; + } + + @Override + public double getMercatorHeight() { + return 0; + } + @Override public GeoBoundingBox getBoundingBox() { return size() == 0 ? null : boundingBox; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 6c144a914e2b5..d237819502006 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -16,12 +16,14 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.BitSet; -import org.elasticsearch.core.Nullable; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.fielddata.LeafNumericFieldData; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.LeafNumericFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; @@ -38,18 +40,20 @@ public class LongValuesComparatorSource extends IndexFieldData.XFieldComparatorS private final IndexNumericFieldData indexFieldData; private final Function converter; + private final NumericType targetNumericType; public LongValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, - MultiValueMode sortMode, Nested nested) { - this(indexFieldData, missingValue, sortMode, nested, null); + MultiValueMode sortMode, Nested nested, NumericType targetNumericType) { + this(indexFieldData, missingValue, sortMode, nested, null, targetNumericType); } public LongValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, Nested nested, - Function converter) { + Function converter, NumericType targetNumericType) { super(missingValue, sortMode, nested); this.indexFieldData = indexFieldData; this.converter = converter; + this.targetNumericType = targetNumericType; } @Override @@ -128,4 +132,16 @@ protected long docValue() { } }; } + + @Override + public Object missingObject(Object missingValue, boolean reversed) { + if (targetNumericType == NumericType.DATE_NANOSECONDS) { + // special case to prevent negative values that would cause invalid nanosecond ranges + if (sortMissingFirst(missingValue) || sortMissingLast(missingValue)) { + final boolean min = sortMissingFirst(missingValue) ^ 
reversed; + return min ? 0L : DateUtils.MAX_NANOSECOND; + } + } + return super.missingObject(missingValue, reversed); + } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java index c0ed3ddf00609..22c745b5c3af0 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java @@ -95,7 +95,7 @@ public SortedSetDocValues ordinals(ValuesHolder values) { if (multiValued) { return new MultiDocs(this, values); } else { - return (SortedSetDocValues) DocValues.singleton(new SingleDocs(this, values)); + return DocValues.singleton(new SingleDocs(this, values)); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java index 2e0a6eee2b152..308fb8602bc74 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java @@ -49,7 +49,7 @@ public Collection getChildResources() { @Override public SortedSetDocValues ordinals(ValuesHolder values) { - return (SortedSetDocValues) DocValues.singleton(new Docs(this, values)); + return DocValues.singleton(new Docs(this, values)); } private static class Docs extends AbstractSortedDocValues { diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index e674971daedc9..000ecd21edcd9 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -31,7 +31,7 @@ import java.util.function.Function; public abstract class AbstractIndexOrdinalsFieldData implements IndexOrdinalsFieldData { - private static final Logger logger = LogManager.getLogger(AbstractBinaryDVLeafFieldData.class); + private static final Logger logger = LogManager.getLogger(AbstractIndexOrdinalsFieldData.class); private final String fieldName; private final ValuesSourceType valuesSourceType; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericIndexFieldData.java index af4c3e9197be6..a3c7e649ad100 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericIndexFieldData.java @@ -87,23 +87,37 @@ protected boolean sortRequiresCustomComparator() { } @Override - protected XFieldComparatorSource dateComparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { + protected XFieldComparatorSource dateComparatorSource( + Object missingValue, + MultiValueMode sortMode, + Nested nested + ) { if (numericType == NumericType.DATE_NANOSECONDS) { // converts date_nanos values to millisecond resolution return new LongValuesComparatorSource(this, missingValue, - sortMode, nested, dvs -> convertNumeric(dvs, DateUtils::toMilliSeconds)); + sortMode, nested, dvs -> convertNumeric(dvs, DateUtils::toMilliSeconds), NumericType.DATE); } - 
return new LongValuesComparatorSource(this, missingValue, sortMode, nested); + return new LongValuesComparatorSource(this, missingValue, sortMode, nested, NumericType.DATE); } @Override - protected XFieldComparatorSource dateNanosComparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { + protected XFieldComparatorSource dateNanosComparatorSource( + Object missingValue, + MultiValueMode sortMode, + Nested nested + ) { if (numericType == NumericType.DATE) { // converts date values to nanosecond resolution - return new LongValuesComparatorSource(this, missingValue, - sortMode, nested, dvs -> convertNumeric(dvs, DateUtils::toNanoSeconds)); - } - return new LongValuesComparatorSource(this, missingValue, sortMode, nested); + return new LongValuesComparatorSource( + this, + missingValue, + sortMode, + nested, + dvs -> convertNumeric(dvs, DateUtils::toNanoSeconds), + NumericType.DATE_NANOSECONDS + ); + } + return new LongValuesComparatorSource(this, missingValue, sortMode, nested, NumericType.DATE_NANOSECONDS); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java index afac4c68aa9da..8b394c35ea107 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java @@ -14,8 +14,8 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -31,6 +31,9 @@ import java.util.function.Function; +import static org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.sortMissingFirst; +import static org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.sortMissingLast; + public class SortedSetOrdinalsIndexFieldData extends AbstractIndexOrdinalsFieldData { public static class Builder implements IndexFieldData.Builder { @@ -76,12 +79,12 @@ public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMod */ if (nested != null || (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) || - (source.sortMissingLast(missingValue) == false && source.sortMissingFirst(missingValue) == false)) { + (sortMissingLast(missingValue) == false && sortMissingFirst(missingValue) == false)) { return new SortField(getFieldName(), source, reverse); } SortField sortField = new SortedSetSortField(getFieldName(), reverse, sortMode == MultiValueMode.MAX ? SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); - sortField.setMissingValue(source.sortMissingLast(missingValue) ^ reverse ? + sortField.setMissingValue(sortMissingLast(missingValue) ^ reverse ? 
SortedSetSortField.STRING_LAST : SortedSetSortField.STRING_FIRST); return sortField; } } diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java index a4311826b3a06..811ed8aa7bb7f 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java @@ -9,6 +9,7 @@ import org.apache.lucene.index.FieldInfo; +import java.util.HashSet; import java.util.Set; /** @@ -26,6 +27,13 @@ public CustomFieldsVisitor(Set<String> fields, boolean loadSource) { this.fields = fields; } + @Override + public Set<String> getFieldNames() { + Set<String> fields = new HashSet<>(super.getFieldNames()); + fields.addAll(this.fields); + return fields; + } + @Override public Status needsField(FieldInfo fieldInfo) { if (super.needsField(fieldInfo) == Status.YES) { diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldNamesProvidingStoredFieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldNamesProvidingStoredFieldsVisitor.java new file mode 100644 index 0000000000000..fc3b07fd7d19e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldNamesProvidingStoredFieldsVisitor.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.fieldvisitor; + +import org.apache.lucene.index.StoredFieldVisitor; + +import java.util.Set; + +/** + * Stored fields visitor which provides information about the field names that will be requested + */ +public abstract class FieldNamesProvidingStoredFieldsVisitor extends StoredFieldVisitor { + public abstract Set<String> getFieldNames(); +} diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index 7a9baa57a6bb4..1b3551bbddab3 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -35,7 +35,7 @@ /** * Base {@link StoredFieldVisitor} that retrieves all non-redundant metadata.
*/ -public class FieldsVisitor extends StoredFieldVisitor { +public class FieldsVisitor extends FieldNamesProvidingStoredFieldsVisitor { private static final Set BASE_REQUIRED_FIELDS = unmodifiableSet(newHashSet( IdFieldMapper.NAME, RoutingFieldMapper.NAME)); @@ -76,6 +76,11 @@ public Status needsField(FieldInfo fieldInfo) { : Status.NO; } + @Override + public Set getFieldNames() { + return requiredFields; + } + public final void postProcess(Function fieldTypeLookup) { for (Map.Entry> entry : fields().entrySet()) { MappedFieldType fieldType = fieldTypeLookup.apply(entry.getKey()); diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index a317fc8922c96..936ef7df165de 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -8,16 +8,8 @@ package org.elasticsearch.index.get; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.IndexableFieldType; -import org.apache.lucene.index.StoredFieldVisitor; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; @@ -28,30 +20,27 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.TranslogLeafReader; import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -158,7 +147,6 @@ private GetResult innerGet(String id, String[] gFields, boolean realtime, long v Engine.GetResult get = indexShard.get(new Engine.Get(realtime, realtime, id) .version(version).versionType(versionType).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm)); - assert get.isFromTranslog() == false || realtime : "should only read from translog if realtime 
enabled"; if (get.exists() == false) { get.close(); } @@ -197,11 +185,7 @@ private GetResult innerGetLoadFromStoredFields(String id, String[] storedFields, Map metadataFields = null; BytesReference source = null; DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); - // force fetching source if we read from translog and need to recreate stored fields - boolean forceSourceForComputingTranslogStoredFields = get.isFromTranslog() && storedFields != null && - Stream.of(storedFields).anyMatch(f -> TranslogLeafReader.ALL_FIELD_NAMES.contains(f) == false); - FieldsVisitor fieldVisitor = buildFieldsVisitors(storedFields, - forceSourceForComputingTranslogStoredFields ? FetchSourceContext.FETCH_SOURCE : fetchSourceContext); + FieldsVisitor fieldVisitor = buildFieldsVisitors(storedFields, fetchSourceContext); if (fieldVisitor != null) { try { docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor); @@ -210,54 +194,6 @@ private GetResult innerGetLoadFromStoredFields(String id, String[] storedFields, } source = fieldVisitor.source(); - // in case we read from translog, some extra steps are needed to make _source consistent and to load stored fields - if (get.isFromTranslog()) { - // Fast path: if only asked for the source or stored fields that have been already provided by TranslogLeafReader, - // just make source consistent by reapplying source filters from mapping (possibly also nulling the source) - if (forceSourceForComputingTranslogStoredFields == false) { - try { - source = indexShard.mapperService().documentMapper().sourceMapper().applyFilters(source, null); - } catch (IOException e) { - throw new ElasticsearchException("Failed to reapply filters for [" + id + "] after reading from translog", e); - } - } else { - // Slow path: recreate stored fields from original source - assert source != null : "original source in translog must exist"; - SourceToParse sourceToParse = new SourceToParse(shardId.getIndexName(), id, source, XContentHelper.xContentType(source), - fieldVisitor.routing(), Map.of()); - MapperService mapperService = indexShard.mapperService(); - ParsedDocument doc = mapperService.documentParser().parseDocument(sourceToParse, mapperService.mappingLookup()); - assert doc.dynamicMappingsUpdate() == null : "mapping updates should not be required on already-indexed doc"; - // update special fields - doc.updateSeqID(docIdAndVersion.seqNo, docIdAndVersion.primaryTerm); - doc.version().setLongValue(docIdAndVersion.version); - - // retrieve stored fields from parsed doc - fieldVisitor = buildFieldsVisitors(storedFields, fetchSourceContext); - for (IndexableField indexableField : doc.rootDoc().getFields()) { - IndexableFieldType fieldType = indexableField.fieldType(); - if (fieldType.stored()) { - FieldInfo fieldInfo = new FieldInfo(indexableField.name(), 0, false, false, false, IndexOptions.NONE, - DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); - StoredFieldVisitor.Status status = fieldVisitor.needsField(fieldInfo); - if (status == StoredFieldVisitor.Status.YES) { - if (indexableField.numericValue() != null) { - fieldVisitor.objectField(fieldInfo, indexableField.numericValue()); - } else if (indexableField.binaryValue() != null) { - fieldVisitor.binaryField(fieldInfo, indexableField.binaryValue()); - } else if (indexableField.stringValue() != null) { - fieldVisitor.objectField(fieldInfo, indexableField.stringValue()); - } - } else if (status == StoredFieldVisitor.Status.STOP) { - break; - } - } - } - // retrieve source (with possible transformations, e.g. 
source filters - source = fieldVisitor.source(); - } - } - // put stored fields into result objects if (fieldVisitor.fields().isEmpty() == false) { fieldVisitor.postProcess(mapperService::fieldType); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 05e200405b94d..914963c871dd6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -9,7 +9,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.geo.GeoFormatterFactory; +import org.elasticsearch.common.geo.GeometryFormatterFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.MapXContentParser; import org.elasticsearch.core.CheckedConsumer; @@ -52,13 +52,14 @@ public abstract void parse( CheckedConsumer consumer, Consumer onMalformed) throws IOException; - private void fetchFromSource(Object sourceMap, Consumer consumer, Function formatter) { + private void fetchFromSource(Object sourceMap, Consumer consumer) { try (XContentParser parser = MapXContentParser.wrapObject(sourceMap)) { - parse(parser, v -> consumer.accept(formatter.apply(v)), e -> {}); /* ignore malformed */ + parse(parser, v -> consumer.accept(v), e -> {}); /* ignore malformed */ } catch (IOException e) { throw new UncheckedIOException(e); } } + } public abstract static class AbstractGeometryFieldType extends MappedFieldType { @@ -80,17 +81,17 @@ public final Query termQuery(Object value, SearchExecutionContext context) { /** * Gets the formatter by name. */ - protected abstract Function getFormatter(String format); + protected abstract Function, List> getFormatter(String format); @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { - Function formatter = getFormatter(format != null ? format : GeoFormatterFactory.GEOJSON); + Function, List> formatter = getFormatter(format != null ? 
format : GeometryFormatterFactory.GEOJSON); return new ArraySourceValueFetcher(name(), context) { @Override protected Object parseSourceValue(Object value) { - List values = new ArrayList<>(); - geometryParser.fetchFromSource(value, values::add, formatter); - return values; + final List values = new ArrayList<>(); + geometryParser.fetchFromSource(value, values::add); + return formatter.apply(values); } }; } @@ -133,12 +134,13 @@ protected AbstractGeometryFieldMapper( } @Override + @SuppressWarnings("unchecked") public AbstractGeometryFieldType fieldType() { return (AbstractGeometryFieldType) mappedFieldType; } @Override - protected void parseCreateField(ParseContext context) throws IOException { + protected void parseCreateField(DocumentParserContext context) throws IOException { throw new UnsupportedOperationException("Parsing is implemented in parse(), this method should NEVER be called"); } @@ -147,10 +149,10 @@ protected void parseCreateField(ParseContext context) throws IOException { * @param context the ParseContext holding the document * @param geometry the parsed geometry object */ - protected abstract void index(ParseContext context, T geometry) throws IOException; + protected abstract void index(DocumentParserContext context, T geometry) throws IOException; @Override - public final void parse(ParseContext context) throws IOException { + public final void parse(DocumentParserContext context) throws IOException { if (hasScript) { throw new MapperParsingException("failed to parse field [" + fieldType().name() + "] of type + " + contentType() + "]", new IllegalArgumentException("Cannot index data directly into a field with a [script] parameter")); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index f36704a852e53..eea937f2ff3bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -9,11 +9,10 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.CheckedBiFunction; -import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext; +import org.elasticsearch.core.CheckedConsumer; import java.io.IOException; import java.util.function.Consumer; @@ -24,7 +23,7 @@ public abstract class AbstractPointGeometryFieldMapper extends AbstractGeometryFieldMapper { public static Parameter nullValueParam(Function initializer, - TriFunction parser, + TriFunction parser, Supplier def) { return new Parameter("null_value", false, def, parser, initializer); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java index a47fd7103432e..35146ebca63e2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java @@ -17,22 +17,19 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.common.xcontent.ToXContent; -import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.CompositeFieldScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.lookup.SearchLookup; -import java.io.IOException; import java.time.ZoneId; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.function.Function; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -40,32 +37,20 @@ /** * Abstract base {@linkplain MappedFieldType} for runtime fields based on a script. */ -abstract class AbstractScriptFieldType extends MappedFieldType implements RuntimeField { +abstract class AbstractScriptFieldType extends MappedFieldType { + protected final Script script; private final Function factory; - private final ToXContent toXContent; AbstractScriptFieldType( String name, Function factory, Script script, - Map meta, - ToXContent toXContent + Map meta ) { super(name, false, false, false, TextSearchInfo.SIMPLE_MATCH_WITHOUT_TERMS, meta); this.factory = factory; - this.script = script; - this.toXContent = toXContent; - } - - @Override - public final Collection asMappedFieldTypes() { - return Collections.singleton(this); - } - - @Override - public final void doXContentBody(XContentBuilder builder, Params params) throws IOException { - toXContent.toXContent(builder, params); + this.script = Objects.requireNonNull(script); } @Override @@ -205,37 +190,60 @@ protected final LeafFactory leafFactory(SearchExecutionContext context) { // Placeholder Script for source-only fields // TODO rework things so that we don't need this - private static final Script DEFAULT_SCRIPT = new Script(""); + protected static final Script DEFAULT_SCRIPT = new Script(""); abstract static class Builder extends RuntimeField.Builder { private final ScriptContext scriptContext; - private final Factory parseFromSourceFactory; final FieldMapper.Parameter