diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e46d7c76cb8a8..37d01c4f8ce79 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.10.0 -lucene = 8.7.0-snapshot-72d8528c3a6 +lucene = 8.6.3 bundled_jdk_vendor = openjdk bundled_jdk = 15+36@779bf45e88a44cbd9ea6621d33e33db1 diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 6fa44f07b8be1..2703f6506fb75 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 8.7.0 -:lucene_version_path: 8.7.0 +:lucene_version: 8.6.3 +:lucene_version_path: 8.6.3 :jdk: 1.8.0_131 :jdk_major: 8 :build_flavor: default diff --git a/modules/lang-expression/licenses/lucene-expressions-8.6.3.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..22b757781bad9 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.6.3.jar.sha1 @@ -0,0 +1 @@ +695979332e9236fe8cede959eac20b7438bb2ea2 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index a27a199ba4266..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80caf0f653da6497532d1c0a4f82686fe5219f20 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.6.3.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..bc0f1f19fd093 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.6.3.jar.sha1 @@ -0,0 +1 @@ +49f019f5f4c7800d422a92c20851df450e026bb4 \ No newline at end of file diff --git 
a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 1844e368d5e3e..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d96962c085d360c2d50fd8031b9f4d862b9efa2 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.6.3.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..27ea9b7df4328 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.6.3.jar.sha1 @@ -0,0 +1 @@ +e4b270b1a10d4b88a6b8bd403f6ef2bde2e407f6 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 4d91439d94801..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41efebeeafa9007b3e1167e1904f458e49533607 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.6.3.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..adc152164c6c4 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.6.3.jar.sha1 @@ -0,0 +1 @@ +ded97cb61d5f82b2071ccf110502ff35cc03917d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index d1f080bd940e4..0000000000000 --- 
a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66ce33df9033cfbfac24594f0b2b58f68a77580c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.6.3.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..cd0965963e722 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.6.3.jar.sha1 @@ -0,0 +1 @@ +26a6f0c63fcba520abca8cbaac72386d588a92ff \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 46adff460101b..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b2791388ee7fba44a8675d176634724251f308e1 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.6.3.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..ddcafc910d83b --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.6.3.jar.sha1 @@ -0,0 +1 @@ +e20bdb90a7db0218833ce0a785c57f2de44004a3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 8206a5ede939b..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9482f7433edfba7f1ebfcb908efc4a4153842dd6 \ No newline at end of file diff --git 
a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.6.3.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..cb656e1bee348 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.6.3.jar.sha1 @@ -0,0 +1 @@ +a5f24c7b49e1a0e79ffab3945503622e55f12e78 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index bf3f24ae9811d..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -029a82805fcbf8e3749851f972d1054438a144c6 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.6.3.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..4d7a82db3729e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.6.3.jar.sha1 @@ -0,0 +1 @@ +cf2869762d059550f64fd8e52f679e3f5a0ea6ba \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 7728687c415c3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0f1b11d41ae030711813e38b2f477c5c3124223a \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.6.3.jar.sha1 b/server/licenses/lucene-analyzers-common-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..d74c344a497fa --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.6.3.jar.sha1 @@ 
-0,0 +1 @@ +fd4dfed7a41d6ce84899588fcdbeb9480ee62948 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-analyzers-common-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 7bb882591fe26..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -74c3a717e0d693ee91676558cec21b0d2af9120b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.6.3.jar.sha1 b/server/licenses/lucene-backward-codecs-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..3bff0c5b7ef35 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.6.3.jar.sha1 @@ -0,0 +1 @@ +fc97a68f06e2aad664b6741f3c3046633d6dca9b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-backward-codecs-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 33b81cb5d886b..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cab0e149a9c0a8dad4f7602a6782ba65d8c6e493 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.6.3.jar.sha1 b/server/licenses/lucene-core-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..bf4ca9e8cb6cb --- /dev/null +++ b/server/licenses/lucene-core-8.6.3.jar.sha1 @@ -0,0 +1 @@ +b7acbdd00fc5552abbd30d61e14e6c0673e6b49e \ No newline at end of file diff --git a/server/licenses/lucene-core-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-core-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 4a8cbc9bc49c0..0000000000000 --- a/server/licenses/lucene-core-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4525d38643972dda5a69206416043f592bf6ad6 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.6.3.jar.sha1 
b/server/licenses/lucene-grouping-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..e5d5f1e5c394b --- /dev/null +++ b/server/licenses/lucene-grouping-8.6.3.jar.sha1 @@ -0,0 +1 @@ +5d311889fa2d209225f4c97425db2b5e2554291c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-grouping-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 448e3951e9467..0000000000000 --- a/server/licenses/lucene-grouping-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0d7d5064c93c41f6f7d193fa08230d330fc61e1 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.6.3.jar.sha1 b/server/licenses/lucene-highlighter-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..df2965d962503 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.6.3.jar.sha1 @@ -0,0 +1 @@ +e18183e6ee7128588bc7fb0e9ae33899ac3d2f16 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-highlighter-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 6955cdce44262..0000000000000 --- a/server/licenses/lucene-highlighter-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17020a2a3d4bf9ac1812416453eb7d640ae8b633 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.6.3.jar.sha1 b/server/licenses/lucene-join-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..ac5b3b83a863a --- /dev/null +++ b/server/licenses/lucene-join-8.6.3.jar.sha1 @@ -0,0 +1 @@ +e5fb4476b5a900cb207d8edf066f02d018c410de \ No newline at end of file diff --git a/server/licenses/lucene-join-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-join-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 4a7dff5cf12be..0000000000000 --- a/server/licenses/lucene-join-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-3cef052385aa5b6dfdc7fe73c2484ea4c2def4a2 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.6.3.jar.sha1 b/server/licenses/lucene-memory-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..8bac892ba3f5e --- /dev/null +++ b/server/licenses/lucene-memory-8.6.3.jar.sha1 @@ -0,0 +1 @@ +9e2d12bd1b67142902d106953327a4e3024359ec \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-memory-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index f2572d1fae348..0000000000000 --- a/server/licenses/lucene-memory-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -703ba54f53d74bca3cc7e10ef981f559b70bafdd \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.6.3.jar.sha1 b/server/licenses/lucene-misc-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..28d9e9f928ad2 --- /dev/null +++ b/server/licenses/lucene-misc-8.6.3.jar.sha1 @@ -0,0 +1 @@ +c1ec14cac5ed5dc0e486e5ad865c98134f0dacbe \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-misc-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 407c7152dc046..0000000000000 --- a/server/licenses/lucene-misc-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e567c20bad52b49e5d58bf6c6af28ad20dc3571 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.6.3.jar.sha1 b/server/licenses/lucene-queries-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..31b530fb07714 --- /dev/null +++ b/server/licenses/lucene-queries-8.6.3.jar.sha1 @@ -0,0 +1 @@ +06a07fdbf8ad92f079b56a511e168ed7ccdcab7d \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-queries-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 78931cec7bffe..0000000000000 --- 
a/server/licenses/lucene-queries-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -884f1db345a5c87dfa5468a4cd57126b76b36e77 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.6.3.jar.sha1 b/server/licenses/lucene-queryparser-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..6cb8aec235064 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.6.3.jar.sha1 @@ -0,0 +1 @@ +3ff205460f35665abeeb36b15716645d8544188a \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-queryparser-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 1063380f3449d..0000000000000 --- a/server/licenses/lucene-queryparser-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d874b79b9f5ce80530749198864051b9d424c9bb \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.6.3.jar.sha1 b/server/licenses/lucene-sandbox-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..d5721b34dcee3 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.6.3.jar.sha1 @@ -0,0 +1 @@ +269825215135c15bc8bc20cf641da5391b628d92 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-sandbox-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index ea9d904fe42f3..0000000000000 --- a/server/licenses/lucene-sandbox-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fedbcb7253538f9811aef44b88e3e731094104e0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.6.3.jar.sha1 b/server/licenses/lucene-spatial-extras-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..5c24ae9c7e84b --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.6.3.jar.sha1 @@ -0,0 +1 @@ +5f00f63a2ab37f1f7d1291001c8746d28ac7360f \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.7.0-snapshot-72d8528c3a6.jar.sha1 
b/server/licenses/lucene-spatial-extras-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 739e9a7cfd752..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be6f22e5e5325f0f9b0d7c7f696228a8aa966f51 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.6.3.jar.sha1 b/server/licenses/lucene-spatial3d-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..775b2199db4b9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.6.3.jar.sha1 @@ -0,0 +1 @@ +b3b9fceb52f0a4b98d8a631c450dd2750f40092b \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-spatial3d-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 55ab8bc4144b4..0000000000000 --- a/server/licenses/lucene-spatial3d-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dbbc693c9d2027bae1f805f187f85698c21a8599 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.6.3.jar.sha1 b/server/licenses/lucene-suggest-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..fdcda77e67e70 --- /dev/null +++ b/server/licenses/lucene-suggest-8.6.3.jar.sha1 @@ -0,0 +1 @@ +4b6496f775daff82e788ef01b204c37121073d9b \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/server/licenses/lucene-suggest-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 9afabe74cf32c..0000000000000 --- a/server/licenses/lucene-suggest-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -693d016e5a347cff50f450d371be67e19f16564a \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java deleted file mode 100644 index aece4c4ab27c0..0000000000000 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.index.IndexWriterMaxDocsChanger; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.plugins.EnginePlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.junit.After; -import org.junit.Before; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.Phaser; -import java.util.concurrent.atomic.AtomicInteger; - -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.both; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - -public class MaxDocsLimitIT extends ESIntegTestCase { - - private static final AtomicInteger maxDocs = new AtomicInteger(); - - public static class TestEnginePlugin extends Plugin implements EnginePlugin { - @Override - public Optional getEngineFactory(IndexSettings indexSettings) { - return Optional.of(config -> { - assert maxDocs.get() > 0 : "maxDocs is unset"; - return EngineTestCase.createEngine(config, maxDocs.get()); - }); - } - } - - @Override - protected boolean addMockInternalEngine() { - return false; - } - - @Override - protected Collection> nodePlugins() { - List> plugins = new ArrayList<>(super.nodePlugins()); - plugins.add(TestEnginePlugin.class); - return plugins; - } - - @Before - public void setMaxDocs() { - maxDocs.set(randomIntBetween(10, 100)); // Do not set this too low as we can fail to write the cluster state - IndexWriterMaxDocsChanger.setMaxDocs(maxDocs.get()); - } - - @After - public void restoreMaxDocs() { - IndexWriterMaxDocsChanger.restoreMaxDocs(); - } - - public void testMaxDocsLimit() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(1); - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST))); - IndexingResult indexingResult = indexDocs(maxDocs.get(), 1); - assertThat(indexingResult.numSuccess, equalTo(maxDocs.get())); - assertThat(indexingResult.numFailures, equalTo(0)); - int rejectedRequests = between(1, 10); - indexingResult = indexDocs(rejectedRequests, between(1, 8)); - assertThat(indexingResult.numFailures, 
equalTo(rejectedRequests)); - assertThat(indexingResult.numSuccess, equalTo(0)); - final IllegalArgumentException deleteError = expectThrows(IllegalArgumentException.class, - () -> client().prepareDelete("test", "_doc", "any-id").get()); - assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); - client().admin().indices().prepareRefresh("test").get(); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0).get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) maxDocs.get())); - if (randomBoolean()) { - client().admin().indices().prepareFlush("test").get(); - } - internalCluster().fullRestart(); - internalCluster().ensureAtLeastNumDataNodes(2); - ensureGreen("test"); - searchResponse = client().prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0).get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) maxDocs.get())); - } - - public void testMaxDocsLimitConcurrently() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(1); - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))); - IndexingResult indexingResult = indexDocs(between(maxDocs.get() + 1, maxDocs.get() * 2), between(2, 8)); - assertThat(indexingResult.numFailures, greaterThan(0)); - assertThat(indexingResult.numSuccess, both(greaterThan(0)).and(lessThanOrEqualTo(maxDocs.get()))); - client().admin().indices().prepareRefresh("test").get(); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0).get(); - 
ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) indexingResult.numSuccess)); - int totalSuccess = indexingResult.numSuccess; - while (totalSuccess < maxDocs.get()) { - indexingResult = indexDocs(between(1, 10), between(1, 8)); - assertThat(indexingResult.numSuccess, greaterThan(0)); - totalSuccess += indexingResult.numSuccess; - } - if (randomBoolean()) { - indexingResult = indexDocs(between(1, 10), between(1, 8)); - assertThat(indexingResult.numSuccess, equalTo(0)); - } - client().admin().indices().prepareRefresh("test").get(); - searchResponse = client().prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0).get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) totalSuccess)); - } - - static final class IndexingResult { - final int numSuccess; - final int numFailures; - - IndexingResult(int numSuccess, int numFailures) { - this.numSuccess = numSuccess; - this.numFailures = numFailures; - } - } - - static IndexingResult indexDocs(int numRequests, int numThreads) throws Exception { - final AtomicInteger completedRequests = new AtomicInteger(); - final AtomicInteger numSuccess = new AtomicInteger(); - final AtomicInteger numFailure = new AtomicInteger(); - Thread[] indexers = new Thread[numThreads]; - Phaser phaser = new Phaser(indexers.length); - for (int i = 0; i < indexers.length; i++) { - indexers[i] = new Thread(() -> { - phaser.arriveAndAwaitAdvance(); - while (completedRequests.incrementAndGet() <= numRequests) { - try { - final IndexResponse resp = client().prepareIndex("test", "_doc").setSource("{}", XContentType.JSON).get(); - numSuccess.incrementAndGet(); - assertThat(resp.status(), equalTo(RestStatus.CREATED)); - } catch (IllegalArgumentException e) { - numFailure.incrementAndGet(); - assertThat(e.getMessage(), 
containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); - } - } - }); - indexers[i].start(); - } - for (Thread indexer : indexers) { - indexer.join(); - } - internalCluster().assertNoInFlightDocsInEngine(); - return new IndexingResult(numSuccess.get(), numFailure.get()); - } -} diff --git a/server/src/main/java/org/apache/lucene/search/RegExp87.java b/server/src/main/java/org/apache/lucene/search/RegExp87.java new file mode 100644 index 0000000000000..5fc4cdf856e88 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/search/RegExp87.java @@ -0,0 +1,1074 @@ +/* + * dk.brics.automaton + * + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package org.apache.lucene.search; + +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.AutomatonProvider; +import org.apache.lucene.util.automaton.MinimizationOperations; +import org.apache.lucene.util.automaton.Operations; +import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + + +/** + * Copy of Lucene 8.7's forthcoming RegExp class brought forward for + * case insensitive search feature. + * + * @deprecated Use the RegExp object coming in Lucene 8.7 when it ships + */ +@Deprecated +public class RegExp87 { + + /** + * The type of expression represented by a RegExp node. 
+ */ + public enum Kind { + /** The union of two expressions */ + REGEXP_UNION, + /** A sequence of two expressions */ + REGEXP_CONCATENATION, + /** The intersection of two expressions */ + REGEXP_INTERSECTION, + /** An optional expression */ + REGEXP_OPTIONAL, + /** An expression that repeats */ + REGEXP_REPEAT, + /** An expression that repeats a minimum number of times*/ + REGEXP_REPEAT_MIN, + /** An expression that repeats a minimum and maximum number of times*/ + REGEXP_REPEAT_MINMAX, + /** The complement of an expression */ + REGEXP_COMPLEMENT, + /** A Character */ + REGEXP_CHAR, + /** A Character range*/ + REGEXP_CHAR_RANGE, + /** Any Character allowed*/ + REGEXP_ANYCHAR, + /** An empty expression*/ + REGEXP_EMPTY, + /** A string expression*/ + REGEXP_STRING, + /** Any string allowed */ + REGEXP_ANYSTRING, + /** An Automaton expression*/ + REGEXP_AUTOMATON, + /** An Interval expression */ + REGEXP_INTERVAL, + /** An expression for a pre-defined class e.g. \w */ + REGEXP_PRE_CLASS + } + + //----- Syntax flags ( <= 0xff ) ------ + /** + * Syntax flag, enables intersection (&). + */ + public static final int INTERSECTION = 0x0001; + + /** + * Syntax flag, enables complement (~). + */ + public static final int COMPLEMENT = 0x0002; + + /** + * Syntax flag, enables empty language (#). + */ + public static final int EMPTY = 0x0004; + + /** + * Syntax flag, enables anystring (@). + */ + public static final int ANYSTRING = 0x0008; + + /** + * Syntax flag, enables named automata (<identifier>). + */ + public static final int AUTOMATON = 0x0010; + + /** + * Syntax flag, enables numerical intervals ( + * <n-m>). + */ + public static final int INTERVAL = 0x0020; + + /** + * Syntax flag, enables all optional regexp syntax. + */ + public static final int ALL = 0xff; + + /** + * Syntax flag, enables no optional regexp syntax. 
+ */ + public static final int NONE = 0x0000; + + //----- Matching flags ( > 0xff ) ------ + + /** + * Allows case insensitive matching of ASCII characters. + */ + public static final int ASCII_CASE_INSENSITIVE = 0x0100; + + //Immutable parsed state + /** + * The type of expression + */ + public final Kind kind; + /** + * Child expressions held by a container type expression + */ + public final RegExp87 exp1, exp2; + /** + * String expression + */ + public final String s; + /** + * Character expression + */ + public final int c; + /** + * Limits for repeatable type expressions + */ + public final int min, max, digits; + /** + * Extents for range type expressions + */ + public final int from, to; + + // Parser variables + private final String originalString; + final int flags; + int pos; + + /** + * Constructs new RegExp from a string. Same as + * RegExp(s, ALL). + * + * @param s regexp string + * @exception IllegalArgumentException if an error occurred while parsing the + * regular expression + */ + public RegExp87(String s) throws IllegalArgumentException { + this(s, ALL); + } + + /** + * Constructs new RegExp from a string. + * + * @param s regexp string + * @param syntax_flags boolean 'or' of optional syntax constructs to be + * enabled + * @exception IllegalArgumentException if an error occurred while parsing the + * regular expression + */ + public RegExp87(String s, int syntax_flags) throws IllegalArgumentException { + this(s, syntax_flags, 0); + } + /** + * Constructs new RegExp from a string. 
+ * + * @param s regexp string + * @param syntax_flags boolean 'or' of optional syntax constructs to be + * enabled + * @param match_flags boolean 'or' of match behavior options such as case insensitivity + * @exception IllegalArgumentException if an error occurred while parsing the + * regular expression + */ + public RegExp87(String s, int syntax_flags, int match_flags) throws IllegalArgumentException { + if (syntax_flags > ALL) { + throw new IllegalArgumentException("Illegal syntax flag"); + } + + if (match_flags > 0 && match_flags <= ALL) { + throw new IllegalArgumentException("Illegal match flag"); + } + flags = syntax_flags | match_flags; + originalString = s; + RegExp87 e; + if (s.length() == 0) e = makeString(flags, ""); + else { + e = parseUnionExp(); + if (pos < originalString.length()) throw new IllegalArgumentException( + "end-of-string expected at position " + pos); + } + kind = e.kind; + exp1 = e.exp1; + exp2 = e.exp2; + this.s = e.s; + c = e.c; + min = e.min; + max = e.max; + digits = e.digits; + from = e.from; + to = e.to; + } + + RegExp87(int flags, Kind kind, RegExp87 exp1, RegExp87 exp2, String s, int c, int min, int max, int digits, int from, int to){ + this.originalString = null; + this.kind = kind; + this.flags = flags; + this.exp1 = exp1; + this.exp2 = exp2; + this.s = s; + this.c = c; + this.min = min; + this.max = max; + this.digits = digits; + this.from = from; + this.to = to; + } + + // Simplified construction of container nodes + static RegExp87 newContainerNode(int flags, Kind kind, RegExp87 exp1, RegExp87 exp2) { + return new RegExp87(flags, kind, exp1, exp2, null, 0, 0, 0, 0, 0, 0); + } + + // Simplified construction of repeating nodes + static RegExp87 newRepeatingNode(int flags, Kind kind, RegExp87 exp, int min, int max) { + return new RegExp87(flags, kind, exp, null, null, 0, min, max, 0, 0, 0); + } + + + // Simplified construction of leaf nodes + static RegExp87 newLeafNode(int flags, Kind kind, String s, int c, int min, int max, 
int digits, int from, int to) { + return new RegExp87(flags, kind, null, null, s, c, min, max, digits, from, to); + } + + /** + * Constructs new Automaton from this RegExp. Same + * as toAutomaton(null) (empty automaton map). + */ + public Automaton toAutomaton() { + return toAutomaton(null, null, Operations.DEFAULT_MAX_DETERMINIZED_STATES); + } + + /** + * Constructs new Automaton from this RegExp. The + * constructed automaton is minimal and deterministic and has no transitions + * to dead states. + * + * @param maxDeterminizedStates maximum number of states in the resulting + * automata. If the automata would need more than this many states + * TooComplexToDeterminizeException is thrown. Higher numbers require more + * space but can process more complex regexes. + * @exception IllegalArgumentException if this regular expression uses a named + * identifier that is not available from the automaton provider + * @exception TooComplexToDeterminizeException if determinizing this regexp + * requires more than maxDeterminizedStates states + */ + public Automaton toAutomaton(int maxDeterminizedStates) + throws IllegalArgumentException, TooComplexToDeterminizeException { + return toAutomaton(null, null, maxDeterminizedStates); + } + + /** + * Constructs new Automaton from this RegExp. The + * constructed automaton is minimal and deterministic and has no transitions + * to dead states. + * + * @param automaton_provider provider of automata for named identifiers + * @param maxDeterminizedStates maximum number of states in the resulting + * automata. If the automata would need more than this many states + * TooComplexToDeterminizeException is thrown. Higher numbers require more + * space but can process more complex regexes. 
+ * @exception IllegalArgumentException if this regular expression uses a named + * identifier that is not available from the automaton provider + * @exception TooComplexToDeterminizeException if determinizing this regexp + * requires more than maxDeterminizedStates states + */ + public Automaton toAutomaton(AutomatonProvider automaton_provider, + int maxDeterminizedStates) throws IllegalArgumentException, + TooComplexToDeterminizeException { + return toAutomaton(null, automaton_provider, maxDeterminizedStates); + } + + /** + * Constructs new Automaton from this RegExp. The + * constructed automaton is minimal and deterministic and has no transitions + * to dead states. + * + * @param automata a map from automaton identifiers to automata (of type + * Automaton). + * @param maxDeterminizedStates maximum number of states in the resulting + * automata. If the automata would need more than this many states + * TooComplexToDeterminizeException is thrown. Higher numbers require more + * space but can process more complex regexes. + * @exception IllegalArgumentException if this regular expression uses a named + * identifier that does not occur in the automaton map + * @exception TooComplexToDeterminizeException if determinizing this regexp + * requires more than maxDeterminizedStates states + */ + public Automaton toAutomaton(Map<String,Automaton> automata, + int maxDeterminizedStates) throws IllegalArgumentException, + TooComplexToDeterminizeException { + return toAutomaton(automata, null, maxDeterminizedStates); + } + + private Automaton toAutomaton(Map<String,Automaton> automata, + AutomatonProvider automaton_provider, int maxDeterminizedStates) + throws IllegalArgumentException, TooComplexToDeterminizeException { + try { + return toAutomatonInternal(automata, automaton_provider, + maxDeterminizedStates); + } catch (TooComplexToDeterminizeException e) { + // This is a little ugly. Have to pass an instance of core Lucene RegExp just to get error message. 
+ throw new TooComplexToDeterminizeException(new RegExp(this.originalString), e); + } + } + + private Automaton toAutomatonInternal(Map automata, + AutomatonProvider automaton_provider, int maxDeterminizedStates) + throws IllegalArgumentException { + List list; + Automaton a = null; + switch (kind) { + case REGEXP_PRE_CLASS: + RegExp87 expanded = expandPredefined(); + a = expanded.toAutomatonInternal(automata, automaton_provider, maxDeterminizedStates); + break; + case REGEXP_UNION: + list = new ArrayList<>(); + findLeaves(exp1, Kind.REGEXP_UNION, list, automata, automaton_provider, + maxDeterminizedStates); + findLeaves(exp2, Kind.REGEXP_UNION, list, automata, automaton_provider, + maxDeterminizedStates); + a = Operations.union(list); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_CONCATENATION: + list = new ArrayList<>(); + findLeaves(exp1, Kind.REGEXP_CONCATENATION, list, automata, + automaton_provider, maxDeterminizedStates); + findLeaves(exp2, Kind.REGEXP_CONCATENATION, list, automata, + automaton_provider, maxDeterminizedStates); + a = Operations.concatenate(list); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_INTERSECTION: + a = Operations.intersection( + exp1.toAutomatonInternal( + automata, automaton_provider, maxDeterminizedStates), + exp2.toAutomatonInternal( + automata, automaton_provider, maxDeterminizedStates)); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_OPTIONAL: + a = Operations.optional(exp1.toAutomatonInternal(automata, + automaton_provider, maxDeterminizedStates)); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_REPEAT: + a = Operations.repeat(exp1.toAutomatonInternal( + automata, automaton_provider, maxDeterminizedStates)); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_REPEAT_MIN: + a = exp1.toAutomatonInternal(automata, 
automaton_provider, maxDeterminizedStates); + int minNumStates = (a.getNumStates() - 1) * min; + if (minNumStates > maxDeterminizedStates) { + throw new TooComplexToDeterminizeException(a, minNumStates); + } + a = Operations.repeat(a, min); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_REPEAT_MINMAX: + a = exp1.toAutomatonInternal(automata, automaton_provider, maxDeterminizedStates); + int minMaxNumStates = (a.getNumStates() - 1) * max; + if (minMaxNumStates > maxDeterminizedStates) { + throw new TooComplexToDeterminizeException(a, minMaxNumStates); + } + a = Operations.repeat(a, min, max); + break; + case REGEXP_COMPLEMENT: + a = Operations.complement( + exp1.toAutomatonInternal(automata, automaton_provider, + maxDeterminizedStates), + maxDeterminizedStates); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + break; + case REGEXP_CHAR: + if (check(ASCII_CASE_INSENSITIVE)) { + a = toCaseInsensitiveChar(c, maxDeterminizedStates); + } else { + a = Automata.makeChar(c); + } + break; + case REGEXP_CHAR_RANGE: + a = Automata.makeCharRange(from, to); + break; + case REGEXP_ANYCHAR: + a = Automata.makeAnyChar(); + break; + case REGEXP_EMPTY: + a = Automata.makeEmpty(); + break; + case REGEXP_STRING: + if (check(ASCII_CASE_INSENSITIVE)) { + a = toCaseInsensitiveString(maxDeterminizedStates); + } else { + a = Automata.makeString(s); + } + break; + case REGEXP_ANYSTRING: + a = Automata.makeAnyString(); + break; + case REGEXP_AUTOMATON: + Automaton aa = null; + if (automata != null) { + aa = automata.get(s); + } + if (aa == null && automaton_provider != null) { + try { + aa = automaton_provider.getAutomaton(s); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + if (aa == null) { + throw new IllegalArgumentException("'" + s + "' not found"); + } + a = aa; + break; + case REGEXP_INTERVAL: + a = Automata.makeDecimalInterval(min, max, digits); + break; + } + return a; + } + private Automaton 
toCaseInsensitiveChar(int codepoint, int maxDeterminizedStates) { + Automaton case1 = Automata.makeChar(codepoint); + // For now we only work with ASCII characters + if (codepoint > 128) { + return case1; + } + int altCase = Character.isLowerCase(codepoint) ? Character.toUpperCase(codepoint) : Character.toLowerCase(codepoint); + Automaton result; + if (altCase != codepoint) { + result = Operations.union(case1, Automata.makeChar(altCase)); + result = MinimizationOperations.minimize(result, maxDeterminizedStates); + } else { + result = case1; + } + return result; + } + + private Automaton toCaseInsensitiveString(int maxDeterminizedStates) { + List list = new ArrayList<>(); + + Iterator iter = s.codePoints().iterator(); + while (iter.hasNext()) { + list.add(toCaseInsensitiveChar(iter.next(), maxDeterminizedStates)); + } + Automaton a = Operations.concatenate(list); + a = MinimizationOperations.minimize(a, maxDeterminizedStates); + return a; + } + + private void findLeaves(RegExp87 exp, Kind kind, List list, + Map automata, AutomatonProvider automaton_provider, + int maxDeterminizedStates) { + if (exp.kind == kind) { + findLeaves(exp.exp1, kind, list, automata, automaton_provider, + maxDeterminizedStates); + findLeaves(exp.exp2, kind, list, automata, automaton_provider, + maxDeterminizedStates); + } else { + list.add(exp.toAutomatonInternal(automata, automaton_provider, + maxDeterminizedStates)); + } + } + + /** + * The string that was used to construct the regex. Compare to toString. + */ + public String getOriginalString() { + return originalString; + } + + /** + * Constructs string from parsed regular expression. 
+ */ + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + toStringBuilder(b); + return b.toString(); + } + + void toStringBuilder(StringBuilder b) { + switch (kind) { + case REGEXP_UNION: + b.append("("); + exp1.toStringBuilder(b); + b.append("|"); + exp2.toStringBuilder(b); + b.append(")"); + break; + case REGEXP_CONCATENATION: + exp1.toStringBuilder(b); + exp2.toStringBuilder(b); + break; + case REGEXP_INTERSECTION: + b.append("("); + exp1.toStringBuilder(b); + b.append("&"); + exp2.toStringBuilder(b); + b.append(")"); + break; + case REGEXP_OPTIONAL: + b.append("("); + exp1.toStringBuilder(b); + b.append(")?"); + break; + case REGEXP_REPEAT: + b.append("("); + exp1.toStringBuilder(b); + b.append(")*"); + break; + case REGEXP_REPEAT_MIN: + b.append("("); + exp1.toStringBuilder(b); + b.append("){").append(min).append(",}"); + break; + case REGEXP_REPEAT_MINMAX: + b.append("("); + exp1.toStringBuilder(b); + b.append("){").append(min).append(",").append(max).append("}"); + break; + case REGEXP_COMPLEMENT: + b.append("~("); + exp1.toStringBuilder(b); + b.append(")"); + break; + case REGEXP_CHAR: + b.append("\\").appendCodePoint(c); + break; + case REGEXP_CHAR_RANGE: + b.append("[\\").appendCodePoint(from).append("-\\").appendCodePoint(to).append("]"); + break; + case REGEXP_ANYCHAR: + b.append("."); + break; + case REGEXP_EMPTY: + b.append("#"); + break; + case REGEXP_STRING: + b.append("\"").append(s).append("\""); + break; + case REGEXP_ANYSTRING: + b.append("@"); + break; + case REGEXP_AUTOMATON: + b.append("<").append(s).append(">"); + break; + case REGEXP_INTERVAL: + String s1 = Integer.toString(min); + String s2 = Integer.toString(max); + b.append("<"); + if (digits > 0) for (int i = s1.length(); i < digits; i++) + b.append('0'); + b.append(s1).append("-"); + if (digits > 0) for (int i = s2.length(); i < digits; i++) + b.append('0'); + b.append(s2).append(">"); + break; + case REGEXP_PRE_CLASS: + 
b.append("\\").appendCodePoint(from); + break; + } + } + + /** + * Like to string, but more verbose (shows the hierarchy more clearly). + */ + public String toStringTree() { + StringBuilder b = new StringBuilder(); + toStringTree(b, ""); + return b.toString(); + } + + void toStringTree(StringBuilder b, String indent) { + switch (kind) { + // binary + case REGEXP_UNION: + case REGEXP_CONCATENATION: + case REGEXP_INTERSECTION: + b.append(indent); + b.append(kind); + b.append('\n'); + exp1.toStringTree(b, indent + " "); + exp2.toStringTree(b, indent + " "); + break; + // unary + case REGEXP_OPTIONAL: + case REGEXP_REPEAT: + case REGEXP_COMPLEMENT: + b.append(indent); + b.append(kind); + b.append('\n'); + exp1.toStringTree(b, indent + " "); + break; + case REGEXP_REPEAT_MIN: + b.append(indent); + b.append(kind); + b.append(" min="); + b.append(min); + b.append('\n'); + exp1.toStringTree(b, indent + " "); + break; + case REGEXP_REPEAT_MINMAX: + b.append(indent); + b.append(kind); + b.append(" min="); + b.append(min); + b.append(" max="); + b.append(max); + b.append('\n'); + exp1.toStringTree(b, indent + " "); + break; + case REGEXP_CHAR: + b.append(indent); + b.append(kind); + b.append(" char="); + b.appendCodePoint(c); + b.append('\n'); + break; + case REGEXP_PRE_CLASS: + b.append(indent); + b.append(kind); + b.append(" class=\\"); + b.appendCodePoint(from); + b.append('\n'); + break; + case REGEXP_CHAR_RANGE: + b.append(indent); + b.append(kind); + b.append(" from="); + b.appendCodePoint(from); + b.append(" to="); + b.appendCodePoint(to); + b.append('\n'); + break; + case REGEXP_ANYCHAR: + case REGEXP_EMPTY: + b.append(indent); + b.append(kind); + b.append('\n'); + break; + case REGEXP_STRING: + b.append(indent); + b.append(kind); + b.append(" string="); + b.append(s); + b.append('\n'); + break; + case REGEXP_ANYSTRING: + b.append(indent); + b.append(kind); + b.append('\n'); + break; + case REGEXP_AUTOMATON: + b.append(indent); + b.append(kind); + b.append('\n'); + 
break; + case REGEXP_INTERVAL: + b.append(indent); + b.append(kind); + String s1 = Integer.toString(min); + String s2 = Integer.toString(max); + b.append("<"); + if (digits > 0) for (int i = s1.length(); i < digits; i++) + b.append('0'); + b.append(s1).append("-"); + if (digits > 0) for (int i = s2.length(); i < digits; i++) + b.append('0'); + b.append(s2).append(">"); + b.append('\n'); + break; + } + } + + /** + * Returns set of automaton identifiers that occur in this regular expression. + */ + public Set getIdentifiers() { + HashSet set = new HashSet<>(); + getIdentifiers(set); + return set; + } + + void getIdentifiers(Set set) { + switch (kind) { + case REGEXP_UNION: + case REGEXP_CONCATENATION: + case REGEXP_INTERSECTION: + exp1.getIdentifiers(set); + exp2.getIdentifiers(set); + break; + case REGEXP_OPTIONAL: + case REGEXP_REPEAT: + case REGEXP_REPEAT_MIN: + case REGEXP_REPEAT_MINMAX: + case REGEXP_COMPLEMENT: + exp1.getIdentifiers(set); + break; + case REGEXP_AUTOMATON: + set.add(s); + break; + default: + } + } + + static RegExp87 makeUnion(int flags, RegExp87 exp1, RegExp87 exp2) { + return newContainerNode(flags, Kind.REGEXP_UNION, exp1, exp2); + } + + static RegExp87 makeConcatenation(int flags, RegExp87 exp1, RegExp87 exp2) { + if ((exp1.kind == Kind.REGEXP_CHAR || exp1.kind == Kind.REGEXP_STRING) + && (exp2.kind == Kind.REGEXP_CHAR || exp2.kind == Kind.REGEXP_STRING)) return makeString( + flags, exp1, exp2); + RegExp87 rexp1, rexp2; + if (exp1.kind == Kind.REGEXP_CONCATENATION + && (exp1.exp2.kind == Kind.REGEXP_CHAR || exp1.exp2.kind == Kind.REGEXP_STRING) + && (exp2.kind == Kind.REGEXP_CHAR || exp2.kind == Kind.REGEXP_STRING)) { + rexp1 = exp1.exp1; + rexp2 = makeString(flags, exp1.exp2, exp2); + } else if ((exp1.kind == Kind.REGEXP_CHAR || exp1.kind == Kind.REGEXP_STRING) + && exp2.kind == Kind.REGEXP_CONCATENATION + && (exp2.exp1.kind == Kind.REGEXP_CHAR || exp2.exp1.kind == Kind.REGEXP_STRING)) { + rexp1 = makeString(flags, exp1, exp2.exp1); + rexp2 
= exp2.exp2; + } else { + rexp1 = exp1; + rexp2 = exp2; + } + return newContainerNode(flags, Kind.REGEXP_CONCATENATION, rexp1, rexp2); + } + + private static RegExp87 makeString(int flags, RegExp87 exp1, RegExp87 exp2) { + StringBuilder b = new StringBuilder(); + if (exp1.kind == Kind.REGEXP_STRING) b.append(exp1.s); + else b.appendCodePoint(exp1.c); + if (exp2.kind == Kind.REGEXP_STRING) b.append(exp2.s); + else b.appendCodePoint(exp2.c); + return makeString(flags, b.toString()); + } + + static RegExp87 makeIntersection(int flags, RegExp87 exp1, RegExp87 exp2) { + return newContainerNode(flags, Kind.REGEXP_INTERSECTION, exp1, exp2); + } + + static RegExp87 makeOptional(int flags, RegExp87 exp) { + return newContainerNode(flags, Kind.REGEXP_OPTIONAL, exp, null); + } + + static RegExp87 makeRepeat(int flags, RegExp87 exp) { + return newContainerNode(flags, Kind.REGEXP_REPEAT, exp, null); + } + + static RegExp87 makeRepeat(int flags, RegExp87 exp, int min) { + return newRepeatingNode(flags, Kind.REGEXP_REPEAT_MIN, exp, min, 0); + } + + static RegExp87 makeRepeat(int flags, RegExp87 exp, int min, int max) { + return newRepeatingNode(flags, Kind.REGEXP_REPEAT_MINMAX, exp, min, max); + } + + static RegExp87 makeComplement(int flags, RegExp87 exp) { + return newContainerNode(flags, Kind.REGEXP_COMPLEMENT, exp, null); + } + + static RegExp87 makeChar(int flags, int c) { + return newLeafNode(flags, Kind.REGEXP_CHAR, null, c, 0, 0, 0, 0, 0); + } + + static RegExp87 makeCharRange(int flags, int from, int to) { + if (from > to) + throw new IllegalArgumentException("invalid range: from (" + from + ") cannot be > to (" + to + ")"); + return newLeafNode(flags, Kind.REGEXP_CHAR_RANGE, null, 0, 0, 0, 0, from, to); + } + + static RegExp87 makeAnyChar(int flags) { + return newContainerNode(flags, Kind.REGEXP_ANYCHAR, null, null); + } + + static RegExp87 makeEmpty(int flags) { + return newContainerNode(flags, Kind.REGEXP_EMPTY, null, null); + } + + static RegExp87 makeString(int 
flags, String s) { + return newLeafNode(flags, Kind.REGEXP_STRING, s, 0, 0, 0, 0, 0, 0); + } + + static RegExp87 makeAnyString(int flags) { + return newContainerNode(flags, Kind.REGEXP_ANYSTRING, null, null); + } + + static RegExp87 makeAutomaton(int flags, String s) { + return newLeafNode(flags, Kind.REGEXP_AUTOMATON, s, 0, 0, 0, 0, 0, 0); + } + + static RegExp87 makeInterval(int flags, int min, int max, int digits) { + return newLeafNode(flags, Kind.REGEXP_INTERVAL, null, 0, min, max, digits, 0, 0); + } + + private boolean peek(String s) { + return more() && s.indexOf(originalString.codePointAt(pos)) != -1; + } + + private boolean match(int c) { + if (pos >= originalString.length()) return false; + if (originalString.codePointAt(pos) == c) { + pos += Character.charCount(c); + return true; + } + return false; + } + + private boolean more() { + return pos < originalString.length(); + } + + private int next() throws IllegalArgumentException { + if (!more()) throw new IllegalArgumentException("unexpected end-of-string"); + int ch = originalString.codePointAt(pos); + pos += Character.charCount(ch); + return ch; + } + + private boolean check(int flag) { + return (flags & flag) != 0; + } + + final RegExp87 parseUnionExp() throws IllegalArgumentException { + RegExp87 e = parseInterExp(); + if (match('|')) e = makeUnion(flags, e, parseUnionExp()); + return e; + } + + final RegExp87 parseInterExp() throws IllegalArgumentException { + RegExp87 e = parseConcatExp(); + if (check(INTERSECTION) && match('&')) e = makeIntersection(flags, e, + parseInterExp()); + return e; + } + + final RegExp87 parseConcatExp() throws IllegalArgumentException { + RegExp87 e = parseRepeatExp(); + if (more() && !peek(")|") && (!check(INTERSECTION) || !peek("&"))) e = makeConcatenation( + flags, e, parseConcatExp()); + return e; + } + + final RegExp87 parseRepeatExp() throws IllegalArgumentException { + RegExp87 e = parseComplExp(); + while (peek("?*+{")) { + if (match('?')) e = makeOptional(flags, 
e); + else if (match('*')) e = makeRepeat(flags, e); + else if (match('+')) e = makeRepeat(flags, e, 1); + else if (match('{')) { + int start = pos; + while (peek("0123456789")) + next(); + if (start == pos) throw new IllegalArgumentException( + "integer expected at position " + pos); + int n = Integer.parseInt(originalString.substring(start, pos)); + int m = -1; + if (match(',')) { + start = pos; + while (peek("0123456789")) + next(); + if (start != pos) m = Integer.parseInt( + originalString.substring(start, pos)); + } else m = n; + if (!match('}')) throw new IllegalArgumentException( + "expected '}' at position " + pos); + if (m == -1) e = makeRepeat(flags, e, n); + else e = makeRepeat(flags, e, n, m); + } + } + return e; + } + + final RegExp87 parseComplExp() throws IllegalArgumentException { + if (check(COMPLEMENT) && match('~')) return makeComplement(flags, parseComplExp()); + else return parseCharClassExp(); + } + + final RegExp87 parseCharClassExp() throws IllegalArgumentException { + if (match('[')) { + boolean negate = false; + if (match('^')) negate = true; + RegExp87 e = parseCharClasses(); + if (negate) e = makeIntersection(flags, makeAnyChar(flags), makeComplement(flags, e)); + if (!match(']')) throw new IllegalArgumentException( + "expected ']' at position " + pos); + return e; + } else return parseSimpleExp(); + } + + final RegExp87 parseCharClasses() throws IllegalArgumentException { + RegExp87 e = parseCharClass(); + while (more() && !peek("]")) + e = makeUnion(flags, e, parseCharClass()); + return e; + } + + final RegExp87 parseCharClass() throws IllegalArgumentException { + RegExp87 predefinedExp = matchPredefinedCharacterClass(); + if (predefinedExp != null) { + return predefinedExp; + } + + int c = parseCharExp(); + if (match('-')) return makeCharRange(flags, c, parseCharExp()); + else return makeChar(flags, c); + } + + RegExp87 expandPredefined() { + //See https://docs.oracle.com/javase/tutorial/essential/regex/pre_char_classes.html + switch 
(from) { + case 'd': + return new RegExp87("[0-9]"); // digit + case 'D': + return new RegExp87("[^0-9]"); // non-digit + case 's': + return new RegExp87("[ \t\n\r]"); // whitespace + case 'S': + return new RegExp87("[^\\s]"); // non-whitespace + case 'w': + return new RegExp87("[a-zA-Z_0-9]"); // word + case 'W': + return new RegExp87("[^\\w]"); // non-word + default: + throw new IllegalArgumentException( + "invalid character class " + from); + } + } + + + final RegExp87 matchPredefinedCharacterClass() { + //See https://docs.oracle.com/javase/tutorial/essential/regex/pre_char_classes.html + if (match('\\')) { + if (peek("dDwWsS")) { + return newLeafNode(flags, Kind.REGEXP_PRE_CLASS, null, 0, 0, 0, 0, next(), 0); + } + + if (peek("\\")) { + return makeChar(flags, next()); + } + + // From https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#bs + // "It is an error to use a backslash prior to any alphabetic character that does not denote an escaped + // construct;" + if (peek("abcefghijklmnopqrtuvxyz") || peek("ABCEFGHIJKLMNOPQRTUVXYZ")) { + throw new IllegalArgumentException("invalid character class \\" + next()); + } + } + + return null; + } + + + final RegExp87 parseSimpleExp() throws IllegalArgumentException { + if (match('.')) return makeAnyChar(flags); + else if (check(EMPTY) && match('#')) return makeEmpty(flags); + else if (check(ANYSTRING) && match('@')) return makeAnyString(flags); + else if (match('"')) { + int start = pos; + while (more() && !peek("\"")) + next(); + if (!match('"')) throw new IllegalArgumentException( + "expected '\"' at position " + pos); + return makeString(flags, originalString.substring(start, pos - 1)); + } else if (match('(')) { + if (match(')')) return makeString(flags, ""); + RegExp87 e = parseUnionExp(); + if (!match(')')) throw new IllegalArgumentException( + "expected ')' at position " + pos); + return e; + } else if ((check(AUTOMATON) || check(INTERVAL)) && match('<')) { + int start = pos; + while (more() && 
!peek(">")) + next(); + if (!match('>')) throw new IllegalArgumentException( + "expected '>' at position " + pos); + String s = originalString.substring(start, pos - 1); + int i = s.indexOf('-'); + if (i == -1) { + if (!check(AUTOMATON)) throw new IllegalArgumentException( + "interval syntax error at position " + (pos - 1)); + return makeAutomaton(flags, s); + } else { + if (!check(INTERVAL)) throw new IllegalArgumentException( + "illegal identifier at position " + (pos - 1)); + try { + if (i == 0 || i == s.length() - 1 || i != s.lastIndexOf('-')) throw new NumberFormatException(); + String smin = s.substring(0, i); + String smax = s.substring(i + 1, s.length()); + int imin = Integer.parseInt(smin); + int imax = Integer.parseInt(smax); + int digits; + if (smin.length() == smax.length()) digits = smin.length(); + else digits = 0; + if (imin > imax) { + int t = imin; + imin = imax; + imax = t; + } + return makeInterval(flags, imin, imax, digits); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "interval syntax error at position " + (pos - 1)); + } + } + } else { + RegExp87 predefined = matchPredefinedCharacterClass(); + if (predefined != null) { + return predefined; + } + return makeChar(flags, parseCharExp()); + } + } + + final int parseCharExp() throws IllegalArgumentException { + match('\\'); + return next(); + } +} diff --git a/server/src/main/java/org/apache/lucene/search/RegexpQuery87.java b/server/src/main/java/org/apache/lucene/search/RegexpQuery87.java new file mode 100644 index 0000000000000..12592530321d4 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/search/RegexpQuery87.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.lucene.search; + + +import org.apache.lucene.index.Term; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.AutomatonProvider; +import org.apache.lucene.util.automaton.Operations; + +/** + * Copy of Lucene's RegExpQuery class coming in 8.7 with case + * insensitive search option + * @deprecated + */ +@Deprecated +public class RegexpQuery87 extends AutomatonQuery { + /** + * A provider that provides no named automata + */ + private static AutomatonProvider defaultProvider = new AutomatonProvider() { + @Override + public Automaton getAutomaton(String name) { + return null; + } + }; + + /** + * Constructs a query for terms matching term. + *
<p>
+ * By default, all regular expression features are enabled. + *
</p>
+ * + * @param term regular expression. + */ + public RegexpQuery87(Term term) { + this(term, RegExp87.ALL); + } + + /** + * Constructs a query for terms matching term. + * + * @param term regular expression. + * @param flags optional RegExp features from {@link RegExp87} + */ + public RegexpQuery87(Term term, int flags) { + this(term, flags, defaultProvider, + Operations.DEFAULT_MAX_DETERMINIZED_STATES); + } + + /** + * Constructs a query for terms matching term. + * + * @param term regular expression. + * @param flags optional RegExp syntax features from {@link RegExp87} + * @param maxDeterminizedStates maximum number of states that compiling the + * automaton for the regexp can result in. Set higher to allow more complex + * queries and lower to prevent memory exhaustion. + */ + public RegexpQuery87(Term term, int flags, int maxDeterminizedStates) { + this(term, flags, defaultProvider, maxDeterminizedStates); + } + + /** + * Constructs a query for terms matching term. + * + * @param term regular expression. + * @param syntax_flags optional RegExp syntax features from {@link RegExp87} + * @param match_flags boolean 'or' of match behavior options such as case insensitivity + * @param maxDeterminizedStates maximum number of states that compiling the + * automaton for the regexp can result in. Set higher to allow more complex + * queries and lower to prevent memory exhaustion. + */ + public RegexpQuery87(Term term, int syntax_flags, int match_flags, int maxDeterminizedStates) { + this(term, syntax_flags, match_flags, defaultProvider, maxDeterminizedStates); + } + + /** + * Constructs a query for terms matching term. + * + * @param term regular expression. + * @param syntax_flags optional RegExp features from {@link RegExp87} + * @param provider custom AutomatonProvider for named automata + * @param maxDeterminizedStates maximum number of states that compiling the + * automaton for the regexp can result in. 
Set higher to allow more complex + * queries and lower to prevent memory exhaustion. + */ + public RegexpQuery87(Term term, int syntax_flags, AutomatonProvider provider, + int maxDeterminizedStates) { + this(term, syntax_flags, 0, provider, maxDeterminizedStates); + } + + /** + * Constructs a query for terms matching term. + * + * @param term regular expression. + * @param syntax_flags optional RegExp features from {@link RegExp87} + * @param match_flags boolean 'or' of match behavior options such as case insensitivity + * @param provider custom AutomatonProvider for named automata + * @param maxDeterminizedStates maximum number of states that compiling the + * automaton for the regexp can result in. Set higher to allow more complex + * queries and lower to prevent memory exhaustion. + */ + public RegexpQuery87(Term term, int syntax_flags, int match_flags, AutomatonProvider provider, + int maxDeterminizedStates) { + super(term, + new RegExp87(term.text(), syntax_flags, match_flags).toAutomaton( + provider, maxDeterminizedStates), maxDeterminizedStates); + } + + /** Returns the regexp of this query wrapped in a Term. */ + public Term getRegexp() { + return term; + } + + /** Prints a user-readable version of this query. 
*/ + @Override + public String toString(String field) { + StringBuilder buffer = new StringBuilder(); + if (!term.field().equals(field)) { + buffer.append(term.field()); + buffer.append(":"); + } + buffer.append('/'); + buffer.append(term.text()); + buffer.append('/'); + return buffer.toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e65e1256b55d5..a092aad559dc1 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -149,7 +149,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_9_2 = new Version(7090299, org.apache.lucene.util.Version.LUCENE_8_6_2); public static final Version V_7_9_3 = new Version(7090399, org.apache.lucene.util.Version.LUCENE_8_6_2); public static final Version V_7_9_4 = new Version(7090499, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_10_0 = new Version(7100099, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_10_0 = new Version(7100099, org.apache.lucene.util.Version.LUCENE_8_6_3); public static final Version CURRENT = V_7_10_0; private static final ImmutableOpenIntMap idToVersion; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 3ca6582cc0b2f..8d01759e90c9d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -105,7 +105,7 @@ import java.util.Map; public class Lucene { - public static final String LATEST_CODEC = "Lucene87"; + public static final String LATEST_CODEC = "Lucene86"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java 
b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java index 522e3d1d715e3..ed6340f7d6b12 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -21,8 +21,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene87.Lucene87Codec; -import org.apache.lucene.codecs.lucene87.Lucene87StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene86.Lucene86Codec; +import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.index.mapper.MapperService; @@ -47,8 +47,8 @@ public class CodecService { public CodecService(@Nullable MapperService mapperService, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene87Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene87Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene86Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene86Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index fda96bbd9335f..e900351fcbebd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -22,8 +22,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene87.Lucene87StoredFieldsFormat; 
-import org.apache.lucene.codecs.lucene87.Lucene87Codec; +import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; +import org.apache.lucene.codecs.lucene86.Lucene86Codec; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.mapper.CompletionFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -37,7 +37,7 @@ * per index in real time via the mapping API. If no specific postings format is * configured for a specific field the default postings format is used. */ -public class PerFieldMappingPostingFormatCodec extends Lucene87Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene86Codec { private final Logger logger; private final MapperService mapperService; @@ -46,7 +46,7 @@ public class PerFieldMappingPostingFormatCodec extends Lucene87Codec { "PerFieldMappingPostingFormatCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC; } - public PerFieldMappingPostingFormatCodec(Lucene87StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) { + public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) { super(compressionMode); this.mapperService = mapperService; this.logger = logger; diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d52335a517f00..3e2c394424570 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -180,18 +180,6 @@ public class InternalEngine extends Engine { private final KeyedLock noOpKeyedLock = new KeyedLock<>(); private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); - /** - * If multiple writes passed {@link InternalEngine#tryAcquireInFlightDocs(Operation, int)} but they haven't adjusted - * {@link 
IndexWriter#getPendingNumDocs()} yet, then IndexWriter can fail with too many documents. In this case, we have to fail - * the engine because we already generated sequence numbers for write operations; otherwise we will have gaps in sequence numbers. - * To avoid this, we keep track the number of documents that are being added to IndexWriter, and account it in - * {@link InternalEngine#tryAcquireInFlightDocs(Operation, int)}. Although we can double count some inFlight documents in IW and Engine, - * this shouldn't be an issue because it happens for a short window and we adjust the inFlightDocCount once an indexing is completed. - */ - private final AtomicLong inFlightDocCount = new AtomicLong(); - - private final int maxDocs; - @Nullable private final String historyUUID; @@ -202,12 +190,13 @@ public class InternalEngine extends Engine { private volatile String forceMergeUUID; public InternalEngine(EngineConfig engineConfig) { - this(engineConfig, IndexWriter.MAX_DOCS, LocalCheckpointTracker::new); + this(engineConfig, LocalCheckpointTracker::new); } - InternalEngine(EngineConfig engineConfig, int maxDocs, BiFunction localCheckpointTrackerSupplier) { + InternalEngine( + final EngineConfig engineConfig, + final BiFunction localCheckpointTrackerSupplier) { super(engineConfig); - this.maxDocs = maxDocs; if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { updateAutoIdTimestamp(Long.MAX_VALUE, true); } @@ -890,7 +879,6 @@ public IndexResult index(Index index) throws IOException { try (ReleasableLock releasableLock = readLock.acquire()) { ensureOpen(); assert assertIncomingSequenceNumber(index.origin(), index.seqNo()); - int reservedDocs = 0; try (Releasable ignored = versionMap.acquireLock(index.uid().bytes()); Releasable indexThrottle = doThrottle ? throttle.acquireThrottle() : () -> {}) { lastWriteNanos = index.startTime(); @@ -921,11 +909,9 @@ public IndexResult index(Index index) throws IOException { * or calls updateDocument. 
*/ final IndexingStrategy plan = indexingStrategyForOperation(index); - reservedDocs = plan.reservedDocs; final IndexResult indexResult; if (plan.earlyResultOnPreFlightError.isPresent()) { - assert index.origin() == Operation.Origin.PRIMARY : index.origin(); indexResult = plan.earlyResultOnPreFlightError.get(); assert indexResult.getResultType() == Result.Type.FAILURE : indexResult.getResultType(); } else { @@ -980,8 +966,6 @@ public IndexResult index(Index index) throws IOException { indexResult.setTook(System.nanoTime() - index.startTime()); indexResult.freeze(); return indexResult; - } finally { - releaseInFlightDocs(reservedDocs); } } catch (RuntimeException | IOException e) { try { @@ -1020,14 +1004,14 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO } else if (maxSeqNoOfUpdatesOrDeletes <= localCheckpointTracker.getProcessedCheckpoint()) { // see Engine#getMaxSeqNoOfUpdatesOrDeletes for the explanation of the optimization using sequence numbers assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : index.seqNo() + ">=" + maxSeqNoOfUpdatesOrDeletes; - plan = IndexingStrategy.optimizedAppendOnly(index.version(), 0); + plan = IndexingStrategy.optimizedAppendOnly(index.version()); } else { versionMap.enforceSafeAccess(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = IndexingStrategy.processAsStaleOp(softDeleteEnabled, index.version()); } else { - plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.version(), 0); + plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.version()); } } return plan; @@ -1044,17 +1028,11 @@ protected IndexingStrategy indexingStrategyForOperation(final Index index) throw private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { assert index.origin() == Operation.Origin.PRIMARY : "planing 
as primary but origin isn't. got " + index.origin(); - final int reservingDocs = index.parsedDoc().docs().size(); final IndexingStrategy plan; // resolve an external operation into an internal one which is safe to replay final boolean canOptimizeAddDocument = canOptimizeAddDocument(index); if (canOptimizeAddDocument && mayHaveBeenIndexedBefore(index) == false) { - final Exception reserveError = tryAcquireInFlightDocs(index, reservingDocs); - if (reserveError != null) { - plan = IndexingStrategy.failAsTooManyDocs(reserveError); - } else { - plan = IndexingStrategy.optimizedAppendOnly(1L, reservingDocs); - } + plan = IndexingStrategy.optimizedAppendOnly(1L); } else { versionMap.enforceSafeAccess(); // resolves incoming version @@ -1086,14 +1064,9 @@ private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { new VersionConflictEngineException(shardId, index, currentVersion, currentNotFoundOrDeleted); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion); } else { - final Exception reserveError = tryAcquireInFlightDocs(index, reservingDocs); - if (reserveError != null) { - plan = IndexingStrategy.failAsTooManyDocs(reserveError); - } else { - plan = IndexingStrategy.processNormally(currentNotFoundOrDeleted, - canOptimizeAddDocument ? 1L : index.versionType().updateVersion(currentVersion, index.version()), - reservingDocs); - } + plan = IndexingStrategy.processNormally(currentNotFoundOrDeleted, + canOptimizeAddDocument ? 
1L : index.versionType().updateVersion(currentVersion, index.version()) + ); } } return plan; @@ -1205,55 +1178,53 @@ protected static final class IndexingStrategy { final long versionForIndexing; final boolean indexIntoLucene; final boolean addStaleOpToLucene; - final int reservedDocs; final Optional earlyResultOnPreFlightError; private IndexingStrategy(boolean currentNotFoundOrDeleted, boolean useLuceneUpdateDocument, boolean indexIntoLucene, boolean addStaleOpToLucene, - long versionForIndexing, int reservedDocs, IndexResult earlyResultOnPreFlightError) { + long versionForIndexing, IndexResult earlyResultOnPreFlightError) { assert useLuceneUpdateDocument == false || indexIntoLucene : "use lucene update is set to true, but we're not indexing into lucene"; assert (indexIntoLucene && earlyResultOnPreFlightError != null) == false : "can only index into lucene or have a preflight result but not both." + "indexIntoLucene: " + indexIntoLucene + " earlyResultOnPreFlightError:" + earlyResultOnPreFlightError; - assert reservedDocs == 0 || indexIntoLucene || addStaleOpToLucene : reservedDocs; this.currentNotFoundOrDeleted = currentNotFoundOrDeleted; this.useLuceneUpdateDocument = useLuceneUpdateDocument; this.versionForIndexing = versionForIndexing; this.indexIntoLucene = indexIntoLucene; this.addStaleOpToLucene = addStaleOpToLucene; - this.reservedDocs = reservedDocs; this.earlyResultOnPreFlightError = earlyResultOnPreFlightError == null ? 
Optional.empty() : Optional.of(earlyResultOnPreFlightError); } - static IndexingStrategy optimizedAppendOnly(long versionForIndexing, int reservedDocs) { - return new IndexingStrategy(true, false, true, false, versionForIndexing, reservedDocs, null); + static IndexingStrategy optimizedAppendOnly(long versionForIndexing) { + return new IndexingStrategy(true, false, true, false, versionForIndexing, null); } public static IndexingStrategy skipDueToVersionConflict( VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion) { final IndexResult result = new IndexResult(e, currentVersion); - return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, Versions.NOT_FOUND, 0, result); + return new IndexingStrategy( + currentNotFoundOrDeleted, false, false, false, + Versions.NOT_FOUND, result); } - static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, long versionForIndexing, int reservedDocs) { + static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, + long versionForIndexing) { return new IndexingStrategy(currentNotFoundOrDeleted, currentNotFoundOrDeleted == false, - true, false, versionForIndexing, reservedDocs, null); + true, false, versionForIndexing, null); } public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long versionForIndexing) { - return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, versionForIndexing, 0, null); + return new IndexingStrategy(currentNotFoundOrDeleted, false, false, + false, versionForIndexing, null); } static IndexingStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionForIndexing) { - return new IndexingStrategy(false, false, false, addStaleOpToLucene, versionForIndexing, 0, null); - } - static IndexingStrategy failAsTooManyDocs(Exception e) { - final IndexResult result = new IndexResult(e, Versions.NOT_FOUND); - return new IndexingStrategy(false, false, false, false, Versions.NOT_FOUND, 0, 
result); + return new IndexingStrategy(false, false, false, + addStaleOpToLucene, versionForIndexing, null); } } @@ -1304,15 +1275,13 @@ public DeleteResult delete(Delete delete) throws IOException { assert Objects.equals(delete.uid().field(), IdFieldMapper.NAME) : delete.uid().field(); assert assertIncomingSequenceNumber(delete.origin(), delete.seqNo()); final DeleteResult deleteResult; - int reservedDocs = 0; // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: try (ReleasableLock ignored = readLock.acquire(); Releasable ignored2 = versionMap.acquireLock(delete.uid().bytes())) { ensureOpen(); lastWriteNanos = delete.startTime(); final DeletionStrategy plan = deletionStrategyForOperation(delete); - reservedDocs = plan.reservedDocs; + if (plan.earlyResultOnPreflightError.isPresent()) { - assert delete.origin() == Operation.Origin.PRIMARY : delete.origin(); deleteResult = plan.earlyResultOnPreflightError.get(); } else { // generate or register sequence number @@ -1354,36 +1323,11 @@ public DeleteResult delete(Delete delete) throws IOException { e.addSuppressed(inner); } throw e; - } finally { - releaseInFlightDocs(reservedDocs); } maybePruneDeletes(); return deleteResult; } - private Exception tryAcquireInFlightDocs(Operation operation, int addingDocs) { - assert operation.origin() == Operation.Origin.PRIMARY : operation; - assert operation.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO : operation; - assert addingDocs > 0 : addingDocs; - final long totalDocs = indexWriter.getPendingNumDocs() + inFlightDocCount.addAndGet(addingDocs); - if (totalDocs > maxDocs) { - releaseInFlightDocs(addingDocs); - return new IllegalArgumentException("Number of documents in the index can't exceed [" + maxDocs + "]"); - } else { - return null; - } - } - - private void releaseInFlightDocs(int numDocs) { - assert numDocs >= 0 : numDocs; - final long newValue = inFlightDocCount.addAndGet(-numDocs); - assert newValue >= 0 : 
"inFlightDocCount must not be negative [" + newValue + "]"; - } - - long getInFlightDocCount() { - return inFlightDocCount.get(); - } - protected DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException { if (delete.origin() == Operation.Origin.PRIMARY) { return planDeletionAsPrimary(delete); @@ -1410,7 +1354,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version()); } else { - plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version(), 0); + plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version()); } } return plan; @@ -1450,13 +1394,7 @@ private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete, currentVersion, currentlyDeleted); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, currentlyDeleted); } else { - final Exception reserveError = tryAcquireInFlightDocs(delete, 1); - if (reserveError != null) { - plan = DeletionStrategy.failAsTooManyDocs(reserveError); - } else { - final long versionOfDeletion = delete.versionType().updateVersion(currentVersion, delete.version()); - plan = DeletionStrategy.processNormally(currentlyDeleted, versionOfDeletion, 1); - } + plan = DeletionStrategy.processNormally(currentlyDeleted, delete.versionType().updateVersion(currentVersion, delete.version())); } return plan; } @@ -1516,10 +1454,9 @@ protected static final class DeletionStrategy { final boolean currentlyDeleted; final long versionOfDeletion; final Optional earlyResultOnPreflightError; - final int reservedDocs; private DeletionStrategy(boolean deleteFromLucene, boolean addStaleOpToLucene, boolean currentlyDeleted, - long versionOfDeletion, int 
reservedDocs, DeleteResult earlyResultOnPreflightError) { + long versionOfDeletion, DeleteResult earlyResultOnPreflightError) { assert (deleteFromLucene && earlyResultOnPreflightError != null) == false : "can only delete from lucene or have a preflight result but not both." + "deleteFromLucene: " + deleteFromLucene @@ -1528,8 +1465,6 @@ private DeletionStrategy(boolean deleteFromLucene, boolean addStaleOpToLucene, b this.addStaleOpToLucene = addStaleOpToLucene; this.currentlyDeleted = currentlyDeleted; this.versionOfDeletion = versionOfDeletion; - this.reservedDocs = reservedDocs; - assert reservedDocs == 0 || deleteFromLucene || addStaleOpToLucene : reservedDocs; this.earlyResultOnPreflightError = earlyResultOnPreflightError == null ? Optional.empty() : Optional.of(earlyResultOnPreflightError); } @@ -1538,26 +1473,20 @@ public static DeletionStrategy skipDueToVersionConflict( VersionConflictEngineException e, long currentVersion, boolean currentlyDeleted) { final DeleteResult deleteResult = new DeleteResult(e, currentVersion, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_SEQ_NO, currentlyDeleted == false); - return new DeletionStrategy(false, false, currentlyDeleted, Versions.NOT_FOUND, 0, deleteResult); + return new DeletionStrategy(false, false, currentlyDeleted, Versions.NOT_FOUND, deleteResult); } - static DeletionStrategy processNormally(boolean currentlyDeleted, long versionOfDeletion, int reservedDocs) { - return new DeletionStrategy(true, false, currentlyDeleted, versionOfDeletion, reservedDocs, null); + static DeletionStrategy processNormally(boolean currentlyDeleted, long versionOfDeletion) { + return new DeletionStrategy(true, false, currentlyDeleted, versionOfDeletion, null); } public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, long versionOfDeletion) { - return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, 0, null); + return new DeletionStrategy(false, false, currentlyDeleted, 
versionOfDeletion, null); } static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) { - return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, 0, null); - } - - static DeletionStrategy failAsTooManyDocs(Exception e) { - final DeleteResult deleteResult = new DeleteResult(e, Versions.NOT_FOUND, - SequenceNumbers.UNASSIGNED_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_SEQ_NO, false); - return new DeletionStrategy(false, false, false, Versions.NOT_FOUND, 0, deleteResult); + return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, null); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 09e4f6e060b85..ab537ebf09f17 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -23,10 +23,8 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.comparators.DoubleComparator; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.BigArrays; @@ -85,20 +83,14 @@ public FieldComparator newComparator(String fieldname, int numHits, int sortP final double dMissingValue = (Double) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new DoubleComparator(numHits, 
null, null, reversed, sortPos) { + return new FieldComparator.DoubleComparator(numHits, null, null) { @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new DoubleLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { - return DoubleValuesComparatorSource.this.getNumericDocValues(context, dMissingValue).getRawDoubleValues(); - } - - @Override - public void setScorer(Scorable scorer) { - DoubleValuesComparatorSource.this.setScorer(scorer); - } - }; + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + return DoubleValuesComparatorSource.this.getNumericDocValues(context, dMissingValue).getRawDoubleValues(); + } + @Override + public void setScorer(Scorable scorer) { + DoubleValuesComparatorSource.this.setScorer(scorer); } }; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index 265002be7aab8..1f646dcba058c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -22,10 +22,8 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.comparators.FloatComparator; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.BigArrays; @@ -78,15 +76,10 @@ public FieldComparator newComparator(String 
fieldname, int numHits, int sortP final float fMissingValue = (Float) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new FloatComparator(numHits, null, null, reversed, sortPos) { + return new FieldComparator.FloatComparator(numHits, null, null) { @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new FloatLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { - return FloatValuesComparatorSource.this.getNumericDocValues(context, fMissingValue).getRawFloatValues(); - } - }; + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + return FloatValuesComparatorSource.this.getNumericDocValues(context, fMissingValue).getRawFloatValues(); } }; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 9b843c6db56bd..33e1ab33736b8 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -23,9 +23,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.BigArrays; @@ -97,15 +95,10 @@ public 
FieldComparator newComparator(String fieldname, int numHits, int sortP final long lMissingValue = (Long) missingObject(missingValue, reversed); // NOTE: it's important to pass null as a missing value in the constructor so that // the comparator doesn't check docsWithField since we replace missing values in select() - return new LongComparator(numHits, null, null, reversed, sortPos) { + return new FieldComparator.LongComparator(numHits, null, null) { @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new LongLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { - return LongValuesComparatorSource.this.getNumericDocValues(context, lMissingValue); - } - }; + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + return LongValuesComparatorSource.this.getNumericDocValues(context, lMissingValue); } }; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java index 11da7ad7c7fc8..7c2295c748934 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; @@ -155,7 +155,7 @@ public Query regexpQuery(String value, int syntaxFlags, int matchFlags, int maxD ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false."); } failIfNotIndexed(); - RegexpQuery query = new RegexpQuery(new Term(name(), 
indexedValueForSearch(value)), syntaxFlags, + RegexpQuery87 query = new RegexpQuery87(new Term(name(), indexedValueForSearch(value)), syntaxFlags, matchFlags, maxDeterminizedStates); if (method != null) { query.setRewriteMethod(method); diff --git a/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java b/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java index 669c885276f46..37388cd5cf254 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.query; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.common.Strings; import java.util.Locale; @@ -43,37 +43,37 @@ public enum RegexpFlag { /** * Enables intersection of the form: {@code <expression> & <expression>} */ - INTERSECTION(RegExp.INTERSECTION), + INTERSECTION(RegExp87.INTERSECTION), /** * Enables complement expression of the form: {@code ~<expression>} */ - COMPLEMENT(RegExp.COMPLEMENT), + COMPLEMENT(RegExp87.COMPLEMENT), /** * Enables empty language expression: {@code #} */ - EMPTY(RegExp.EMPTY), + EMPTY(RegExp87.EMPTY), /** * Enables any string expression: {@code @} */ - ANYSTRING(RegExp.ANYSTRING), + ANYSTRING(RegExp87.ANYSTRING), /** * Enables numerical interval expression: {@code <n-m>} */ - INTERVAL(RegExp.INTERVAL), + INTERVAL(RegExp87.INTERVAL), /** * Disables all available option flags */ - NONE(RegExp.NONE), + NONE(RegExp87.NONE), /** * Enables all available option flags */ - ALL(RegExp.ALL); + ALL(RegExp87.ALL); final int value; @@ -110,9 +110,9 @@ public int value() { */ public static int resolveValue(String flags) { if (flags == null || flags.isEmpty()) { - return RegExp.ALL; + return RegExp87.ALL; } - int magic = RegExp.NONE; + int magic = RegExp87.NONE; for (String s : Strings.delimitedListToStringArray(flags, "|")) { if (s.isEmpty()) { continue; diff 
--git a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index 565997246aed1..8e35cd5d0b61b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -22,9 +22,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegExp87; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -279,17 +279,17 @@ protected Query doToQuery(QueryShardContext context) throws QueryShardException, } MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE); - int matchFlagsValue = caseInsensitive ? RegExp.ASCII_CASE_INSENSITIVE : 0; + int matchFlagsValue = caseInsensitive ? 
RegExp87.ASCII_CASE_INSENSITIVE : 0; Query query = null; // For BWC we mask irrelevant bits (RegExp changed ALL from 0xffff to 0xff) - int sanitisedSyntaxFlag = syntaxFlagsValue & RegExp.ALL; + int sanitisedSyntaxFlag = syntaxFlagsValue & RegExp87.ALL; MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType != null) { query = fieldType.regexpQuery(value, sanitisedSyntaxFlag, matchFlagsValue, maxDeterminizedStates, method, context); } if (query == null) { - RegexpQuery regexpQuery = new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), sanitisedSyntaxFlag, + RegexpQuery87 regexpQuery = new RegexpQuery87(new Term(fieldName, BytesRefs.toBytesRef(value)), sanitisedSyntaxFlag, matchFlagsValue, maxDeterminizedStates); if (method != null) { regexpQuery.setRewriteMethod(method); diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 117e238be2a86..70b59036c0207 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -43,7 +43,7 @@ import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; @@ -733,7 +733,7 @@ private Query getRegexpQuerySingle(String field, String termStr) throws ParseExc setAnalyzer(forceAnalyzer); return super.getRegexpQuery(field, termStr); } - return currentFieldType.regexpQuery(termStr, RegExp.ALL, 0, getMaxDeterminizedStates(), + return currentFieldType.regexpQuery(termStr, RegExp87.ALL, 0, getMaxDeterminizedStates(), getMultiTermRewriteMethod(), context); } 
catch (RuntimeException e) { if (lenient) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 2791e1edd72c7..9e476488fb871 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -32,7 +32,6 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -41,7 +40,6 @@ import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.Weight; -import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.Bits; import org.apache.lucene.util.RoaringDocIdSet; import org.elasticsearch.common.lease.Releasables; @@ -292,47 +290,40 @@ public int hashCode() { @Override public FieldComparator getComparator(int numHits, int sortPos) { - return new LongComparator(1, delegate.getField(), (Long) missingValue, delegate.getReverse(), sortPos) { + return new FieldComparator.LongComparator(1, delegate.getField(), (Long) missingValue) { @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new LongLeafComparator(context) { + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + NumericDocValues dvs = SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), + delegate.getSelector(), delegate.getNumericType()); + return new NumericDocValues() { @Override - 
protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) - throws IOException { - NumericDocValues dvs = SortedNumericSelector.wrap( - DocValues.getSortedNumeric(context.reader(), field), - delegate.getSelector(), delegate.getNumericType()); - return new NumericDocValues() { - @Override - public long longValue() throws IOException { - return round.applyAsLong(dvs.longValue()); - } - - @Override - public boolean advanceExact(int target) throws IOException { - return dvs.advanceExact(target); - } - - @Override - public int docID() { - return dvs.docID(); - } - - @Override - public int nextDoc() throws IOException { - return dvs.nextDoc(); - } - - @Override - public int advance(int target) throws IOException { - return dvs.advance(target); - } - - @Override - public long cost() { - return dvs.cost(); - } - }; + public long longValue() throws IOException { + return round.applyAsLong(dvs.longValue()); + } + + @Override + public boolean advanceExact(int target) throws IOException { + return dvs.advanceExact(target); + } + + @Override + public int docID() { + return dvs.docID(); + } + + @Override + public int nextDoc() throws IOException { + return dvs.nextDoc(); + } + + @Override + public int advance(int target) throws IOException { + return dvs.advance(target); + } + + @Override + public long cost() { + return dvs.cost(); } }; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index 30653f04a355a..24554b0936e9c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -34,7 +34,7 @@ import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.apache.lucene.util.automaton.CompiledAutomaton; import 
org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -322,7 +322,7 @@ public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) thro } - private final RegExp include, exclude; + private final RegExp87 include, exclude; private final SortedSet includeValues, excludeValues; private final int incZeroBasedPartition; private final int incNumPartitions; @@ -331,7 +331,7 @@ public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) thro * @param include The regular expression pattern for the terms to be included * @param exclude The regular expression pattern for the terms to be excluded */ - public IncludeExclude(RegExp include, RegExp exclude) { + public IncludeExclude(RegExp87 include, RegExp87 exclude) { if (include == null && exclude == null) { throw new IllegalArgumentException(); } @@ -344,7 +344,7 @@ public IncludeExclude(RegExp include, RegExp exclude) { } public IncludeExclude(String include, String exclude) { - this(include == null ? null : new RegExp(include), exclude == null ? null : new RegExp(exclude)); + this(include == null ? null : new RegExp87(include), exclude == null ? null : new RegExp87(exclude)); } /** @@ -400,9 +400,9 @@ public IncludeExclude(StreamInput in) throws IOException { incZeroBasedPartition = 0; incNumPartitions = 0; String includeString = in.readOptionalString(); - include = includeString == null ? null : new RegExp(includeString); + include = includeString == null ? null : new RegExp87(includeString); String excludeString = in.readOptionalString(); - exclude = excludeString == null ? null : new RegExp(excludeString); + exclude = excludeString == null ? 
null : new RegExp87(excludeString); return; } include = null; diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 3e3b9dc7f2ca4..cad13b83397ac 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -24,9 +24,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.comparators.DoubleComparator; import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; @@ -701,15 +699,10 @@ private NumericDoubleValues getNumericDoubleValues(LeafReaderContext context) th @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { - return new DoubleComparator(numHits, null, null, reversed, sortPos) { + return new FieldComparator.DoubleComparator(numHits, null, null) { @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new DoubleLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { - return getNumericDoubleValues(context).getRawDoubleValues(); - } - }; + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + return getNumericDoubleValues(context).getRawDoubleValues(); } }; } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java index da42ea6e0cbb2..5495809fb26bb 
100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.suggest.completion; import org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -142,7 +142,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Options for regular expression queries */ public static class Builder { - private int flagsValue = RegExp.ALL; + private int flagsValue = RegExp87.ALL; private int maxDeterminizedStates = Operations.DEFAULT_MAX_DETERMINIZED_STATES; public Builder() { diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index aa86888cdefe9..93c7258cfaa56 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -21,9 +21,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene87.Lucene87Codec; -import org.apache.lucene.codecs.lucene87.Lucene87StoredFieldsFormat; -import org.apache.lucene.codecs.lucene87.Lucene87StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene86.Lucene86Codec; +import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; +import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -53,8 +53,8 @@ public class CodecTests extends ESTestCase { public void testResolveDefaultCodecs() throws 
Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene87Codec.class)); - assertThat(codecService.codec("Lucene87"), instanceOf(Lucene87Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene86Codec.class)); + assertThat(codecService.codec("Lucene86"), instanceOf(Lucene86Codec.class)); } public void testDefault() throws Exception { @@ -78,7 +78,7 @@ private void assertCompressionEquals(Mode expected, Codec actual) throws Excepti iw.close(); DirectoryReader ir = DirectoryReader.open(dir); SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); - String v = sr.getSegmentInfo().info.getAttribute(Lucene87StoredFieldsFormat.MODE_KEY); + String v = sr.getSegmentInfo().info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY); assertNotNull(v); assertEquals(expected, Mode.valueOf(v)); ir.close(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/elasticsearch/index/engine/CompletionStatsCacheTests.java index f53786c4fc1da..856c1d8653c84 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CompletionStatsCacheTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene87.Lucene87Codec; +import org.apache.lucene.codecs.lucene86.Lucene86Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -57,7 +57,7 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final PostingsFormat postingsFormat = 
new Completion84PostingsFormat(); - indexWriterConfig.setCodec(new Lucene87Codec() { + indexWriterConfig.setCodec(new Lucene86Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bacfa1d2d8549..169ec24caf893 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.IndexWriterMaxDocsChanger; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -4387,7 +4386,7 @@ public void testNoOps() throws IOException { localCheckpoint); EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy())); - noOpEngine = new InternalEngine(noopEngineConfig, IndexWriter.MAX_DOCS, supplier) { + noOpEngine = new InternalEngine(noopEngineConfig, supplier) { @Override protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); @@ -6351,67 +6350,4 @@ public void testProducesStoredFieldsReader() throws Exception { } } } - - public void testMaxDocsOnPrimary() throws Exception { - engine.close(); - final boolean softDeleteEnabled = engine.config().getIndexSettings().isSoftDeleteEnabled(); - int maxDocs = randomIntBetween(1, 100); - IndexWriterMaxDocsChanger.setMaxDocs(maxDocs); - try { - engine = new InternalTestEngine(engine.config(), maxDocs, LocalCheckpointTracker::new); - int 
numDocs = between(maxDocs + 1, maxDocs * 2); - List operations = new ArrayList<>(numDocs); - for (int i = 0; i < numDocs; i++) { - final String id; - if (softDeleteEnabled == false || randomBoolean()) { - id = Integer.toString(randomInt(numDocs)); - operations.add(indexForDoc(createParsedDoc(id, null))); - } else { - id = "not_found"; - operations.add(new Engine.Delete("_doc", id, newUid(id), primaryTerm.get())); - } - } - for (int i = 0; i < numDocs; i++) { - final long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); - final Engine.Result result = applyOperation(engine, operations.get(i)); - if (i < maxDocs) { - assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); - assertNull(result.getFailure()); - assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo + 1L)); - } else { - assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE)); - assertNotNull(result.getFailure()); - assertThat(result.getFailure().getMessage(), - containsString("Number of documents in the index can't exceed [" + maxDocs + "]")); - assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); - assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); - } - assertFalse(engine.isClosed.get()); - } - } finally { - IndexWriterMaxDocsChanger.restoreMaxDocs(); - } - } - - public void testMaxDocsOnReplica() throws Exception { - assumeTrue("Deletes do not add documents to Lucene with soft-deletes disabled", - engine.config().getIndexSettings().isSoftDeleteEnabled()); - engine.close(); - int maxDocs = randomIntBetween(1, 100); - IndexWriterMaxDocsChanger.setMaxDocs(maxDocs); - try { - engine = new InternalTestEngine(engine.config(), maxDocs, LocalCheckpointTracker::new); - int numDocs = between(maxDocs + 1, maxDocs * 2); - List operations = generateHistoryOnReplica(numDocs, randomBoolean(), randomBoolean(), randomBoolean()); - final IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () 
-> { - for (Engine.Operation op : operations) { - applyOperation(engine, op); - } - }); - assertThat(error.getMessage(), containsString("number of documents in the index cannot exceed " + maxDocs)); - assertTrue(engine.isClosed.get()); - } finally { - IndexWriterMaxDocsChanger.restoreMaxDocs(); - } - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index dde6eb14ed3d5..73276a024ad3a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -32,7 +32,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.automaton.Operations; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.Fuzziness; @@ -717,7 +717,7 @@ public void testRegexQueryType() throws Exception { Mapper fieldMapper = defaultMapper.mappers().getMapper("field"); CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper; Query prefixQuery = completionFieldMapper.fieldType() - .regexpQuery(new BytesRef("co"), RegExp.ALL, Operations.DEFAULT_MAX_DETERMINIZED_STATES); + .regexpQuery(new BytesRef("co"), RegExp87.ALL, Operations.DEFAULT_MAX_DETERMINIZED_STATES); assertThat(prefixQuery, instanceOf(RegexCompletionQuery.class)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java index cb9a75e523a6b..c864a9c7742ce 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; @@ -44,7 +44,7 @@ public void testPrefixQuery() { public void testRegexpQuery() { MappedFieldType ft = IgnoredFieldMapper.IgnoredFieldType.INSTANCE; - Query expected = new RegexpQuery(new Term("_ignored", new BytesRef("foo?"))); + Query expected = new RegexpQuery87(new Term("_ignored", new BytesRef("foo?"))); assertEquals(expected, ft.regexpQuery("foo?", 0, 0, 10, null, MOCK_QSC)); ElasticsearchException ee = expectThrows(ElasticsearchException.class, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index 5be4da8af95bf..c057dd98ed41a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.NormsFieldExistsQuery; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; @@ -142,7 +142,7 @@ public void testRangeQuery() { public void testRegexpQuery() { MappedFieldType ft = new KeywordFieldType("field"); - assertEquals(new RegexpQuery(new Term("field","foo.*")), + assertEquals(new RegexpQuery87(new Term("field","foo.*")), ft.regexpQuery("foo.*", 0, 0, 10, null, 
MOCK_QSC)); MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldTypeTests.java index 6055aad825bce..d4af02d356767 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldTypeTests.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; @@ -43,7 +43,7 @@ public void testPrefixQuery() { public void testRegexpQuery() { MappedFieldType ft = RoutingFieldMapper.RoutingFieldType.INSTANCE; - Query expected = new RegexpQuery(new Term("_routing", new BytesRef("foo?"))); + Query expected = new RegexpQuery87(new Term("_routing", new BytesRef("foo?"))); assertEquals(expected, ft.regexpQuery("foo?", 0, 0, 10, null, MOCK_QSC)); ElasticsearchException ee = expectThrows(ElasticsearchException.class, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java index 277d6b8101dba..85738029533c7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.TermInSetQuery; import 
org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; @@ -101,7 +101,7 @@ public void testRangeQuery() { public void testRegexpQuery() { MappedFieldType ft = createFieldType(); - assertEquals(new RegexpQuery(new Term("field","foo.*")), + assertEquals(new RegexpQuery87(new Term("field","foo.*")), ft.regexpQuery("foo.*", 0, 0, 10, null, MOCK_QSC)); MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 3e3e0f874be19..d6a65a08503a8 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -40,7 +40,7 @@ import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; @@ -747,8 +747,8 @@ public void testToQueryRegExpQuery() throws Exception { Query query = queryStringQuery("/foo*bar/").defaultField(TEXT_FIELD_NAME) .maxDeterminizedStates(5000) .toQuery(createShardContext()); - assertThat(query, instanceOf(RegexpQuery.class)); - RegexpQuery regexpQuery = (RegexpQuery) query; + assertThat(query, instanceOf(RegexpQuery87.class)); + RegexpQuery87 regexpQuery = (RegexpQuery87) query; assertTrue(regexpQuery.toString().contains("/foo*bar/")); } diff --git a/server/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java index 5a3ba3b0940a9..314565bd2ebdd 100644 --- 
a/server/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RegexpQueryBuilderTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.elasticsearch.common.ParsingException; import org.elasticsearch.test.AbstractQueryTestCase; @@ -80,8 +80,8 @@ private static RegexpQueryBuilder randomRegexpQuery() { @Override protected void doAssertLuceneQuery(RegexpQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { - assertThat(query, instanceOf(RegexpQuery.class)); - RegexpQuery regexpQuery = (RegexpQuery) query; + assertThat(query, instanceOf(RegexpQuery87.class)); + RegexpQuery87 regexpQuery = (RegexpQuery87) query; String expectedFieldName = expectedFieldName( queryBuilder.fieldName()); assertThat(regexpQuery.getField(), equalTo(expectedFieldName)); diff --git a/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java b/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java index 25674e2f2d6f2..7f98e354e48ba 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchCancellationTests.java @@ -30,11 +30,11 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.automaton.CompiledAutomaton; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.search.internal.ContextIndexSearcher; import 
org.elasticsearch.tasks.TaskCancelledException; @@ -129,7 +129,7 @@ public void testExitableDirectoryReader() throws IOException { ContextIndexSearcher searcher = new ContextIndexSearcher(reader, IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), true); searcher.addQueryCancellation(cancellation); - CompiledAutomaton automaton = new CompiledAutomaton(new RegExp("a.*").toAutomaton()); + CompiledAutomaton automaton = new CompiledAutomaton(new RegExp87("a.*").toAutomaton()); expectThrows(TaskCancelledException.class, () -> searcher.getIndexReader().leaves().get(0).reader().terms(STRING_FIELD_NAME).iterator()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RareTermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RareTermsTests.java index bcd9b01f5cab3..6c59c427b4df7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RareTermsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RareTermsTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder; @@ -46,13 +46,13 @@ protected RareTermsAggregationBuilder createTestAggregatorBuilder() { IncludeExclude incExc = null; switch (randomInt(6)) { case 0: - incExc = new IncludeExclude(new RegExp("foobar"), null); + incExc = new IncludeExclude(new RegExp87("foobar"), null); break; case 1: - incExc = new IncludeExclude(null, new RegExp("foobaz")); + incExc = new IncludeExclude(null, new RegExp87("foobaz")); break; case 2: - incExc = new IncludeExclude(new 
RegExp("foobar"), new RegExp("foobaz")); + incExc = new IncludeExclude(new RegExp87("foobar"), new RegExp87("foobaz")); break; case 3: SortedSet includeValues = new TreeSet<>(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java index 1c270131ef401..bec3a3de49608 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.bucket; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; @@ -147,13 +147,13 @@ static IncludeExclude getIncludeExclude() { IncludeExclude incExc = null; switch (randomInt(5)) { case 0: - incExc = new IncludeExclude(new RegExp("foobar"), null); + incExc = new IncludeExclude(new RegExp87("foobar"), null); break; case 1: - incExc = new IncludeExclude(null, new RegExp("foobaz")); + incExc = new IncludeExclude(null, new RegExp87("foobaz")); break; case 2: - incExc = new IncludeExclude(new RegExp("foobar"), new RegExp("foobaz")); + incExc = new IncludeExclude(new RegExp87("foobar"), new RegExp87("foobaz")); break; case 3: SortedSet includeValues = new TreeSet<>(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index bbf8d1d81f80e..5b61bfcc7d87e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.bucket; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.BucketOrder; @@ -105,13 +105,13 @@ protected TermsAggregationBuilder createTestAggregatorBuilder() { IncludeExclude incExc = null; switch (randomInt(6)) { case 0: - incExc = new IncludeExclude(new RegExp("foobar"), null); + incExc = new IncludeExclude(new RegExp87("foobar"), null); break; case 1: - incExc = new IncludeExclude(null, new RegExp("foobaz")); + incExc = new IncludeExclude(null, new RegExp87("foobaz")); break; case 2: - incExc = new IncludeExclude(new RegExp("foobar"), new RegExp("foobaz")); + incExc = new IncludeExclude(new RegExp87("foobar"), new RegExp87("foobaz")); break; case 3: SortedSet includeValues = new TreeSet<>(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java index 9f39e0e896d3a..f0e6ad655f63d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java @@ -26,9 +26,9 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.Numbers; import 
org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -83,7 +83,7 @@ public void testMatchAllDocs() throws IOException { } public void testBadIncludeExclude() throws IOException { - IncludeExclude includeExclude = new IncludeExclude(new RegExp("foo"), null); + IncludeExclude includeExclude = new IncludeExclude(new RegExp87("foo"), null); // Make sure the include/exclude fails regardless of how the user tries to type hint the agg AggregationExecutionException e = expectThrows(AggregationExecutionException.class, diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java index 174a8a1aed6e8..e2afb0921a929 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java @@ -28,8 +28,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -99,7 +99,7 @@ public void testMatchAllDocs() throws IOException { } public void testBadIncludeExclude() throws IOException { - IncludeExclude includeExclude = new IncludeExclude(new RegExp("foo"), null); + IncludeExclude includeExclude = new IncludeExclude(new RegExp87("foo"), null); // Numerics don't support any regex include/exclude, so should fail no matter what we do diff --git 
a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index cc1d90e7d04e6..66ef309ed14b0 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -74,7 +74,6 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util.bkd.BKDConfig; import org.apache.lucene.util.bkd.BKDReader; import org.apache.lucene.util.bkd.BKDWriter; import org.elasticsearch.action.search.SearchShardTask; @@ -670,11 +669,11 @@ public void testDisableTopScoreCollection() throws Exception { context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[]{DocValueFormat.RAW})); topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false); - assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS); + assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES); QueryPhase.executeInternal(context); assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3)); - assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); + assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO); reader.close(); dir.close(); @@ -781,9 +780,8 @@ public void testIndexHasDuplicateData() throws IOException { int maxPointsInLeafNode = 40; float duplicateRatio = 0.7f; long duplicateValue = randomLongBetween(-10000000L, 10000000L); - BKDConfig config = new BKDConfig(1, 1, 8, maxPointsInLeafNode); try (Directory dir = newDirectory()) { - BKDWriter w = new 
BKDWriter(docsCount, dir, "tmp", config, 1, docsCount); + BKDWriter w = new BKDWriter(docsCount, dir, "tmp", 1, 1, 8, maxPointsInLeafNode, 1, docsCount); byte[] longBytes = new byte[8]; for (int docId = 0; docId < docsCount; docId++) { long value = randomFloat() < duplicateRatio ? duplicateValue : randomLongBetween(-10000000L, 10000000L); @@ -809,9 +807,8 @@ public void testIndexHasNoDuplicateData() throws IOException { int maxPointsInLeafNode = 40; float duplicateRatio = 0.3f; long duplicateValue = randomLongBetween(-10000000L, 10000000L); - BKDConfig config = new BKDConfig(1, 1, 8, maxPointsInLeafNode); try (Directory dir = newDirectory()) { - BKDWriter w = new BKDWriter(docsCount, dir, "tmp", config, 1, docsCount); + BKDWriter w = new BKDWriter(docsCount, dir, "tmp", 1, 1, 8, maxPointsInLeafNode, 1, docsCount); byte[] longBytes = new byte[8]; for (int docId = 0; docId < docsCount; docId++) { long value = randomFloat() < duplicateRatio ? duplicateValue : randomLongBetween(-10000000L, 10000000L); diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 33aa6bd09a1a4..868ca1ccdcd92 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -270,14 +270,12 @@ public void tearDown() throws Exception { if (engine != null && engine.isClosed.get() == false) { engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); - assertNoInFlightDocuments(engine); assertMaxSeqNoInCommitUserData(engine); assertAtMostOneLuceneDocumentPerSequenceNumber(engine); } if (replicaEngine != null && replicaEngine.isClosed.get() == false) { replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); 
assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); - assertNoInFlightDocuments(replicaEngine); assertMaxSeqNoInCommitUserData(replicaEngine); assertAtMostOneLuceneDocumentPerSequenceNumber(replicaEngine); } @@ -288,6 +286,7 @@ public void tearDown() throws Exception { } } + protected static ParseContext.Document testDocumentWithTextField() { return testDocumentWithTextField("test"); } @@ -532,10 +531,6 @@ protected InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFa return internalEngine; } - public static InternalEngine createEngine(EngineConfig engineConfig, int maxDocs) { - return new InternalEngine(engineConfig, maxDocs, LocalCheckpointTracker::new); - } - @FunctionalInterface public interface IndexWriterFactory { @@ -573,7 +568,7 @@ protected long doGenerateSeqNoForOperation(final Operation operation) { } }; } else { - return new InternalTestEngine(config, IndexWriter.MAX_DOCS, localCheckpointTrackerSupplier) { + return new InternalTestEngine(config, localCheckpointTrackerSupplier) { @Override IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { return (indexWriterFactory != null) ? 
@@ -1243,16 +1238,4 @@ static long maxSeqNosInReader(DirectoryReader reader) throws IOException { public static long getNumVersionLookups(Engine engine) { return ((InternalEngine) engine).getNumVersionLookups(); } - - public static long getInFlightDocCount(Engine engine) { - if (engine instanceof InternalEngine) { - return ((InternalEngine) engine).getInFlightDocCount(); - } else { - return 0; - } - } - - public static void assertNoInFlightDocuments(Engine engine) throws Exception { - assertBusy(() -> assertThat(getInFlightDocCount(engine), equalTo(0L))); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/InternalTestEngine.java b/test/framework/src/main/java/org/elasticsearch/index/engine/InternalTestEngine.java index d31fe609e6203..8c52d57aabc39 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/InternalTestEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/InternalTestEngine.java @@ -37,9 +37,8 @@ class InternalTestEngine extends InternalEngine { super(engineConfig); } - InternalTestEngine(EngineConfig engineConfig, int maxDocs, - BiFunction localCheckpointTrackerSupplier) { - super(engineConfig, maxDocs, localCheckpointTrackerSupplier); + InternalTestEngine(EngineConfig engineConfig, BiFunction localCheckpointTrackerSupplier) { + super(engineConfig, localCheckpointTrackerSupplier); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 4bd26fe96c929..dd092d8a528fc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1464,24 +1464,6 @@ public void assertConsistentHistoryBetweenTranslogAndLuceneIndex() throws IOExce } } - public void assertNoInFlightDocsInEngine() throws Exception { - assertBusy(() -> { - for (String nodeName : 
getNodeNames()) { - IndicesService indexServices = getInstance(IndicesService.class, nodeName); - for (IndexService indexService : indexServices) { - for (IndexShard indexShard : indexService) { - try { - final Engine engine = IndexShardTestCase.getEngine(indexShard); - assertThat(indexShard.routingEntry().toString(), EngineTestCase.getInFlightDocCount(engine), equalTo(0L)); - } catch (AlreadyClosedException ignored) { - // shard is closed - } - } - } - } - }); - } - private IndexShard getShardOrNull(ClusterState clusterState, ShardRouting shardRouting) { if (shardRouting == null || shardRouting.assignedToNode() == false) { return null; @@ -2548,10 +2530,9 @@ public void ensureEstimatedStats() { } @Override - public synchronized void assertAfterTest() throws Exception { + public synchronized void assertAfterTest() throws IOException { super.assertAfterTest(); assertRequestsFinished(); - assertNoInFlightDocsInEngine(); for (NodeAndClient nodeAndClient : nodes.values()) { NodeEnvironment env = nodeAndClient.node().getNodeEnvironment(); Set shardIds = env.lockedShards(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 9bb633b2efe56..b11841cffc6ae 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -90,7 +90,7 @@ public void beforeIndexDeletion() throws Exception { /** * This method checks all the things that need to be checked after each test */ - public void assertAfterTest() throws Exception { + public void assertAfterTest() throws IOException { ensureEstimatedStats(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java index 6fc82a528e29a..f429e0c46478b 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java @@ -61,13 +61,14 @@ import java.util.function.Supplier; import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.FIELDS_EXTENSION; -import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.INDEX_EXTENSION; -import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.META_EXTENSION; +import static org.apache.lucene.codecs.compressing.CompressingStoredFieldsWriter.INDEX_EXTENSION_PREFIX; +import static org.apache.lucene.codecs.compressing.FieldsIndexWriter.FIELDS_INDEX_EXTENSION_SUFFIX; +import static org.apache.lucene.codecs.compressing.FieldsIndexWriter.FIELDS_META_EXTENSION_SUFFIX; public class SourceOnlySnapshot { - private static final String FIELDS_INDEX_EXTENSION = INDEX_EXTENSION; - private static final String FIELDS_META_EXTENSION = META_EXTENSION; + private static final String FIELDS_INDEX_EXTENSION = INDEX_EXTENSION_PREFIX + FIELDS_INDEX_EXTENSION_SUFFIX; + private static final String FIELDS_META_EXTENSION = INDEX_EXTENSION_PREFIX + FIELDS_META_EXTENSION_SUFFIX; private final LinkedFilesDirectory targetDirectory; private final Supplier deleteByQuerySupplier; diff --git a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/resources/config.properties b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/resources/config.properties index d6298f90ac1e3..952ece6860ae7 100644 --- a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/resources/config.properties +++ b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/resources/config.properties @@ -8,7 +8,7 @@ index_name=mitre fetch_size=1000 size=2000 gcs_repo_name=eql_correctness_gcs_repo -gcs_snapshot_name=mitre-snapshot_7.10 -gcs_bucket_name=matriv-gcs -gcs_base_path=mitre-data +gcs_snapshot_name=correctness-snapshot_es7.10_lucene8.6.3 +gcs_bucket_name=eql-gcs 
+gcs_base_path=correctness-data gcs_client_name=eql_test diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index 6971982c40b8c..4176de696c699 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -11,12 +11,12 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.regex.Regex; @@ -212,7 +212,7 @@ public Query regexpQuery(String value, int syntaxFlags, int matchFlags, int maxD return new MatchNoDocsQuery(); } - final Automaton automaton = new RegExp(value, syntaxFlags, matchFlags).toAutomaton(maxDeterminizedStates); + final Automaton automaton = new RegExp87(value, syntaxFlags, matchFlags).toAutomaton(maxDeterminizedStates); final CharacterRunAutomaton runAutomaton = new CharacterRunAutomaton(automaton); if (runAutomaton.run(this.value)) { return new MatchAllDocsQuery(); diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java 
b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java index 224d650fbba4f..8a9c4fa737be6 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldTypeTests.java @@ -8,7 +8,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; @@ -98,10 +98,10 @@ public void testFuzzyQuery() { public void testRegexpQuery() { ConstantKeywordFieldType none = new ConstantKeywordFieldType("f", null); - assertEquals(new MatchNoDocsQuery(), none.regexpQuery("f..o", RegExp.ALL, 0, 10, null, null)); + assertEquals(new MatchNoDocsQuery(), none.regexpQuery("f..o", RegExp87.ALL, 0, 10, null, null)); ConstantKeywordFieldType ft = new ConstantKeywordFieldType("f", "foo"); - assertEquals(new MatchAllDocsQuery(), ft.regexpQuery("f.o", RegExp.ALL, 0, 10, null, null)); - assertEquals(new MatchNoDocsQuery(), ft.regexpQuery("f..o", RegExp.ALL, 0, 10, null, null)); + assertEquals(new MatchAllDocsQuery(), ft.regexpQuery("f.o", RegExp87.ALL, 0, 10, null, null)); + assertEquals(new MatchNoDocsQuery(), ft.regexpQuery("f..o", RegExp87.ALL, 0, 10, null, null)); } public void testFetchValue() throws Exception { diff --git a/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/RootFlatObjectFieldTypeTests.java b/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/RootFlatObjectFieldTypeTests.java index 8e7c11e3acd07..80e863e45f232 100644 --- 
a/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/RootFlatObjectFieldTypeTests.java +++ b/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/RootFlatObjectFieldTypeTests.java @@ -10,7 +10,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.WildcardQuery; @@ -103,7 +103,7 @@ public void testRangeQuery() { public void testRegexpQuery() { RootFlatObjectFieldType ft = createDefaultFieldType(); - Query expected = new RegexpQuery(new Term("field", "val.*")); + Query expected = new RegexpQuery87(new Term("field", "val.*")); Query actual = ft.regexpQuery("val.*", 0, 0, 10, null, MOCK_QSC); assertEquals(expected, actual); diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java index b6a085f396658..a4549a9c1b7f8 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java @@ -18,7 +18,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.RegexpQuery87; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; @@ -168,7 +168,7 @@ public Query regexpQuery( "[regexp] queries cannot be executed when '" + 
ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false." ); } - RegexpQuery query = new RegexpQuery(new Term(name(), new BytesRef(value)), syntaxFlags, matchFlags, maxDeterminizedStates) { + RegexpQuery87 query = new RegexpQuery87(new Term(name(), new BytesRef(value)), syntaxFlags, matchFlags, maxDeterminizedStates) { @Override protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException { diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/regex/RLikePattern.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/regex/RLikePattern.java index bcae2e1f05d75..6b2dfa5eb8110 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/regex/RLikePattern.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/regex/RLikePattern.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ql.expression.predicate.regex; import org.apache.lucene.util.automaton.Automaton; -import org.apache.lucene.util.automaton.RegExp; +import org.apache.lucene.search.RegExp87; public class RLikePattern extends AbstractStringPattern { @@ -18,7 +18,7 @@ public RLikePattern(String regexpPattern) { @Override Automaton createAutomaton() { - return new RegExp(regexpPattern).toAutomaton(); + return new RegExp87(regexpPattern).toAutomaton(); } @Override diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.6.3.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.6.3.jar.sha1 new file mode 100644 index 0000000000000..bf4ca9e8cb6cb --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.6.3.jar.sha1 @@ -0,0 +1 @@ +b7acbdd00fc5552abbd30d61e14e6c0673e6b49e \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.7.0-snapshot-72d8528c3a6.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.7.0-snapshot-72d8528c3a6.jar.sha1 deleted file mode 100644 index 
4a8cbc9bc49c0..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.7.0-snapshot-72d8528c3a6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4525d38643972dda5a69206416043f592bf6ad6 \ No newline at end of file diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index d93c2f7438c88..ec44a0b6495ad 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -31,13 +31,13 @@ import org.apache.lucene.search.MultiTermQuery.RewriteMethod; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegExp87; +import org.apache.lucene.search.RegExp87.Kind; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automaton; -import org.apache.lucene.util.automaton.RegExp; -import org.apache.lucene.util.automaton.RegExp.Kind; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.geo.ShapeRelation; @@ -406,7 +406,7 @@ public Query regexpQuery(String value, int syntaxFlags, int matchFlags, int maxD return new MatchNoDocsQuery(); } - RegExp ngramRegex = new RegExp(addLineEndChars(value), syntaxFlags, matchFlags); + RegExp87 ngramRegex = new RegExp87(addLineEndChars(toLowerCase(value)), syntaxFlags, matchFlags); Query approxBooleanQuery = toApproximationQuery(ngramRegex); Query approxNgramQuery = rewriteBoolToNgramQuery(approxBooleanQuery); @@ -417,7 +417,7 @@ public Query regexpQuery(String value, int syntaxFlags, int matchFlags, int maxD return 
existsQuery(context); } Supplier deferredAutomatonSupplier = ()-> { - RegExp regex = new RegExp(value, syntaxFlags, matchFlags); + RegExp87 regex = new RegExp87(value, syntaxFlags, matchFlags); return regex.toAutomaton(maxDeterminizedStates); }; @@ -446,7 +446,7 @@ public Query regexpQuery(String value, int syntaxFlags, int matchFlags, int maxD // * If an expression resolves to a RegExpQuery eg ?? then only the verification // query is run. // * Anything else is a concrete query that should be run on the ngram index. - public static Query toApproximationQuery(RegExp r) throws IllegalArgumentException { + public static Query toApproximationQuery(RegExp87 r) throws IllegalArgumentException { Query result = null; switch (r.kind) { case REGEXP_UNION: @@ -459,7 +459,7 @@ public static Query toApproximationQuery(RegExp r) throws IllegalArgumentExcepti String normalizedString = toLowerCase(r.s); result = new TermQuery(new Term("", normalizedString)); break; - case REGEXP_CHAR: + case REGEXP_CHAR: String cs = new StringBuilder().appendCodePoint(r.c).toString(); String normalizedChar = toLowerCase(cs); result = new TermQuery(new Term("", normalizedChar)); @@ -507,7 +507,7 @@ public static Query toApproximationQuery(RegExp r) throws IllegalArgumentExcepti return result; } - private static Query createConcatenationQuery(RegExp r) { + private static Query createConcatenationQuery(RegExp87 r) { // Create ANDs of expressions plus collapse consecutive TermQuerys into single longer ones ArrayList queries = new ArrayList<>(); findLeaves(r.exp1, Kind.REGEXP_CONCATENATION, queries); @@ -538,7 +538,7 @@ private static Query createConcatenationQuery(RegExp r) { } - private static Query createUnionQuery(RegExp r) { + private static Query createUnionQuery(RegExp87 r) { // Create an OR of clauses ArrayList queries = new ArrayList<>(); findLeaves(r.exp1, Kind.REGEXP_UNION, queries); @@ -565,7 +565,7 @@ private static Query createUnionQuery(RegExp r) { return new 
MatchAllButRequireVerificationQuery(); } - private static void findLeaves(RegExp exp, Kind kind, List queries) { + private static void findLeaves(RegExp87 exp, Kind kind, List queries) { if (exp.kind == kind) { findLeaves(exp.exp1, kind, queries); findLeaves( exp.exp2, kind, queries); diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index cdcd05d41aa65..58a8631ed6eaf 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegExp87; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -37,7 +38,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.ByteRunAutomaton; -import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.TriFunction; @@ -212,7 +212,7 @@ public void testTooBigQueryField() throws IOException { assertThat(wildcardFieldTopDocs.totalHits.value, equalTo(0L)); // Test regexp query - wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(queryString, RegExp.ALL, 0, 20000, null, MOCK_QSC); + wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(queryString, RegExp87.ALL, 0, 20000, null, MOCK_QSC); wildcardFieldTopDocs = searcher.search(wildcardFieldQuery, 10, Sort.INDEXORDER); assertThat(wildcardFieldTopDocs.totalHits.value, equalTo(0L)); 
@@ -311,9 +311,9 @@ public void testSearchResultsVersusKeywordField() throws IOException { break; case 1: pattern = getRandomRegexPattern(values); - int matchFlags = randomBoolean()? 0 : RegExp.ASCII_CASE_INSENSITIVE; - wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(pattern, RegExp.ALL, matchFlags, 20000, null, MOCK_QSC); - keywordFieldQuery = keywordFieldType.fieldType().regexpQuery(pattern, RegExp.ALL, matchFlags,20000, null, MOCK_QSC); + int matchFlags = randomBoolean()? 0 : RegExp87.ASCII_CASE_INSENSITIVE; + wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(pattern, RegExp87.ALL, matchFlags, 20000, null, MOCK_QSC); + keywordFieldQuery = keywordFieldType.fieldType().regexpQuery(pattern, RegExp87.ALL, matchFlags,20000, null, MOCK_QSC); break; case 2: pattern = randomABString(5); @@ -472,12 +472,12 @@ public void testRegexAcceleration() throws IOException, ParseException { // All these expressions should rewrite to a match all with no verification step required at all String superfastRegexes[]= { ".*", "...*..", "(foo|bar|.*)", "@"}; for (String regex : superfastRegexes) { - Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp.ALL, 0, 20000, null, MOCK_QSC); + Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp87.ALL, 0, 20000, null, MOCK_QSC); assertTrue(wildcardFieldQuery instanceof DocValuesFieldExistsQuery); } String matchNoDocsRegexes[]= { ""}; for (String regex : matchNoDocsRegexes) { - Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp.ALL, 0, 20000, null, MOCK_QSC); + Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp87.ALL, 0, 20000, null, MOCK_QSC); assertTrue(wildcardFieldQuery instanceof MatchNoDocsQuery); } @@ -498,7 +498,7 @@ public void testRegexAcceleration() throws IOException, ParseException { for (String[] test : acceleratedTests) { String regex = test[0]; String expectedAccelerationQueryString 
= test[1].replaceAll("_", ""+WildcardFieldMapper.TOKEN_START_OR_END_CHAR); - Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp.ALL, 0, 20000, null, MOCK_QSC); + Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp87.ALL, 0, 20000, null, MOCK_QSC); testExpectedAccelerationQuery(regex, wildcardFieldQuery, expectedAccelerationQueryString); } @@ -506,7 +506,7 @@ public void testRegexAcceleration() throws IOException, ParseException { // TODO we can possibly improve on some of these String matchAllButVerifyTests[]= { "..", "(a)?","(a|b){0,3}", "((foo)?|(foo|bar)?)", "@&~(abc.+)", "aaa.+&.+bbb"}; for (String regex : matchAllButVerifyTests) { - Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp.ALL, 0, 20000, null, MOCK_QSC); + Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp87.ALL, 0, 20000, null, MOCK_QSC); assertTrue(regex +" was not a pure verify query " +formatQuery(wildcardFieldQuery), wildcardFieldQuery instanceof AutomatonQueryOnBinaryDv); } @@ -522,7 +522,7 @@ public void testRegexAcceleration() throws IOException, ParseException { for (String[] test : suboptimalTests) { String regex = test[0]; String expectedAccelerationQueryString = test[1].replaceAll("_", ""+WildcardFieldMapper.TOKEN_START_OR_END_CHAR); - Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp.ALL, 0, 20000, null, MOCK_QSC); + Query wildcardFieldQuery = wildcardFieldType.fieldType().regexpQuery(regex, RegExp87.ALL, 0, 20000, null, MOCK_QSC); testExpectedAccelerationQuery(regex, wildcardFieldQuery, expectedAccelerationQueryString); } @@ -861,7 +861,7 @@ protected String convertToRandomRegex(String randomValue) { } //Assert our randomly generated regex actually matches the provided raw input. 
- RegExp regex = new RegExp(result.toString()); + RegExp87 regex = new RegExp87(result.toString()); Automaton automaton = regex.toAutomaton(); ByteRunAutomaton bytesMatcher = new ByteRunAutomaton(automaton); BytesRef br = new BytesRef(randomValue);