Merge branch 'master' into feature/runtime_fields
javanna committed Sep 3, 2020
2 parents a16b080 + 500e12f commit 60cda2a
Showing 474 changed files with 1,489 additions and 892 deletions.
7 changes: 6 additions & 1 deletion distribution/docker/src/docker/config/log4j2.properties
@@ -12,10 +12,15 @@ appender.deprecation_rolling.type = Console
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.layout.type = ECSJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation
appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter

appender.header_warning.type = HeaderWarningAppender
appender.header_warning.name = header_warning

logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
logger.deprecation.level = deprecation
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.header_warning.ref = header_warning
logger.deprecation.additivity = false

appender.index_search_slowlog_rolling.type = Console
2 changes: 1 addition & 1 deletion docs/reference/indices/put-mapping.asciidoc
@@ -74,7 +74,7 @@ Defaults to `false`.
fields, this mapping can include:

* Field name
* <<field-datatypes,Field data type>>
* <<mapping-types,Field data type>>
* <<mapping-params,Mapping parameters>>

For existing fields, see <<updating-field-mappings>>.
28 changes: 2 additions & 26 deletions docs/reference/mapping.asciidoc
@@ -22,38 +22,14 @@ treated. Examples of metadata fields include the document's
<<mapping-index-field,`_index`>>, <<mapping-id-field,`_id`>>, and
<<mapping-source-field,`_source`>> fields.

<<mapping-types,Fields>> or _properties_::
<<mapping-types,Fields>>::

A mapping contains a list of fields or `properties` pertinent to the
document.
document. Each field has its own <<mapping-types, data type>>.

NOTE: Before 7.0.0, the 'mappings' definition used to include a type name.
For more details, please see <<removal-of-types>>.

[discrete]
[[field-datatypes]]
== Field data types

Each field has a data `type` which can be:

* a simple type like <<text,`text`>>, <<keyword,`keyword`>>, <<date,`date`>>, <<number,`long`>>,
<<number,`double`>>, <<boolean,`boolean`>> or <<ip,`ip`>>.
* a type which supports the hierarchical nature of JSON such as
<<object,`object`>> or <<nested,`nested`>>.
* or a specialised type like <<geo-point,`geo_point`>>,
<<geo-shape,`geo_shape`>>, or <<completion-suggester,`completion`>>.

It is often useful to index the same field in different ways for different
purposes. For instance, a `string` field could be <<mapping-index,indexed>> as
a `text` field for full-text search, and as a `keyword` field for
sorting or aggregations. Alternatively, you could index a string field with
the <<analysis-standard-analyzer,`standard` analyzer>>, the
<<english-analyzer,`english`>> analyzer, and the
<<french-analyzer,`french` analyzer>>.

This is the purpose of _multi-fields_. Most data types support multi-fields
via the <<multi-fields>> parameter.
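
As an aside for readers of this hunk, here is a minimal sketch of the kind of multi-field mapping the removed paragraph describes (editor's illustration, not part of the commit; the field names are hypothetical). It builds the JSON body you would send with a create-index or put-mapping request, using the XContentBuilder API from this repository:

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class MultiFieldMappingExample {
    public static void main(String[] args) throws Exception {
        // "city" is indexed as `text` for full-text search, and again as the
        // `keyword` sub-field "city.raw" for sorting and aggregations.
        XContentBuilder mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("properties")
                    .startObject("city")
                        .field("type", "text")
                        .startObject("fields")
                            .startObject("raw")
                                .field("type", "keyword")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();
        System.out.println(Strings.toString(mapping));
    }
}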

[[mapping-limit-settings]]
[discrete]
=== Settings to prevent mappings explosion
2 changes: 1 addition & 1 deletion docs/reference/mapping/types/keyword.asciidoc
@@ -7,7 +7,7 @@

The keyword family includes the following field types:

* <<keyword,`keyword`>>, which is used for structured content such as IDs, email
* <<keyword-field-type,`keyword`>>, which is used for structured content such as IDs, email
addresses, hostnames, status codes, zip codes, or tags.
* <<constant-keyword-field-type,`constant_keyword`>> for keyword fields that always contain
the same value.
@@ -12,8 +12,8 @@ _search shard routing_ or _routing_.
=== Adaptive replica selection

By default, {es} uses _adaptive replica selection_ to route search requests.
This method selects an eligible node using <<allocation-awareness,allocation
awareness>> and the following criteria:
This method selects an eligible node using <<shard-allocation-awareness,shard
allocation awareness>> and the following criteria:

* Response time of prior requests between the coordinating node
and the eligible node
@@ -92,7 +92,7 @@ public static class Builder extends ParametrizedFieldMapper.Builder {
}
});
private final Parameter<Double> nullValue = new Parameter<>("null_value", false, () -> null,
(n, c, o) -> XContentMapValues.nodeDoubleValue(o), m -> toType(m).nullValue);
(n, c, o) -> o == null ? null : XContentMapValues.nodeDoubleValue(o), m -> toType(m).nullValue).acceptsNull();
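// Editor's note (illustration, not part of this diff): acceptsNull() plus the
// `o == null ? null : ...` guard means a mapping that explicitly declares
// "null_value": null is now tolerated; the parameter falls back to its default
// instead of XContentMapValues.nodeDoubleValue(null) throwing during parsing.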

private final Parameter<Map<String, String>> meta = Parameter.metaParam();

@@ -190,7 +190,7 @@ protected class DenseCollectionStrategy implements CollectionStrategy {
private final BitArray ordsBits;

public DenseCollectionStrategy(long maxOrd, BigArrays bigArrays) {
ordsBits = new BitArray((int) maxOrd, context.bigArrays());
ordsBits = new BitArray(maxOrd, context.bigArrays());
}

@Override
@@ -0,0 +1,77 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.test.ESIntegTestCase;

import java.util.Map;
import java.util.stream.IntStream;

import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

public class CardinalityWithRequestBreakerIT extends ESIntegTestCase {

/**
* Test that searches using cardinality aggregations return all request breaker memory.
*/
public void testRequestBreaker() throws Exception {
final String requestBreaker = randomIntBetween(1, 10000) + "kb";
logger.info("--> Using request breaker setting: {}", requestBreaker);

indexRandom(true, IntStream.range(0, randomIntBetween(10, 1000))
.mapToObj(i ->
client().prepareIndex("test").setId("id_" + i)
.setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5)))
).toArray(IndexRequestBuilder[]::new));

client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
requestBreaker))
.get();

try {
client().prepareSearch("test")
.addAggregation(terms("terms").field("field0.keyword")
.collectMode(randomFrom(Aggregator.SubAggCollectionMode.values()))
.order(BucketOrder.aggregation("cardinality", randomBoolean()))
.subAggregation(cardinality("cardinality").precisionThreshold(randomLongBetween(1, 40000)).field("field1.keyword")))
.get();
} catch (ElasticsearchException e) {
if (ExceptionsHelper.unwrap(e, CircuitBreakingException.class) == null) {
throw e;
}
}

client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()))
.get();

// validation done by InternalTestCluster.ensureEstimatedStats()
}
}
@@ -40,7 +40,7 @@
* @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
*/
final class DfsQueryPhase extends SearchPhase {
private final ArraySearchPhaseResults<SearchPhaseResult> queryResult;
private final QueryPhaseResultConsumer queryResult;
private final List<DfsSearchResult> searchResults;
private final AggregatedDfs dfs;
private final Function<ArraySearchPhaseResults<SearchPhaseResult>, SearchPhase> nextPhaseFactory;
@@ -31,6 +31,7 @@
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContextBuilder;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.query.QuerySearchResult;

import java.util.ArrayDeque;
@@ -43,6 +44,7 @@
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

import static org.elasticsearch.action.search.SearchPhaseController.getTopDocsSize;
import static org.elasticsearch.action.search.SearchPhaseController.mergeTopDocs;
import static org.elasticsearch.action.search.SearchPhaseController.setShardIndex;

@@ -52,7 +54,7 @@
* This implementation can be configured to batch up a certain amount of results and reduce
* them asynchronously in the provided {@link Executor} iff the buffer is exhausted.
*/
class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhaseResult> {
public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhaseResult> {
private static final Logger logger = LogManager.getLogger(QueryPhaseResultConsumer.class);

private final Executor executor;
@@ -76,43 +78,39 @@ class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhaseResult
* Creates a {@link QueryPhaseResultConsumer} that incrementally reduces aggregation results
* as shard results are consumed.
*/
QueryPhaseResultConsumer(Executor executor,
SearchPhaseController controller,
SearchProgressListener progressListener,
ReduceContextBuilder aggReduceContextBuilder,
NamedWriteableRegistry namedWriteableRegistry,
int expectedResultSize,
int bufferSize,
boolean hasTopDocs,
boolean hasAggs,
int trackTotalHitsUpTo,
int topNSize,
boolean performFinalReduce,
Consumer<Exception> onPartialMergeFailure) {
public QueryPhaseResultConsumer(SearchRequest request,
Executor executor,
SearchPhaseController controller,
SearchProgressListener progressListener,
NamedWriteableRegistry namedWriteableRegistry,
int expectedResultSize,
Consumer<Exception> onPartialMergeFailure) {
super(expectedResultSize);
this.executor = executor;
this.controller = controller;
this.progressListener = progressListener;
this.aggReduceContextBuilder = aggReduceContextBuilder;
this.aggReduceContextBuilder = controller.getReduceContext(request);
this.namedWriteableRegistry = namedWriteableRegistry;
this.topNSize = topNSize;
this.pendingMerges = new PendingMerges(bufferSize, trackTotalHitsUpTo);
this.hasTopDocs = hasTopDocs;
this.hasAggs = hasAggs;
this.performFinalReduce = performFinalReduce;
this.topNSize = getTopDocsSize(request);
this.performFinalReduce = request.isFinalReduce();
this.onPartialMergeFailure = onPartialMergeFailure;
SearchSourceBuilder source = request.source();
this.hasTopDocs = source == null || source.size() != 0;
this.hasAggs = source != null && source.aggregations() != null;
int bufferSize = (hasAggs || hasTopDocs) ? Math.min(request.getBatchedReduceSize(), expectedResultSize) : expectedResultSize;
this.pendingMerges = new PendingMerges(bufferSize, request.resolveTrackTotalHitsUpTo());
}
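
// Editor's sketch (assumption, not part of this diff): callers now hand over the
// SearchRequest and let the consumer derive hasTopDocs, hasAggs, topNSize,
// trackTotalHitsUpTo and the reduce buffer size itself, e.g.:
//
//   QueryPhaseResultConsumer consumer = new QueryPhaseResultConsumer(
//       request, executor, searchPhaseController, SearchProgressListener.NOOP,
//       namedWriteableRegistry, numShards, exc -> {});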

@Override
void consumeResult(SearchPhaseResult result, Runnable next) {
public void consumeResult(SearchPhaseResult result, Runnable next) {
super.consumeResult(result, () -> {});
QuerySearchResult querySearchResult = result.queryResult();
progressListener.notifyQueryResult(querySearchResult.getShardIndex());
pendingMerges.consume(querySearchResult, next);
}

@Override
SearchPhaseController.ReducedQueryPhase reduce() throws Exception {
public SearchPhaseController.ReducedQueryPhase reduce() throws Exception {
if (pendingMerges.hasPendingMerges()) {
throw new AssertionError("partial reduce in-flight");
} else if (pendingMerges.hasFailure()) {
@@ -558,23 +558,19 @@ public InternalSearchResponse buildResponse(SearchHits hits) {
}
}

InternalAggregation.ReduceContextBuilder getReduceContext(SearchRequest request) {
return requestToAggReduceContextBuilder.apply(request);
}

/**
* Returns a new ArraySearchPhaseResults instance. This might return an instance that reduces search responses incrementally.
* Returns a new {@link QueryPhaseResultConsumer} instance. This might return an instance that reduces search responses incrementally.
*/
ArraySearchPhaseResults<SearchPhaseResult> newSearchPhaseResults(Executor executor,
SearchProgressListener listener,
SearchRequest request,
int numShards,
Consumer<Exception> onPartialMergeFailure) {
SearchSourceBuilder source = request.source();
final boolean hasAggs = source != null && source.aggregations() != null;
final boolean hasTopDocs = source == null || source.size() != 0;
final int trackTotalHitsUpTo = request.resolveTrackTotalHitsUpTo();
InternalAggregation.ReduceContextBuilder aggReduceContextBuilder = requestToAggReduceContextBuilder.apply(request);
int topNSize = getTopDocsSize(request);
int bufferSize = (hasAggs || hasTopDocs) ? Math.min(request.getBatchedReduceSize(), numShards) : numShards;
return new QueryPhaseResultConsumer(executor, this, listener, aggReduceContextBuilder, namedWriteableRegistry,
numShards, bufferSize, hasTopDocs, hasAggs, trackTotalHitsUpTo, topNSize, request.isFinalReduce(), onPartialMergeFailure);
QueryPhaseResultConsumer newSearchPhaseResults(Executor executor,
SearchProgressListener listener,
SearchRequest request,
int numShards,
Consumer<Exception> onPartialMergeFailure) {
return new QueryPhaseResultConsumer(request, executor, this, listener, namedWriteableRegistry, numShards, onPartialMergeFailure);
}

static final class TopDocsStats {
@@ -38,7 +38,7 @@
/**
* A listener that allows to track progress of the {@link SearchAction}.
*/
abstract class SearchProgressListener {
public abstract class SearchProgressListener {
private static final Logger logger = LogManager.getLogger(SearchProgressListener.class);

public static final SearchProgressListener NOOP = new SearchProgressListener() {};
@@ -114,6 +114,7 @@ public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws Cir
this.addWithoutBreaking(-bytes);
throw e;
}
assert newUsed >= 0 : "Used bytes: [" + newUsed + "] must be >= 0";
return newUsed;
}

18 changes: 9 additions & 9 deletions server/src/main/java/org/elasticsearch/common/util/BitArray.java
@@ -36,25 +36,25 @@ public final class BitArray implements Releasable {
* Create the {@linkplain BitArray}.
* @param initialSize the initial size of underlying storage.
*/
public BitArray(int initialSize, BigArrays bigArrays) {
public BitArray(long initialSize, BigArrays bigArrays) {
this.bigArrays = bigArrays;
this.bits = bigArrays.newLongArray(initialSize, true);
}

/**
* Set the {@code index}th bit.
*/
public void set(int index) {
int wordNum = wordNum(index);
public void set(long index) {
long wordNum = wordNum(index);
bits = bigArrays.grow(bits, wordNum + 1);
bits.set(wordNum, bits.get(wordNum) | bitmask(index));
}

/**
* Clear the {@code index}th bit.
*/
public void clear(int index) {
int wordNum = wordNum(index);
public void clear(long index) {
long wordNum = wordNum(index);
if (wordNum >= bits.size()) {
/*
* No need to resize the array just to clear the bit because we'll
@@ -68,8 +68,8 @@ public void clear(int index) {
/**
* Is the {@code index}th bit set?
*/
public boolean get(int index) {
int wordNum = wordNum(index);
public boolean get(long index) {
long wordNum = wordNum(index);
if (wordNum >= bits.size()) {
/*
* If the word is bigger than the array then it could *never* have
@@ -81,11 +81,11 @@ public boolean get(int index) {
return (bits.get(wordNum) & bitmask) != 0;
}

private static int wordNum(int index) {
private static long wordNum(long index) {
return index >> 6;
}

private static long bitmask(int index) {
private static long bitmask(long index) {
return 1L << index;
}
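
A short usage sketch of the widened API (editor's illustration; it assumes the non-recycling BigArrays.NON_RECYCLING_INSTANCE is acceptable for a demo). Accepting long indices is what lets DenseCollectionStrategy above pass maxOrd through without the lossy (int) cast:

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.BitArray;

public class BitArrayLongIndexExample {
    public static void main(String[] args) {
        try (BitArray bits = new BitArray(8, BigArrays.NON_RECYCLING_INSTANCE)) {
            bits.set(70L);                    // grows the backing LongArray on demand
            assert bits.get(70L);
            assert bits.get(71L) == false;    // neighbouring bits stay clear
            bits.clear(70L);
            assert bits.get(70L) == false;
            // Ordinals above Integer.MAX_VALUE (e.g. 3_000_000_000L) are now representable
            // too; one is not set here only because the backing storage would grow to
            // several hundred megabytes.
        }
    }
}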

@@ -99,7 +99,9 @@ public static class Builder extends ParametrizedFieldMapper.Builder {
private final Parameter<Boolean> hasNorms
= Parameter.boolParam("norms", false, m -> toType(m).fieldType.omitNorms() == false, false);
private final Parameter<SimilarityProvider> similarity = new Parameter<>("similarity", false, () -> null,
(n, c, o) -> TypeParsers.resolveSimilarity(c, n, o.toString()), m -> toType(m).similarity);
(n, c, o) -> TypeParsers.resolveSimilarity(c, n, o), m -> toType(m).similarity)
.setSerializer((b, f, v) -> b.field(f, v == null ? null : v.name()), v -> v == null ? null : v.name())
.acceptsNull();

private final Parameter<String> normalizer
= Parameter.stringParam("normalizer", false, m -> toType(m).normalizerName, "default");
