Skip to content

Commit

Permalink
Revert "fix: log TooManyBucketException and CircuitBreakerException"
Browse files — browse the repository at this point in the history
This reverts commit d7516b2.
  • Loading branch information
salvatore-campagna committed Sep 27, 2023
1 parent d7516b2 commit 6de5e96
Show file tree
Hide file tree
Showing 6 changed files with 7 additions and 61 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,6 @@ static TransportVersion def(int id) {
public static final TransportVersion NODE_INFO_INDEX_VERSION_ADDED = def(8_500_075);
public static final TransportVersion FIRST_NEW_ID_LAYOUT = def(8_501_00_0);
public static final TransportVersion COMMIT_PRIMARY_TERM_GENERATION = def(8_501_00_1);
public static final TransportVersion LOG_TOO_MANY_BUCKETS_ERROR = def(8_501_00_2);
/*
* STOP! READ THIS FIRST! No, really,
* ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,8 @@

package org.elasticsearch.search.aggregations;

import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree;
import org.elasticsearch.tasks.TaskCancelledException;
Expand Down Expand Up @@ -178,7 +175,6 @@ protected AggregationReduceContext forSubAgg(AggregationBuilder sub) {
* A {@linkplain AggregationReduceContext} to perform the final reduction.
*/
public static final class ForFinal extends AggregationReduceContext {
final Logger logger = LogManager.getLogger(ForFinal.class);
private final IntConsumer multiBucketConsumer;
private final PipelineTree pipelineTreeRoot;

Expand Down Expand Up @@ -214,28 +210,7 @@ public boolean isFinalReduce() {

@Override
protected void consumeBucketCountAndMaybeBreak(int size) {
final String aggregationName = builder().getName();
try {
multiBucketConsumer.accept(size);
} catch (MultiBucketConsumerService.TooManyBucketsException e) {
logger.error(
"Too many buckets for aggregation [%s] (max [%d], count [%d])".formatted(
aggregationName,
e.getMaxBuckets(),
e.getBucketsCount()
)
);
throw e;
} catch (CircuitBreakingException e) {
logger.error(
"Too much memory required for aggregation [%s] (max [%d] bytes, estimated [%d] bytes)".formatted(
aggregationName,
e.getByteLimit(),
e.getBytesWanted()
)
);
throw e;
}
multiBucketConsumer.accept(size);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
*/
package org.elasticsearch.search.aggregations;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.io.stream.StreamInput;
Expand Down Expand Up @@ -53,35 +52,27 @@ private void setMaxBucket(int maxBucket) {

public static class TooManyBucketsException extends AggregationExecutionException {
private final int maxBuckets;
private final int bucketsCount;

public TooManyBucketsException(String message, int maxBuckets, int bucketsCount) {
public TooManyBucketsException(String message, int maxBuckets) {
super(message);
this.maxBuckets = maxBuckets;
this.bucketsCount = bucketsCount;
}

public TooManyBucketsException(StreamInput in) throws IOException {
super(in);
maxBuckets = in.readInt();
bucketsCount = in.getTransportVersion().onOrAfter(TransportVersions.LOG_TOO_MANY_BUCKETS_ERROR) ? in.readVInt() : -1;
}

@Override
protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException {
super.writeTo(out, nestedExceptionsWriter);
out.writeInt(maxBuckets);
out.writeVInt(out.getTransportVersion().onOrAfter(TransportVersions.LOG_TOO_MANY_BUCKETS_ERROR) ? bucketsCount : -1);
}

public int getMaxBuckets() {
return maxBuckets;
}

public int getBucketsCount() {
return bucketsCount;
}

@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
Expand All @@ -90,9 +81,6 @@ public RestStatus status() {
@Override
protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("max_buckets", maxBuckets);
if (bucketsCount >= 0) {
builder.field("buckets_count", bucketsCount);
}
}
}

Expand Down Expand Up @@ -126,8 +114,7 @@ public void accept(int value) {
+ "] but this number of buckets was exceeded. This limit can be set by changing the ["
+ MAX_BUCKET_SETTING.getKey()
+ "] cluster level setting.",
limit,
count
limit
);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,20 +8,16 @@

package org.elasticsearch.search.aggregations.bucket.composite;

import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.CardinalityUpperBound;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.support.AggregationContext;

import java.io.IOException;
import java.util.Map;

class CompositeAggregationFactory extends AggregatorFactory {
final Logger logger = LogManager.getLogger(CompositeAggregationFactory.class);
private final int size;
private final CompositeValuesSourceConfig[] sources;
private final CompositeKey afterKey;
Expand All @@ -45,13 +41,6 @@ class CompositeAggregationFactory extends AggregatorFactory {
@Override
protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata)
throws IOException {
try {
return new CompositeAggregator(name, factories, context, parent, metadata, size, sources, afterKey);
} catch (MultiBucketConsumerService.TooManyBucketsException e) {
logger.error(
"Too many buckets for aggregation [%s] (max [%d], count [%d])".formatted(name, e.getMaxBuckets(), e.getBucketsCount())
);
throw e;
}
return new CompositeAggregator(name, factories, context, parent, metadata, size, sources, afterKey);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,7 @@ public final class CompositeAggregator extends BucketsAggregator implements Size
+ "]. This limit can be set by changing the ["
+ MAX_BUCKET_SETTING.getKey()
+ "] cluster level setting.",
bucketLimit,
size
bucketLimit
);
}
this.sourceConfigs = sourceConfigs;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -375,15 +375,12 @@ public void testCircuitBreakingException() throws IOException {

public void testTooManyBucketsException() throws IOException {
TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random());
int max = randomIntBetween(10, 20);
int count = randomIntBetween(100, 200);
MultiBucketConsumerService.TooManyBucketsException ex = serialize(
new MultiBucketConsumerService.TooManyBucketsException("Too many buckets", max, count),
new MultiBucketConsumerService.TooManyBucketsException("Too many buckets", 100),
version
);
assertEquals("Too many buckets", ex.getMessage());
assertEquals(max, ex.getMaxBuckets());
assertEquals(count, ex.getBucketsCount());
assertEquals(100, ex.getMaxBuckets());
}

public void testTimestampParsingException() throws IOException {
Expand Down

0 comments on commit 6de5e96

Please sign in to comment.