From f9f88bb710fd07d7906f9562364401847df51338 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 13 Jun 2017 13:41:03 +0200 Subject: [PATCH 001/148] update nexus-staging plugin from 1.6.7 -> 1.6.8, qualify all used maven plugins --- pom.xml | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1db066e4d..326916cc3 100644 --- a/pom.xml +++ b/pom.xml @@ -86,6 +86,31 @@ maven-site-plugin 3.6 + + org.apache.maven.plugins + maven-clean-plugin + 3.0.0 + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + + org.apache.maven.plugins + maven-install-plugin + 2.5.2 + + + org.apache.maven.plugins + maven-jar-plugin + 3.0.2 + + + org.apache.maven.plugins + maven-resources-plugin + 3.0.1 + @@ -93,7 +118,7 @@ org.sonatype.plugins nexus-staging-maven-plugin - 1.6.7 + 1.6.8 true ossrh From de8a9b5d0a87ccccf209ed0f4684105b5798e864 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Thu, 15 Jun 2017 11:48:51 +0200 Subject: [PATCH 002/148] Update documentation, closes #337 --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a75eae0c7..81d44674d 100644 --- a/README.md +++ b/README.md @@ -78,6 +78,7 @@ influxDB.deleteDatabase(dbName); Note that the batching functionality creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. To do so simply call: ```influxDB.close()``` If all of your points are written to the same database and retention policy, the simpler write() methods can be used. +This requires influxdb-java v2.7 or newer. ```java InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); From 38fb2f65b11986fcde1b5f8a4d8bf949e50d2bcf Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Thu, 15 Jun 2017 12:16:24 +0200 Subject: [PATCH 003/148] Silent all remaining javadoc warnings --- src/main/java/org/influxdb/InfluxDB.java | 51 ++++++++++++++++++- .../java/org/influxdb/dto/BatchPoints.java | 10 ++-- src/main/java/org/influxdb/dto/Point.java | 4 +- src/main/java/org/influxdb/dto/Query.java | 8 +-- .../org/influxdb/impl/BatchProcessor.java | 1 + src/main/java/org/influxdb/impl/TimeUtil.java | 2 +- 6 files changed, 62 insertions(+), 14 deletions(-) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index f2b4c4ee0..ddc8f7316 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -81,16 +81,19 @@ public String value() { /** * Enable Gzip compress for http request body. + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB enableGzip(); /** * Disable Gzip compress for http request body. + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB disableGzip(); /** * Returns whether Gzip compress for http request body is enabled. + * @return true if gzip is enabled. */ public boolean isGzipEnabled(); @@ -98,7 +101,16 @@ public String value() { * Enable batching of single Point writes as {@link #enableBatch(int, int, TimeUnit, ThreadFactory)}} * using {@linkplain java.util.concurrent.Executors#defaultThreadFactory() default thread factory}. * + * @param actions + * the number of actions to collect + * @param flushDuration + * the time to wait at most. + * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. 
+ * * @see #enableBatch(int, int, TimeUnit, ThreadFactory) + * + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit); @@ -107,7 +119,18 @@ public String value() { * {@link #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer)} * using with a exceptionHandler that does nothing. * + * @param actions + * the number of actions to collect + * @param flushDuration + * the time to wait at most. + * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. + * @param threadFactory + * a ThreadFactory instance to be used. + * * @see #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer) + * + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, final ThreadFactory threadFactory); @@ -123,7 +146,9 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti * @param flushDuration * the time to wait at most. * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. * @param threadFactory + * a ThreadFactory instance to be used. * @param exceptionHandler * a consumer function to handle asynchronous errors * @return the InfluxDB instance to be able to use it in a fluent manner. @@ -139,6 +164,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti /** * Returns whether Batching is enabled. + * @return true if batch is enabled. */ public boolean isBatchEnabled(); @@ -168,6 +194,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti * Write a set of Points to the default database with the string records. * * @param records + * the points in the correct lineprotocol. */ public void write(final String records); @@ -175,6 +202,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti * Write a set of Points to the default database with the list of string records. * * @param records + * the List of points in the correct lineprotocol. */ public void write(final List records); @@ -206,6 +234,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti * @see 2696 * * @param batchPoints + * the points to write in BatchPoints. */ public void write(final BatchPoints batchPoints); @@ -214,7 +243,14 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti * * @see 2696 * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use * @param records + * the points in the correct lineprotocol. */ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final String records); @@ -224,7 +260,14 @@ public void write(final String database, final String retentionPolicy, * * @see 2696 * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use * @param records + * the List of points in the correct lineprotocol. 
*/ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final List records); @@ -233,7 +276,9 @@ public void write(final String database, final String retentionPolicy, * Write a set of Points to the influxdb database with the string records through UDP. * * @param udpPort - * @param records the content will be encoded by UTF-8 before sent. + * the udpPort where influxdb is listening + * @param records + * the content will be encoded by UTF-8 before sent. */ public void write(final int udpPort, final String records); @@ -241,7 +286,9 @@ public void write(final String database, final String retentionPolicy, * Write a set of Points to the influxdb database with the list of string records through UDP. * * @param udpPort - * @param records list of record, the content will be encoded by UTF-8 before sent. + * the udpPort where influxdb is listening + * @param records + * list of record, the content will be encoded by UTF-8 before sent. */ public void write(final int udpPort, final List records); diff --git a/src/main/java/org/influxdb/dto/BatchPoints.java b/src/main/java/org/influxdb/dto/BatchPoints.java index e56ced195..c67ddcf28 100644 --- a/src/main/java/org/influxdb/dto/BatchPoints.java +++ b/src/main/java/org/influxdb/dto/BatchPoints.java @@ -61,7 +61,7 @@ public static final class Builder { /** * The retentionPolicy to use. * - * @param policy + * @param policy the retentionPolicy to use * @return the Builder instance */ public Builder retentionPolicy(final String policy) { @@ -86,7 +86,7 @@ public Builder tag(final String tagName, final String value) { /** * Add a Point to this set of points. * - * @param pointToAdd + * @param pointToAdd the Point to add * @return the Builder instance */ public Builder point(final Point pointToAdd) { @@ -97,7 +97,7 @@ public Builder point(final Point pointToAdd) { /** * Add a set of Points to this set of points. * - * @param pointsToAdd + * @param pointsToAdd the List if Points to add * @return the Builder instance */ public Builder points(final Point... pointsToAdd) { @@ -108,7 +108,7 @@ public Builder points(final Point... pointsToAdd) { /** * Set the ConsistencyLevel to use. If not given it defaults to {@link ConsistencyLevel#ONE} * - * @param consistencyLevel + * @param consistencyLevel the ConsistencyLevel * @return the Builder instance */ public Builder consistency(final ConsistencyLevel consistencyLevel) { @@ -187,7 +187,7 @@ void setPoints(final List points) { /** * Add a single Point to these batches. * - * @param point + * @param point the Point to add * @return this Instance to be able to daisy chain calls. */ public BatchPoints point(final Point point) { diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java index 6156b21fc..fb6ad7a26 100644 --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -182,8 +182,8 @@ public Builder fields(final Map fieldsToAdd) { /** * Add a time to this point. * - * @param precisionToSet - * @param timeToSet + * @param timeToSet the time for this point + * @param precisionToSet the TimeUnit * @return the Builder instance. 
*/ public Builder time(final long timeToSet, final TimeUnit precisionToSet) { diff --git a/src/main/java/org/influxdb/dto/Query.java b/src/main/java/org/influxdb/dto/Query.java index cab423bff..503b120a8 100644 --- a/src/main/java/org/influxdb/dto/Query.java +++ b/src/main/java/org/influxdb/dto/Query.java @@ -17,16 +17,16 @@ public class Query { private final boolean requiresPost; /** - * @param command - * @param database + * @param command the query command + * @param database the database to query */ public Query(final String command, final String database) { this(command, database, false); } /** - * @param command - * @param database + * @param command the query command + * @param database the database to query * @param requiresPost true if the command requires a POST instead of GET to influxdb */ public Query(final String command, final String database, final boolean requiresPost) { diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index c637282c2..28f973dc9 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -52,6 +52,7 @@ public static final class Builder { /** * @param threadFactory * is optional. + * @return this Builder to use it fluent */ public Builder threadFactory(final ThreadFactory threadFactory) { this.threadFactory = threadFactory; diff --git a/src/main/java/org/influxdb/impl/TimeUtil.java b/src/main/java/org/influxdb/impl/TimeUtil.java index 079e50b6a..ca4cf987a 100644 --- a/src/main/java/org/influxdb/impl/TimeUtil.java +++ b/src/main/java/org/influxdb/impl/TimeUtil.java @@ -44,7 +44,7 @@ protected SimpleDateFormat initialValue() { /** * Convert from a TimeUnit to a influxDB timeunit String. * - * @param t + * @param t the TimeUnit * @return the String representation. 
*/ public static String toTimePrecision(final TimeUnit t) { From ff5b57d88d2ee2c2e9e87dcbee767b5831260378 Mon Sep 17 00:00:00 2001 From: Andy Flury Date: Thu, 15 Jun 2017 17:52:58 +0200 Subject: [PATCH 004/148] propagate IOExceptions to consumer --- src/main/java/org/influxdb/impl/InfluxDBImpl.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index b24f8d734..b2f50ecbd 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -412,7 +412,9 @@ public void onResponse(final Call call, final Response Date: Sun, 18 Jun 2017 21:33:48 +0200 Subject: [PATCH 005/148] Update okhttp from 3.8.0 -> 3.8.1 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 326916cc3..19f7b2f75 100644 --- a/pom.xml +++ b/pom.xml @@ -254,12 +254,12 @@ com.squareup.okhttp3 okhttp - 3.8.0 + 3.8.1 com.squareup.okhttp3 logging-interceptor - 3.8.0 + 3.8.1 From 6f9b4b19fbff128cfc31933c5552a8fde1007c30 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sun, 18 Jun 2017 21:37:10 +0200 Subject: [PATCH 006/148] update mockito from 2.8.9 -> 2.8.47 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 19f7b2f75..936a8b0b2 100644 --- a/pom.xml +++ b/pom.xml @@ -224,7 +224,7 @@ org.mockito mockito-core - 2.8.9 + 2.8.47 test From e94070ffae5bdc3cbf7e0643115c4438ff17db97 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Mon, 19 Jun 2017 07:23:57 +0200 Subject: [PATCH 007/148] Do not sysout.println in production code --- src/main/java/org/influxdb/impl/InfluxDBImpl.java | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index b24f8d734..56debe83f 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -206,12 +206,6 @@ public void disableBatch() { this.batchEnabled.set(false); if (this.batchProcessor != null) { this.batchProcessor.flushAndShutdown(); - if (this.logLevel != LogLevel.NONE) { - System.out.println( - "total writes:" + this.writeCount - + " unbatched:" + this.unBatchedCount - + " batchPoints:" + this.batchedCount); - } } } From 205fc1d9625524d8bdb3bc25db7bf5bbddabfcd8 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Mon, 19 Jun 2017 07:24:37 +0200 Subject: [PATCH 008/148] Update guava from 21.0 -> 22.0 used for tests, remove unneeded slf4j dependency --- pom.xml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/pom.xml b/pom.xml index 936a8b0b2..a90fe4dcd 100644 --- a/pom.xml +++ b/pom.xml @@ -227,16 +227,10 @@ 2.8.47 test - - org.slf4j - slf4j-simple - 1.7.25 - test - com.google.guava guava - 21.0 + 22.0 test From 87bf4da530419aa0d6eeaebfad0518f149e2e1b6 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Wed, 21 Jun 2017 08:57:23 +0200 Subject: [PATCH 009/148] QueryResult to Object mapper added #340 --- .../pojomapper/InfluxDBResultMapper.java | 273 +++++++++++++++++ .../pojomapper/annotation/Column.java | 38 +++ .../pojomapper/annotation/Measurement.java | 39 +++ .../exception/InfluxDBMapperException.java | 41 +++ .../pojomapper/InfluxDBResultMapperTest.java | 278 ++++++++++++++++++ 5 files changed, 669 insertions(+) create mode 100644 src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java create mode 100644 
src/main/java/org/influxdb/pojomapper/annotation/Column.java create mode 100644 src/main/java/org/influxdb/pojomapper/annotation/Measurement.java create mode 100644 src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java create mode 100644 src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java diff --git a/src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java b/src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java new file mode 100644 index 000000000..7f4cc54f8 --- /dev/null +++ b/src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java @@ -0,0 +1,273 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.pojomapper; + +import java.lang.reflect.Field; +import java.time.Instant; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.temporal.ChronoField; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +import org.influxdb.dto.QueryResult; +import org.influxdb.pojomapper.annotation.Column; +import org.influxdb.pojomapper.annotation.Measurement; +import org.influxdb.pojomapper.exception.InfluxDBMapperException; + +/** + * Main class responsible for mapping a QueryResult to POJO. + * + * @author fmachado + */ +public class InfluxDBResultMapper { + + /** + * Data structure used to cache classes used as measurements. + */ + private static final + ConcurrentMap> CLASS_FIELD_CACHE = new ConcurrentHashMap<>(); + + private static final int FRACTION_MIN_WIDTH = 0; + private static final int FRACTION_MAX_WIDTH = 6; + private static final boolean ADD_DECIMAL_POINT = true; + + /** + * When a query is executed without {@link TimeUnit}, InfluxDB returns the time + * column as an ISO8601 date. + */ + private static final DateTimeFormatter ISO8601_FORMATTER = new DateTimeFormatterBuilder() + .appendPattern("yyyy-MM-dd'T'HH:mm:ss") + .appendFraction(ChronoField.MICRO_OF_SECOND, FRACTION_MIN_WIDTH, FRACTION_MAX_WIDTH, ADD_DECIMAL_POINT) + .appendPattern("X") + .toFormatter(); + + /** + *
+ * Process a {@link QueryResult} object returned by the InfluxDB client inspecting the internal + * data structure and creating the respective object instances based on the Class passed as + * parameter. + *
+ * + * @param queryResult the InfluxDB result object + * @param clazz the Class that will be used to hold your measurement data + * @return a {@link List} of objects from the same Class passed as parameter and sorted on the + * same order as received from InfluxDB. + * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, + * clazz parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). + */ + public List toPOJO(final QueryResult queryResult, final Class clazz) throws InfluxDBMapperException { + throwExceptionIfMissingAnnotation(clazz); + throwExceptionIfResultWithError(queryResult); + cacheMeasurementClass(clazz); + + List result = new LinkedList(); + String measurementName = getMeasurementName(clazz); + queryResult.getResults().stream() + .forEach(singleResult -> { + singleResult.getSeries().stream() + .filter(series -> series.getName().equals(measurementName)) + .forEachOrdered(series -> { + parseSeriesAs(series, clazz, result); + }); + }); + + return result; + } + + void throwExceptionIfMissingAnnotation(final Class clazz) { + if (!clazz.isAnnotationPresent(Measurement.class)) { + throw new IllegalArgumentException( + "Class " + clazz.getName() + " is not annotated with @" + Measurement.class.getSimpleName()); + } + } + + void throwExceptionIfResultWithError(final QueryResult queryResult) { + if (queryResult.getError() != null) { + throw new InfluxDBMapperException("InfluxDB returned an error: " + queryResult.getError()); + } + + queryResult.getResults().forEach(seriesResult -> { + if (seriesResult.getError() != null) { + throw new InfluxDBMapperException("InfluxDB returned an error with Series: " + seriesResult.getError()); + } + }); + } + + void cacheMeasurementClass(final Class... classVarAgrs) { + for (Class clazz : classVarAgrs) { + if (CLASS_FIELD_CACHE.containsKey(clazz.getName())) { + continue; + } + ConcurrentMap initialMap = new ConcurrentHashMap<>(); + ConcurrentMap influxColumnAndFieldMap = CLASS_FIELD_CACHE.putIfAbsent(clazz.getName(), initialMap); + if (influxColumnAndFieldMap == null) { + influxColumnAndFieldMap = initialMap; + } + + for (Field field : clazz.getDeclaredFields()) { + Column colAnnotation = field.getAnnotation(Column.class); + if (colAnnotation != null) { + influxColumnAndFieldMap.put(colAnnotation.name(), field); + } + } + } + } + + String getMeasurementName(final Class clazz) { + return ((Measurement) clazz.getAnnotation(Measurement.class)).name(); + } + + List parseSeriesAs(final QueryResult.Series series, final Class clazz, final List result) { + int columnSize = series.getColumns().size(); + try { + T object = null; + for (List row : series.getValues()) { + for (int i = 0; i < columnSize; i++) { + String resultColumnName = series.getColumns().get(i); + Field correspondingField = CLASS_FIELD_CACHE.get(clazz.getName()).get(resultColumnName); + if (correspondingField != null) { + if (object == null) { + object = clazz.newInstance(); + } + setFieldValue(object, correspondingField, row.get(i)); + } + } + if (object != null) { + result.add(object); + object = null; + } + } + } catch (InstantiationException | IllegalAccessException e) { + throw new InfluxDBMapperException(e); + } + return result; + } + + /** + * InfluxDB client returns any number as Double. + * See https://github.com/influxdata/influxdb-java/issues/153#issuecomment-259681987 + * for more information. 
+ * + * @param object + * @param field + * @param value + * @throws IllegalArgumentException + * @throws IllegalAccessException + */ + void setFieldValue(final T object, final Field field, final Object value) + throws IllegalArgumentException, IllegalAccessException { + if (value == null) { + return; + } + Class fieldType = field.getType(); + boolean oldAccessibleState = field.isAccessible(); + try { + field.setAccessible(true); + if (fieldValueModified(fieldType, field, object, value) + || fieldValueForPrimitivesModified(fieldType, field, object, value) + || fieldValueForPrimitiveWrappersModified(fieldType, field, object, value)) { + return; + } + String msg = "Class '%s' field '%s' is from an unsupported type '%s'."; + throw new InfluxDBMapperException( + String.format(msg, object.getClass().getName(), field.getName(), field.getType())); + } catch (ClassCastException e) { + String msg = "Class '%s' field '%s' was defined with a different field type and caused a ClassCastException. " + + "The correct type is '%s' (current field value: '%s')."; + throw new InfluxDBMapperException( + String.format(msg, object.getClass().getName(), field.getName(), value.getClass().getName(), value)); + } finally { + field.setAccessible(oldAccessibleState); + } + } + + boolean fieldValueModified(final Class fieldType, final Field field, final T object, final Object value) + throws IllegalArgumentException, IllegalAccessException { + if (String.class.isAssignableFrom(fieldType)) { + field.set(object, String.valueOf(value)); + return true; + } + if (Instant.class.isAssignableFrom(fieldType)) { + Instant instant; + if (value instanceof String) { + instant = Instant.from(ISO8601_FORMATTER.parse(String.valueOf(value))); + } else if (value instanceof Long) { + instant = Instant.ofEpochMilli((Long) value); + } else if (value instanceof Double) { + instant = Instant.ofEpochMilli(((Double) value).longValue()); + } else { + throw new InfluxDBMapperException("Unsupported type " + field.getClass() + " for field " + field.getName()); + } + field.set(object, instant); + return true; + } + return false; + } + + boolean fieldValueForPrimitivesModified(final Class fieldType, final Field field, final T object, + final Object value) + throws IllegalArgumentException, IllegalAccessException { + if (double.class.isAssignableFrom(fieldType)) { + field.setDouble(object, ((Double) value).doubleValue()); + return true; + } + if (long.class.isAssignableFrom(fieldType)) { + field.setLong(object, ((Double) value).longValue()); + return true; + } + if (int.class.isAssignableFrom(fieldType)) { + field.setInt(object, ((Double) value).intValue()); + return true; + } + if (boolean.class.isAssignableFrom(fieldType)) { + field.setBoolean(object, Boolean.valueOf(String.valueOf(value)).booleanValue()); + return true; + } + return false; + } + + boolean fieldValueForPrimitiveWrappersModified(final Class fieldType, final Field field, final T object, + final Object value) + throws IllegalArgumentException, IllegalAccessException { + if (Double.class.isAssignableFrom(fieldType)) { + field.set(object, value); + return true; + } + if (Long.class.isAssignableFrom(fieldType)) { + field.set(object, Long.valueOf(((Double) value).longValue())); + return true; + } + if (Integer.class.isAssignableFrom(fieldType)) { + field.set(object, Integer.valueOf(((Double) value).intValue())); + return true; + } + if (Boolean.class.isAssignableFrom(fieldType)) { + field.set(object, Boolean.valueOf(String.valueOf(value))); + return true; + } + return false; + } +} diff 
--git a/src/main/java/org/influxdb/pojomapper/annotation/Column.java b/src/main/java/org/influxdb/pojomapper/annotation/Column.java new file mode 100644 index 000000000..4a2a56c57 --- /dev/null +++ b/src/main/java/org/influxdb/pojomapper/annotation/Column.java @@ -0,0 +1,38 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.pojomapper.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author fmachado + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.FIELD) +public @interface Column { + + String name(); + + boolean tag() default false; +} diff --git a/src/main/java/org/influxdb/pojomapper/annotation/Measurement.java b/src/main/java/org/influxdb/pojomapper/annotation/Measurement.java new file mode 100644 index 000000000..9fdafc5ec --- /dev/null +++ b/src/main/java/org/influxdb/pojomapper/annotation/Measurement.java @@ -0,0 +1,39 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb.pojomapper.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.concurrent.TimeUnit; + +/** + * @author fmachado + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface Measurement { + + String name(); + + TimeUnit timeUnit() default TimeUnit.MILLISECONDS; +} diff --git a/src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java b/src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java new file mode 100644 index 000000000..a0a7af85c --- /dev/null +++ b/src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.pojomapper.exception; + +/** + * @author fmachado + */ +public class InfluxDBMapperException extends RuntimeException { + + private static final long serialVersionUID = -7328402653918756407L; + + public InfluxDBMapperException(final String message, final Throwable cause) { + super(message, cause); + } + + public InfluxDBMapperException(final String message) { + super(message); + } + + public InfluxDBMapperException(final Throwable cause) { + super(cause); + } +} diff --git a/src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java new file mode 100644 index 000000000..84b6779bc --- /dev/null +++ b/src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java @@ -0,0 +1,278 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.pojomapper; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.time.Instant; +import java.util.Arrays; +import java.util.Date; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; +import java.util.UUID; + +import org.influxdb.dto.QueryResult; +import org.influxdb.pojomapper.annotation.Column; +import org.influxdb.pojomapper.annotation.Measurement; +import org.influxdb.pojomapper.exception.InfluxDBMapperException; +import org.junit.Test; + +/** + * @author fmachado + */ +public class InfluxDBResultMapperTest { + + InfluxDBResultMapper mapper = new InfluxDBResultMapper(); + + @Test + public void testToPOJO_HappyPath() { + // Given... + List columnList = Arrays.asList("time", "uuid"); + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setName("CustomMeasurement"); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... + assertEquals("there must be one entry in the result list", 1, myList.size()); + } + + @Test(expected = IllegalArgumentException.class) + public void testThrowExceptionIfMissingAnnotation() { + mapper.throwExceptionIfMissingAnnotation(String.class); + } + + @Test(expected = InfluxDBMapperException.class) + public void testThrowExceptionIfError_InfluxQueryResultHasError() { + QueryResult queryResult = new QueryResult(); + queryResult.setError("main queryresult error"); + + mapper.throwExceptionIfResultWithError(queryResult); + } + + @Test(expected = InfluxDBMapperException.class) + public void testThrowExceptionIfError_InfluxQueryResultSeriesHasError() { + QueryResult queryResult = new QueryResult(); + + QueryResult.Result seriesResult = new QueryResult.Result(); + seriesResult.setError("series error"); + + queryResult.setResults(Arrays.asList(seriesResult)); + + mapper.throwExceptionIfResultWithError(queryResult); + } + + @Test + public void testGetMeasurementName_testStateMeasurement() { + assertEquals("CustomMeasurement", mapper.getMeasurementName(MyCustomMeasurement.class)); + } + + @Test + public void testParseSeriesAs_testTwoValidSeries() { + // Given... 
+ mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time", "uuid"); + + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + List secondSeriesResult = Arrays.asList(Instant.now().plusSeconds(1).toEpochMilli(), UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult, secondSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + assertTrue("there must be two series in the result list", result.size() == 2); + + assertEquals("Field 'time' (1st series) is not valid", firstSeriesResult.get(0), result.get(0).time.toEpochMilli()); + assertEquals("Field 'uuid' (1st series) is not valid", firstSeriesResult.get(1), result.get(0).uuid); + + assertEquals("Field 'time' (2nd series) is not valid", secondSeriesResult.get(0), result.get(1).time.toEpochMilli()); + assertEquals("Field 'uuid' (2nd series) is not valid", secondSeriesResult.get(1), result.get(1).uuid); + } + + @Test + public void testParseSeriesAs_testNonNullAndValidValues() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time", "uuid", + "doubleObject", "longObject", "integerObject", + "doublePrimitive", "longPrimitive", "integerPrimitive", + "booleanObject", "booleanPrimitive"); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + String uuidAsString = UUID.randomUUID().toString(); + + // InfluxDB client returns any number as Double. + // See https://github.com/influxdata/influxdb-java/issues/153#issuecomment-259681987 + // for more information. + List seriesResult = Arrays.asList(now, uuidAsString, + new Double("1.01"), new Double("2"), new Double("3"), + new Double("1.01"), new Double("4"), new Double("5"), + "false", "true"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(seriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... 
+ MyCustomMeasurement myObject = result.get(0); + assertEquals("field 'time' does not match", now.longValue(), myObject.time.toEpochMilli()); + assertEquals("field 'uuid' does not match", uuidAsString, myObject.uuid); + + assertEquals("field 'doubleObject' does not match", asDouble(seriesResult.get(2)), myObject.doubleObject); + assertEquals("field 'longObject' does not match", new Long(asDouble(seriesResult.get(3)).longValue()), myObject.longObject); + assertEquals("field 'integerObject' does not match", new Integer(asDouble(seriesResult.get(4)).intValue()), myObject.integerObject); + + assertTrue("field 'doublePrimitive' does not match", + Double.compare(asDouble(seriesResult.get(5)).doubleValue(), myObject.doublePrimitive) == 0); + + assertTrue("field 'longPrimitive' does not match", + Long.compare(asDouble(seriesResult.get(6)).longValue(), myObject.longPrimitive) == 0); + + assertTrue("field 'integerPrimitive' does not match", + Integer.compare(asDouble(seriesResult.get(7)).intValue(), myObject.integerPrimitive) == 0); + + assertEquals("booleanObject 'time' does not match", + Boolean.valueOf(String.valueOf(seriesResult.get(8))), myObject.booleanObject); + + assertEquals("booleanPrimitive 'uuid' does not match", + Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive); + } + + Double asDouble(Object obj) { + return (Double) obj; + } + + @Test + public void testFieldValueModified_DateAsISO8601() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List firstSeriesResult = Arrays.asList("2017-06-19T09:29:45.655123Z"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + assertTrue(result.size() == 1); + } + + @Test(expected = InfluxDBMapperException.class) + public void testUnsupportedField() { + // Given... + mapper.cacheMeasurementClass(MyPojoWithUnsupportedField.class); + + List columnList = Arrays.asList("bar"); + List firstSeriesResult = Arrays.asList("content representing a Date"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... 
+ List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyPojoWithUnsupportedField.class, result); + } + + @Measurement(name = "CustomMeasurement") + static class MyCustomMeasurement { + + @Column(name = "time") + private Instant time; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "doubleObject") + private Double doubleObject; + + @Column(name = "longObject") + private Long longObject; + + @Column(name = "integerObject") + private Integer integerObject; + + @Column(name = "doublePrimitive") + private double doublePrimitive; + + @Column(name = "longPrimitive") + private long longPrimitive; + + @Column(name = "integerPrimitive") + private int integerPrimitive; + + @Column(name = "booleanObject") + private Boolean booleanObject; + + @Column(name = "booleanPrimitive") + private boolean booleanPrimitive; + + @SuppressWarnings("unused") + private String nonColumn1; + + @SuppressWarnings("unused") + private Random rnd; + + @Override + public String toString() { + return "MyCustomMeasurement [time=" + time + ", uuid=" + uuid + ", doubleObject=" + doubleObject + ", longObject=" + longObject + + ", integerObject=" + integerObject + ", doublePrimitive=" + doublePrimitive + ", longPrimitive=" + longPrimitive + + ", integerPrimitive=" + integerPrimitive + ", booleanObject=" + booleanObject + ", booleanPrimitive=" + booleanPrimitive + "]"; + } + } + + @Measurement(name = "foo") + static class MyPojoWithUnsupportedField { + + @Column(name = "bar") + private Date myDate; + } +} \ No newline at end of file From 907d3c0774de00501c4bce4efa6dced3fd96d115 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Fri, 23 Jun 2017 07:25:47 +0200 Subject: [PATCH 010/148] Moved influxdb-pojomapper classes to parent package #340 --- .../org/influxdb/{pojomapper => }/annotation/Column.java | 2 +- .../{pojomapper => }/annotation/Measurement.java | 2 +- .../exception => impl}/InfluxDBMapperException.java | 2 +- .../{pojomapper => impl}/InfluxDBResultMapper.java | 7 +++---- .../{pojomapper => impl}/InfluxDBResultMapperTest.java | 9 +++++---- 5 files changed, 11 insertions(+), 11 deletions(-) rename src/main/java/org/influxdb/{pojomapper => }/annotation/Column.java (97%) rename src/main/java/org/influxdb/{pojomapper => }/annotation/Measurement.java (97%) rename src/main/java/org/influxdb/{pojomapper/exception => impl}/InfluxDBMapperException.java (97%) rename src/main/java/org/influxdb/{pojomapper => impl}/InfluxDBResultMapper.java (98%) rename src/test/java/org/influxdb/{pojomapper => impl}/InfluxDBResultMapperTest.java (97%) diff --git a/src/main/java/org/influxdb/pojomapper/annotation/Column.java b/src/main/java/org/influxdb/annotation/Column.java similarity index 97% rename from src/main/java/org/influxdb/pojomapper/annotation/Column.java rename to src/main/java/org/influxdb/annotation/Column.java index 4a2a56c57..cde2fbe50 100644 --- a/src/main/java/org/influxdb/pojomapper/annotation/Column.java +++ b/src/main/java/org/influxdb/annotation/Column.java @@ -18,7 +18,7 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -package org.influxdb.pojomapper.annotation; +package org.influxdb.annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/src/main/java/org/influxdb/pojomapper/annotation/Measurement.java b/src/main/java/org/influxdb/annotation/Measurement.java similarity index 97% rename from src/main/java/org/influxdb/pojomapper/annotation/Measurement.java rename to src/main/java/org/influxdb/annotation/Measurement.java index 9fdafc5ec..8310f0f98 100644 --- a/src/main/java/org/influxdb/pojomapper/annotation/Measurement.java +++ b/src/main/java/org/influxdb/annotation/Measurement.java @@ -18,7 +18,7 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -package org.influxdb.pojomapper.annotation; +package org.influxdb.annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; diff --git a/src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java b/src/main/java/org/influxdb/impl/InfluxDBMapperException.java similarity index 97% rename from src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java rename to src/main/java/org/influxdb/impl/InfluxDBMapperException.java index a0a7af85c..ee88a5a00 100644 --- a/src/main/java/org/influxdb/pojomapper/exception/InfluxDBMapperException.java +++ b/src/main/java/org/influxdb/impl/InfluxDBMapperException.java @@ -18,7 +18,7 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -package org.influxdb.pojomapper.exception; +package org.influxdb.impl; /** * @author fmachado diff --git a/src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java similarity index 98% rename from src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java rename to src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index 7f4cc54f8..712df818f 100644 --- a/src/main/java/org/influxdb/pojomapper/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -18,7 +18,7 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -package org.influxdb.pojomapper; +package org.influxdb.impl; import java.lang.reflect.Field; import java.time.Instant; @@ -31,10 +31,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Measurement; import org.influxdb.dto.QueryResult; -import org.influxdb.pojomapper.annotation.Column; -import org.influxdb.pojomapper.annotation.Measurement; -import org.influxdb.pojomapper.exception.InfluxDBMapperException; /** * Main class responsible for mapping a QueryResult to POJO. 
diff --git a/src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java similarity index 97% rename from src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java rename to src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index 84b6779bc..027eff605 100644 --- a/src/test/java/org/influxdb/pojomapper/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -18,7 +18,7 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -package org.influxdb.pojomapper; +package org.influxdb.impl; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -31,10 +31,11 @@ import java.util.Random; import java.util.UUID; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Measurement; import org.influxdb.dto.QueryResult; -import org.influxdb.pojomapper.annotation.Column; -import org.influxdb.pojomapper.annotation.Measurement; -import org.influxdb.pojomapper.exception.InfluxDBMapperException; +import org.influxdb.impl.InfluxDBMapperException; +import org.influxdb.impl.InfluxDBResultMapper; import org.junit.Test; /** From 76b3bee8359bd3abc3285f33ccb63142d92080ab Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Fri, 23 Jun 2017 07:52:46 +0200 Subject: [PATCH 011/148] Moved influxdb-pojomapper exception to parent package #340 --- .../java/org/influxdb/{impl => }/InfluxDBMapperException.java | 2 +- src/main/java/org/influxdb/impl/InfluxDBResultMapper.java | 1 + src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) rename src/main/java/org/influxdb/{impl => }/InfluxDBMapperException.java (98%) diff --git a/src/main/java/org/influxdb/impl/InfluxDBMapperException.java b/src/main/java/org/influxdb/InfluxDBMapperException.java similarity index 98% rename from src/main/java/org/influxdb/impl/InfluxDBMapperException.java rename to src/main/java/org/influxdb/InfluxDBMapperException.java index ee88a5a00..a79dd9c7f 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBMapperException.java +++ b/src/main/java/org/influxdb/InfluxDBMapperException.java @@ -18,7 +18,7 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -package org.influxdb.impl; +package org.influxdb; /** * @author fmachado diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index 712df818f..beb07824d 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import org.influxdb.InfluxDBMapperException; import org.influxdb.annotation.Column; import org.influxdb.annotation.Measurement; import org.influxdb.dto.QueryResult; diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index 027eff605..af096e571 100644 --- a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -31,10 +31,10 @@ import java.util.Random; import java.util.UUID; +import org.influxdb.InfluxDBMapperException; import org.influxdb.annotation.Column; import org.influxdb.annotation.Measurement; import org.influxdb.dto.QueryResult; -import org.influxdb.impl.InfluxDBMapperException; import org.influxdb.impl.InfluxDBResultMapper; import org.junit.Test; From c79770ea0b40ff8a79e4ca80b910c178642414a4 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Fri, 23 Jun 2017 11:37:10 +0200 Subject: [PATCH 012/148] README.md updated with details about influxdb-pojomapper --- README.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 81d44674d..3c10745e9 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ public void write(final int udpPort, final Point point); note: make sure write content's total size should not > UDP protocol's limit(64K), or you should use http instead of udp. -#### chunking support (version 2.6+ required, unreleased): +#### Chunking support (version 2.6+ required): influxdb-java client now supports influxdb chunking. The following example uses a chunkSize of 20 and invokes the specified Consumer (e.g. System.out.println) for each received QueryResult ```java @@ -146,6 +146,74 @@ influxDB.query(query, 20, queryResult -> System.out.println(queryResult)); ``` +#### QueryResult mapper to POJO (version 2.7+ required, unreleased): + +An alternative way to handle the QueryResult object is now available. +Supposing that you have a measurement _CPU_: +``` +> INSERT cpu,host=serverA,region=us_west idle=0.64,happydevop=false,uptimesecs=123456789i +> +> select * from cpu +name: cpu +time happydevop host idle region uptimesecs +---- ---------- ---- ---- ------ ---------- +2017-06-20T15:32:46.202829088Z false serverA 0.64 us_west 123456789 +``` +And the following tag keys: +``` +> show tag keys from cpu +name: cpu +tagKey +------ +host +region +``` + +1. Create a POJO to represent your measurement. For example: +```Java +public class Cpu { + private Instant time; + private String hostname; + private String region; + private Double idle; + private Boolean happydevop; + private Long uptimeSecs; + // getters (and setters if you need) +} +``` +2. 
Add @Measurement and @Column annotations: +```Java +@Measurement(name = "cpu") +public class Cpu { + @Column(name = "time") + private Instant time; + @Column(name = "host", tag = true) + private String hostname; + @Column(name = "region", tag = true) + private String region; + @Column(name = "idle") + private Double idle; + @Column(name = "happydevop") + private Boolean happydevop; + @Column(name = "uptimesecs") + private Long uptimeSecs; + // getters (and setters if you need) +} +``` +3. Call _InfluxDBResultMapper.toPOJO(...)_ to map the QueryResult to your POJO: +``` +InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "root"); +String dbName = "myTimeseries"; +QueryResult queryResult = influxDB.query(new Query("SELECT * FROM cpu", dbName)); + +InfluxResultMapper resultMapper = new InfluxResultMapper(); // thread-safe - can be reused +List cpuList = resultMapper.toPOJO(queryResult, Cpu.class); +``` +**QueryResult mapper limitations** +- If your InfluxDB query contains multiple SELECT clauses, you will have to call InfluxResultMapper#toPOJO() multiple times to map every measurement returned by QueryResult to the respective POJO; +- If your InfluxDB query contains multiple SELECT clauses **for the same measurement**, InfluxResultMapper will process all results because there is no way to distinguish which one should be mapped to your POJO. It may result in an invalid collection being returned; + + ### Other Usages: For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java") From da240f0d54b8a3516905b21b0552521d01407a61 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sat, 24 Jun 2017 11:47:20 +0200 Subject: [PATCH 013/148] Run tests againts influxdb 1.3 as well --- compile-and-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compile-and-test.sh b/compile-and-test.sh index 660181a21..5aa0b11a9 100755 --- a/compile-and-test.sh +++ b/compile-and-test.sh @@ -4,7 +4,7 @@ # set -e -INFLUXDB_VERSIONS="1.2 1.1" +INFLUXDB_VERSIONS="1.3 1.2 1.1" for version in ${INFLUXDB_VERSIONS} do From bf43c1eba4238ba0505bef947372598879b30f75 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Mon, 26 Jun 2017 13:29:21 +0200 Subject: [PATCH 014/148] Fixed NPE when no series is present in QueryResult object InfluxDBResultMapper was not prepared to handle the 'null' value instad of an empty collection returned by the JSON parser used by InfluxDB client. The null value will be returned whenever a query returns with no result as explained in https://github.com/influxdata/influxdb/issues/7596 --- .../influxdb/impl/InfluxDBResultMapper.java | 11 +++++--- .../impl/InfluxDBResultMapperTest.java | 25 +++++++++++++++++-- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index beb07824d..cd20bb6ed 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -27,6 +27,7 @@ import java.time.temporal.ChronoField; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -37,7 +38,7 @@ import org.influxdb.dto.QueryResult; /** - * Main class responsible for mapping a QueryResult to POJO. 
+ * Main class responsible for mapping a QueryResult to a POJO. * * @author fmachado */ @@ -79,6 +80,9 @@ public class InfluxDBResultMapper { * possible to define the values of your POJO (e.g. due to an unsupported field type). */ public List toPOJO(final QueryResult queryResult, final Class clazz) throws InfluxDBMapperException { + Objects.requireNonNull(queryResult, "queryResult"); + Objects.requireNonNull(clazz, "clazz"); + throwExceptionIfMissingAnnotation(clazz); throwExceptionIfResultWithError(queryResult); cacheMeasurementClass(clazz); @@ -86,8 +90,9 @@ public List toPOJO(final QueryResult queryResult, final Class clazz) t List result = new LinkedList(); String measurementName = getMeasurementName(clazz); queryResult.getResults().stream() - .forEach(singleResult -> { - singleResult.getSeries().stream() + .filter(internalResult -> Objects.nonNull(internalResult) && Objects.nonNull(internalResult.getSeries())) + .forEach(internalResult -> { + internalResult.getSeries().stream() .filter(series -> series.getName().equals(measurementName)) .forEachOrdered(series -> { parseSeriesAs(series, clazz, result); diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index af096e571..61409a77b 100644 --- a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -175,10 +175,10 @@ public void testParseSeriesAs_testNonNullAndValidValues() { assertTrue("field 'integerPrimitive' does not match", Integer.compare(asDouble(seriesResult.get(7)).intValue(), myObject.integerPrimitive) == 0); - assertEquals("booleanObject 'time' does not match", + assertEquals("field 'booleanObject' does not match", Boolean.valueOf(String.valueOf(seriesResult.get(8))), myObject.booleanObject); - assertEquals("booleanPrimitive 'uuid' does not match", + assertEquals("field 'booleanPrimitive' does not match", Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive); } @@ -223,6 +223,27 @@ public void testUnsupportedField() { mapper.parseSeriesAs(series, MyPojoWithUnsupportedField.class, result); } + /** + * https://github.com/influxdata/influxdb/issues/7596 for more information. + */ + @Test + public void testToPOJO_SeriesFromQueryResultIsNull() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(null); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... 
+ assertTrue("there must NO entry in the result list", myList.isEmpty()); + } + @Measurement(name = "CustomMeasurement") static class MyCustomMeasurement { From 3c624b22b4475c3c735c37d051cb20afd9b53675 Mon Sep 17 00:00:00 2001 From: Andy Flury Date: Mon, 26 Jun 2017 14:53:08 +0200 Subject: [PATCH 015/148] add ChunkingExceptionTest --- .../java/org/influxdb/impl/InfluxDBImpl.java | 18 ++++ .../influxdb/impl/ChunkingExceptionTest.java | 89 +++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 src/test/java/org/influxdb/impl/ChunkingExceptionTest.java diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index b2f50ecbd..b30037549 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -97,6 +97,24 @@ public InfluxDBImpl(final String url, final String username, final String passwo this.adapter = moshi.adapter(QueryResult.class); } + InfluxDBImpl(final String url, final String username, final String password, final OkHttpClient.Builder client, + final InfluxDBService influxDBService, final JsonAdapter adapter) { + super(); + this.hostAddress = parseHostAddress(url); + this.username = username; + this.password = password; + this.loggingInterceptor = new HttpLoggingInterceptor(); + this.loggingInterceptor.setLevel(Level.NONE); + this.gzipRequestInterceptor = new GzipRequestInterceptor(); + this.retrofit = new Retrofit.Builder() + .baseUrl(url) + .client(client.addInterceptor(loggingInterceptor).addInterceptor(gzipRequestInterceptor).build()) + .addConverterFactory(MoshiConverterFactory.create()) + .build(); + this.influxDBService = influxDBService; + this.adapter = adapter; + } + public InfluxDBImpl(final String url, final String username, final String password, final OkHttpClient.Builder client, final String database, final String retentionPolicy, final ConsistencyLevel consistency) { diff --git a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java new file mode 100644 index 000000000..605fb430d --- /dev/null +++ b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java @@ -0,0 +1,89 @@ +package org.influxdb.impl; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.EOFException; +import java.io.IOException; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import org.influxdb.InfluxDB; +import org.influxdb.TestUtils; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.ArgumentCaptor; + +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.JsonReader; + +import okhttp3.OkHttpClient; +import okhttp3.ResponseBody; +import okio.Buffer; +import retrofit2.Call; +import retrofit2.Callback; +import retrofit2.Response; + +public class ChunkingExceptionTest { + + @Test + public void testChunkingIOException() throws IOException, InterruptedException { + + testChunkingException(new IOException(), "java.io.IOException"); + } + + @Test + public void testChunkingEOFException() throws IOException, InterruptedException { + + 
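+ // EOFException marks the normal end of a chunked stream, so the expected result carries the "DONE" marker.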
testChunkingException(new EOFException(), "DONE"); + } + + public void testChunkingException(Exception ex, String message) throws IOException, InterruptedException { + + InfluxDBService influxDBService = mock(InfluxDBService.class); + JsonAdapter adapter = mock(JsonAdapter.class); + Call call = mock(Call.class); + ResponseBody responseBody = mock(ResponseBody.class); + + when(influxDBService.query(any(String.class), any(String.class), any(String.class), any(String.class), anyInt())).thenReturn(call); + when(responseBody.source()).thenReturn(new Buffer()); + doThrow(ex).when(adapter).fromJson(any(JsonReader.class)); + + String url = "http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true); + InfluxDB influxDB = new InfluxDBImpl(url, "admin", "admin", new OkHttpClient.Builder(), influxDBService, adapter) { + @Override + public String version() { + return "9.99"; + } + }; + + final BlockingQueue queue = new LinkedBlockingQueue<>(); + Query query = new Query("SELECT * FROM disk", "xxx"); + influxDB.query(query, 2, new Consumer() { + @Override + public void accept(QueryResult result) { + queue.add(result); + } + }); + + ArgumentCaptor> argumentCaptor = ArgumentCaptor.forClass(Callback.class); + verify(call).enqueue(argumentCaptor.capture()); + Callback callback = argumentCaptor.getValue(); + + callback.onResponse(call, Response.success(responseBody)); + + QueryResult result = queue.poll(20, TimeUnit.SECONDS); + Assert.assertNotNull(result); + Assert.assertEquals(message, result.getError()); + + } + +} From 03f39ff8e87e3eb2d2e0d30488f73fd063aa5be4 Mon Sep 17 00:00:00 2001 From: Andy Flury Date: Mon, 26 Jun 2017 15:10:32 +0200 Subject: [PATCH 016/148] ChunkingExceptionTest: deleteDatabase --- src/test/java/org/influxdb/impl/ChunkingExceptionTest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java index 605fb430d..e9ebb9fa6 100644 --- a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java +++ b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java @@ -65,8 +65,9 @@ public String version() { } }; + String dbName = "write_unittest_" + System.currentTimeMillis(); final BlockingQueue queue = new LinkedBlockingQueue<>(); - Query query = new Query("SELECT * FROM disk", "xxx"); + Query query = new Query("SELECT * FROM disk", dbName); influxDB.query(query, 2, new Consumer() { @Override public void accept(QueryResult result) { @@ -84,6 +85,7 @@ public void accept(QueryResult result) { Assert.assertNotNull(result); Assert.assertEquals(message, result.getError()); + influxDB.deleteDatabase(dbName); } } From 976ddd818c546329aec867fa07ca566283d22e89 Mon Sep 17 00:00:00 2001 From: Andy Flury Date: Mon, 26 Jun 2017 15:18:31 +0200 Subject: [PATCH 017/148] remove deleteDatabase --- src/test/java/org/influxdb/impl/ChunkingExceptionTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java index e9ebb9fa6..b8e892fa6 100644 --- a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java +++ b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java @@ -84,8 +84,6 @@ public void accept(QueryResult result) { QueryResult result = queue.poll(20, TimeUnit.SECONDS); Assert.assertNotNull(result); Assert.assertEquals(message, result.getError()); - - influxDB.deleteDatabase(dbName); } } From 3f0d8d97b653da7297249fe8e6cb9993793d885f Mon 
Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 27 Jun 2017 14:16:20 +0200 Subject: [PATCH 018/148] release 2.7 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index a90fe4dcd..54eabf6a4 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ org.influxdb influxdb-java jar - 2.7-SNAPSHOT + 2.7 influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org From 3a8624aceef43ac1e2fa10eaf32b4891f5826432 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Wed, 28 Jun 2017 00:48:19 +0200 Subject: [PATCH 019/148] Added support to map QueryResult created using the GROUP BY clause. --- .../influxdb/impl/InfluxDBResultMapper.java | 24 +++-- .../impl/InfluxDBResultMapperTest.java | 89 ++++++++++++++++++- 2 files changed, 106 insertions(+), 7 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index cd20bb6ed..75884eed7 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -28,6 +28,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Objects; +import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -147,12 +148,12 @@ String getMeasurementName(final Class clazz) { List parseSeriesAs(final QueryResult.Series series, final Class clazz, final List result) { int columnSize = series.getColumns().size(); + ConcurrentMap colNameAndFieldMap = CLASS_FIELD_CACHE.get(clazz.getName()); try { T object = null; for (List row : series.getValues()) { for (int i = 0; i < columnSize; i++) { - String resultColumnName = series.getColumns().get(i); - Field correspondingField = CLASS_FIELD_CACHE.get(clazz.getName()).get(resultColumnName); + Field correspondingField = colNameAndFieldMap.get(series.getColumns().get(i)/*InfluxDB columnName*/); if (correspondingField != null) { if (object == null) { object = clazz.newInstance(); @@ -160,6 +161,19 @@ List parseSeriesAs(final QueryResult.Series series, final Class clazz, setFieldValue(object, correspondingField, row.get(i)); } } + // When the "GROUP BY" clause is used, "tags" are returned as Map and + // accordingly with InfluxDB documentation + // https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value + // "tag" values are always String. 
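+ // Tag values are therefore injected through the same cached column-to-field lookup used for regular columns.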
+ if (series.getTags() != null && !series.getTags().isEmpty()) { + for (Entry entry : series.getTags().entrySet()) { + Field correspondingField = colNameAndFieldMap.get(entry.getKey()/*InfluxDB columnName*/); + if (correspondingField != null) { + // I don't think it is possible to reach here without a valid "object" + setFieldValue(object, correspondingField, entry.getValue()); + } + } + } if (object != null) { result.add(object); object = null; @@ -233,8 +247,7 @@ boolean fieldValueModified(final Class fieldType, final Field field, fina } boolean fieldValueForPrimitivesModified(final Class fieldType, final Field field, final T object, - final Object value) - throws IllegalArgumentException, IllegalAccessException { + final Object value) throws IllegalArgumentException, IllegalAccessException { if (double.class.isAssignableFrom(fieldType)) { field.setDouble(object, ((Double) value).doubleValue()); return true; @@ -255,8 +268,7 @@ boolean fieldValueForPrimitivesModified(final Class fieldType, final Fiel } boolean fieldValueForPrimitiveWrappersModified(final Class fieldType, final Field field, final T object, - final Object value) - throws IllegalArgumentException, IllegalAccessException { + final Object value) throws IllegalArgumentException, IllegalAccessException { if (Double.class.isAssignableFrom(fieldType)) { field.set(object, value); return true; diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index 61409a77b..0c9b86a05 100644 --- a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -26,8 +26,10 @@ import java.time.Instant; import java.util.Arrays; import java.util.Date; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Random; import java.util.UUID; @@ -243,7 +245,61 @@ public void testToPOJO_SeriesFromQueryResultIsNull() { // Then... assertTrue("there must NO entry in the result list", myList.isEmpty()); } - + + @Test + public void testToPOJO_QueryResultCreatedByGroupByClause() { + // Given... + mapper.cacheMeasurementClass(GroupByCarrierDeviceOS.class); + + List columnList = Arrays.asList("time", "median", "min", "max"); + + // InfluxDB client returns the time representation as Double. 
+ Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + + List firstSeriesResult = Arrays.asList(now, new Double("233.8"), new Double("0.0"), + new Double("3090744.0")); + // When the "GROUP BY" clause is used, "tags" are returned as Map + Map firstSeriesTagMap = new HashMap<>(); + firstSeriesTagMap.put("CARRIER", "000/00"); + firstSeriesTagMap.put("DEVICE_OS_VERSION", "4.4.2"); + + List secondSeriesResult = Arrays.asList(now, new Double("552.0"), new Double("135.0"), + new Double("267705.0")); + Map secondSeriesTagMap = new HashMap<>(); + secondSeriesTagMap.put("CARRIER", "000/01"); + secondSeriesTagMap.put("DEVICE_OS_VERSION", "9.3.5"); + + QueryResult.Series firstSeries = new QueryResult.Series(); + firstSeries.setColumns(columnList); + firstSeries.setValues(Arrays.asList(firstSeriesResult)); + firstSeries.setTags(firstSeriesTagMap); + firstSeries.setName("tb_network"); + + QueryResult.Series secondSeries = new QueryResult.Series(); + secondSeries.setColumns(columnList); + secondSeries.setValues(Arrays.asList(secondSeriesResult)); + secondSeries.setTags(secondSeriesTagMap); + secondSeries.setName("tb_network"); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(firstSeries, secondSeries)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List myList = mapper.toPOJO(queryResult, GroupByCarrierDeviceOS.class); + + // Then... + GroupByCarrierDeviceOS firstGroupByEntry = myList.get(0); + assertEquals("field 'carrier' does not match", "000/00", firstGroupByEntry.carrier); + assertEquals("field 'deviceOsVersion' does not match", "4.4.2", firstGroupByEntry.deviceOsVersion); + + GroupByCarrierDeviceOS secondGroupByEntry = myList.get(1); + assertEquals("field 'carrier' does not match", "000/01", secondGroupByEntry.carrier); + assertEquals("field 'deviceOsVersion' does not match", "9.3.5", secondGroupByEntry.deviceOsVersion); + } + @Measurement(name = "CustomMeasurement") static class MyCustomMeasurement { @@ -297,4 +353,35 @@ static class MyPojoWithUnsupportedField { @Column(name = "bar") private Date myDate; } + + /** + * Class created based on example from https://github.com/influxdata/influxdb-java/issues/343 + */ + @Measurement(name = "tb_network") + static class GroupByCarrierDeviceOS { + + @Column(name = "time") + private Instant time; + + @Column(name = "CARRIER", tag = true) + private String carrier; + + @Column(name = "DEVICE_OS_VERSION", tag = true) + private String deviceOsVersion; + + @Column(name = "median") + private Double median; + + @Column(name = "min") + private Double min; + + @Column(name = "max") + private Double max; + + @Override + public String toString() { + return "GroupByCarrierDeviceOS [time=" + time + ", carrier=" + carrier + ", deviceOsVersion=" + deviceOsVersion + + ", median=" + median + ", min=" + min + ", max=" + max + "]"; + } + } } \ No newline at end of file From 8f601ccade6bed9f5a36929994bab3a40614bb68 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 28 Jun 2017 09:18:06 +0200 Subject: [PATCH 020/148] Start 2.8 development cycle --- CHANGELOG.md | 7 +++++-- README.md | 4 ++-- pom.xml | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 878679b51..bc1521636 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,11 @@ -## v2.7 [unreleased] +## 2.8 [unreleased] + +## v2.7 [2017-06-26] #### Features - Simplify write() methods for use cases writing all points to 
same database and retention policy [PR #327](https://github.com/influxdata/influxdb-java/pull/327) + - QueryResult to Object mapper added [PR #341](https://github.com/influxdata/influxdb-java/pull/341) #### Fixes @@ -13,7 +16,7 @@ - Significant (~35%) performance improvements for write speed with less memory footprint. [PR #330](https://github.com/influxdata/influxdb-java/pull/330) - Drop guava runtime dependency which reduces jar size from 1MB -> 49KB [PR #322](https://github.com/influxdata/influxdb-java/pull/322) -## v2.6 [2017.06-08] +## v2.6 [2017-06-08] #### Features diff --git a/README.md b/README.md index 3c10745e9..afe035753 100644 --- a/README.md +++ b/README.md @@ -224,12 +224,12 @@ The latest version for maven dependence: org.influxdb influxdb-java - 2.6 + 2.7 ``` Or when using with gradle: ```groovy -compile 'org.influxdb:influxdb-java:2.6' +compile 'org.influxdb:influxdb-java:2.7' ``` For version change history have a look at [ChangeLog](https://github.com/influxdata/influxdb-java/blob/master/CHANGELOG.md). diff --git a/pom.xml b/pom.xml index 54eabf6a4..1808de999 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ org.influxdb influxdb-java jar - 2.7 + 2.8-SNAPSHOT influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org From f9aa9c846cf594bad6c4fce0ed6f35094b198488 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Wed, 28 Jun 2017 09:53:54 +0200 Subject: [PATCH 021/148] Updated README.md, section QueryResult mapper limitations --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index afe035753..4079714b6 100644 --- a/README.md +++ b/README.md @@ -212,6 +212,7 @@ List cpuList = resultMapper.toPOJO(queryResult, Cpu.class); **QueryResult mapper limitations** - If your InfluxDB query contains multiple SELECT clauses, you will have to call InfluxResultMapper#toPOJO() multiple times to map every measurement returned by QueryResult to the respective POJO; - If your InfluxDB query contains multiple SELECT clauses **for the same measurement**, InfluxResultMapper will process all results because there is no way to distinguish which one should be mapped to your POJO. It may result in an invalid collection being returned; +- A Class field annotated with _@Column(..., tag = true)_ (i.e. a [InfluxDB Tag](https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value)) must be declared as _String_. ### Other Usages: From 26e014884a7fa6f41601f0e16b861e1866f66a80 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Wed, 28 Jun 2017 09:53:54 +0200 Subject: [PATCH 022/148] Updated README.md and CHANGELOG.md, section QueryResult mapper limitations --- CHANGELOG.md | 4 ++++ README.md | 7 ++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc1521636..e6676e3b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 2.8 [unreleased] +#### Fixes + + - InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345) + ## v2.7 [2017-06-26] #### Features diff --git a/README.md b/README.md index afe035753..6f0519a9d 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ influxDB.query(query, 20, queryResult -> System.out.println(queryResult)); ``` -#### QueryResult mapper to POJO (version 2.7+ required, unreleased): +#### QueryResult mapper to POJO (version 2.7+ required): An alternative way to handle the QueryResult object is now available. 
Supposing that you have a measurement _CPU_: @@ -206,13 +206,14 @@ InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "ro String dbName = "myTimeseries"; QueryResult queryResult = influxDB.query(new Query("SELECT * FROM cpu", dbName)); -InfluxResultMapper resultMapper = new InfluxResultMapper(); // thread-safe - can be reused +InfluxDBResultMapper resultMapper = new InfluxDBResultMapper(); // thread-safe - can be reused List cpuList = resultMapper.toPOJO(queryResult, Cpu.class); ``` **QueryResult mapper limitations** - If your InfluxDB query contains multiple SELECT clauses, you will have to call InfluxResultMapper#toPOJO() multiple times to map every measurement returned by QueryResult to the respective POJO; - If your InfluxDB query contains multiple SELECT clauses **for the same measurement**, InfluxResultMapper will process all results because there is no way to distinguish which one should be mapped to your POJO. It may result in an invalid collection being returned; - +- A Class field annotated with _@Column(..., tag = true)_ (i.e. a [InfluxDB Tag](https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value)) must be declared as _String_. +-- _Note: With the current released version (2.7), InfluxDBResultMapper does not support QueryResult created by queries using the "GROUP BY" clause. This was fixed by [PR #345](https://github.com/influxdata/influxdb-java/pull/345)._ ### Other Usages: For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java") From e41c2093b676193b8d57c25cd784c6b3c17494e7 Mon Sep 17 00:00:00 2001 From: wasnertobias Date: Wed, 5 Jul 2017 13:40:44 +0200 Subject: [PATCH 023/148] Let InfluxDB handle the timestamp if none provided. 
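With this change a Point built without an explicit time no longer receives a client-side System.currentTimeMillis() default; the timestamp is simply omitted from the line protocol and InfluxDB assigns it on the server when the point is written. A minimal sketch of the resulting usage (influxDB, dbName and rpName are placeholders taken from the README examples, not part of this patch):

```java
// No .time(...) call: time stays null, is left out of the line protocol,
// and the server assigns the write timestamp.
Point point = Point.measurement("cpu")
        .tag("host", "serverA")
        .addField("idle", 90L)
        .build();
influxDB.write(dbName, rpName, point);
```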
Fixes #349 --- src/main/java/org/influxdb/dto/Point.java | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java index fb6ad7a26..8c0f91b79 100644 --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -69,7 +69,7 @@ public static final class Builder { private final String measurement; private final Map tags = new TreeMap<>(); private Long time; - private TimeUnit precision = TimeUnit.NANOSECONDS; + private TimeUnit precision; private final Map fields = new TreeMap<>(); /** @@ -207,9 +207,6 @@ public Point build() { if (this.time != null) { point.setTime(this.time); point.setPrecision(this.precision); - } else { - point.setTime(System.currentTimeMillis()); - point.setPrecision(TimeUnit.MILLISECONDS); } point.setTags(this.tags); return point; @@ -292,12 +289,16 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("Point [name="); builder.append(this.measurement); - builder.append(", time="); - builder.append(this.time); + if (this.time != null) { + builder.append(", time="); + builder.append(this.time); + } builder.append(", tags="); builder.append(this.tags); - builder.append(", precision="); - builder.append(this.precision); + if (this.precision != null) { + builder.append(", precision="); + builder.append(this.precision); + } builder.append(", fields="); builder.append(this.fields); builder.append("]"); @@ -368,6 +369,9 @@ private void concatenatedFields(final StringBuilder sb) { } private void formatedTime(final StringBuilder sb) { + if (this.time == null || this.precision == null) { + return; + } sb.append(' ').append(TimeUnit.NANOSECONDS.convert(this.time, this.precision)); } From 17cdd589e2464b45a2f0fcea8ad6e5767a71bdf3 Mon Sep 17 00:00:00 2001 From: Tobias Wasner Date: Wed, 5 Jul 2017 14:07:59 +0200 Subject: [PATCH 024/148] Update CHANGELOG.md --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6676e3b1..8c8d51031 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,10 @@ -## 2.8 [unreleased] +## 2.8 [2017-07-05] #### Fixes - InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345) + - InfluxDB will now handle the timestamp on its own if none is provided [PR#350](https://github.com/influxdata/influxdb-java/pull/350) + ## v2.7 [2017-06-26] From 62150708fe756640276f1eae01eacaa0609c90db Mon Sep 17 00:00:00 2001 From: Tobias Wasner Date: Wed, 5 Jul 2017 14:14:58 +0200 Subject: [PATCH 025/148] Update CHANGELOG.md not yet released... 
--- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c8d51031..9f40a782f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 2.8 [2017-07-05] +## 2.8 [unreleased] #### Fixes From f87797f9f8194677b595b27966a67eda4e9cc3b2 Mon Sep 17 00:00:00 2001 From: gurlwhocodes Date: Wed, 5 Jul 2017 22:33:36 -0700 Subject: [PATCH 026/148] feature: #295 Create and Drop retention policies --- src/main/java/org/influxdb/InfluxDB.java | 40 +++++++++++ .../java/org/influxdb/impl/InfluxDBImpl.java | 69 +++++++++++++++++++ .../java/org/influxdb/impl/Preconditions.java | 13 ++++ src/test/java/org/influxdb/InfluxDBTest.java | 30 ++++++++ 4 files changed, 152 insertions(+) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index ddc8f7316..9e583f51c 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -395,4 +395,44 @@ public void write(final String database, final String retentionPolicy, * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB setRetentionPolicy(final String retentionPolicy); + + /** + * Creates a retentionPolicy. + * @param rpName the name of the retentionPolicy(rp) + * @param database the name of the database + * @param duration the duration of the rp + * @param shardDuration the shardDuration + * @param replicationFactor the replicationFactor of the rp + * @param isDefault if the rp is the default rp for the database or not + */ + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor, final boolean isDefault); + + /** + * Creates a retentionPolicy. (optional shardDuration) + * @param rpName the name of the retentionPolicy(rp) + * @param database the name of the database + * @param duration the duration of the rp + * @param replicationFactor the replicationFactor of the rp + * @param isDefault if the rp is the default rp for the database or not + */ + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final int replicationFactor, final boolean isDefault); + + /** + * Creates a retentionPolicy. (optional shardDuration and isDefault) + * @param rpName the name of the retentionPolicy(rp) + * @param database the name of the database + * @param duration the duration of the rp + * @param replicationFactor the replicationFactor of the rp + */ + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor); + + /** + * Drops a retentionPolicy in a database. 
+ * @param rpName the name of the retentionPolicy + * @param database the name of the database + */ + public void dropRetentionPolicy(final String rpName, final String database); } diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 37f7dbd46..d0ea19fd2 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -557,4 +557,73 @@ public InfluxDB setRetentionPolicy(final String retentionPolicy) { this.retentionPolicy = retentionPolicy; return this; } + + /** + * {@inheritDoc} + */ + @Override + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor, final boolean isDefault) { + Preconditions.checkNonEmptyString(rpName, "retentionPolicyName"); + Preconditions.checkNonEmptyString(database, "database"); + Preconditions.checkNonEmptyString(duration, "retentionDuration"); + Preconditions.checkDuration(duration, "retentionDuration"); + if (shardDuration != null && !shardDuration.isEmpty()) { + Preconditions.checkDuration(shardDuration, "shardDuration"); + } + Preconditions.checkPositiveNumber(replicationFactor, "replicationFactor"); + + StringBuilder queryBuilder = new StringBuilder("CREATE RETENTION POLICY \""); + queryBuilder.append(rpName) + .append("\" ON \"") + .append(database) + .append("\" DURATION ") + .append(duration) + .append(" REPLICATION ") + .append(replicationFactor); + if (shardDuration != null && !shardDuration.isEmpty()) { + queryBuilder.append(" SHARD DURATION "); + queryBuilder.append(shardDuration); + } + if (isDefault) { + queryBuilder.append(" DEFAULT"); + } + execute(this.influxDBService.postQuery(this.username, this.password, Query.encode(queryBuilder.toString()))); + } + + /** + * {@inheritDoc} + */ + @Override + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final int replicationFactor, final boolean isDefault) { + createRetentionPolicy(rpName, database, duration, null, replicationFactor, isDefault); + } + + /** + * {@inheritDoc} + */ + @Override + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor) { + createRetentionPolicy(rpName, database, duration, null, replicationFactor, false); + } + + /** + * {@inheritDoc} + * @param rpName the name of the retentionPolicy + * @param database the name of the database + */ + @Override + public void dropRetentionPolicy(final String rpName, final String database) { + Preconditions.checkNonEmptyString(rpName, "retentionPolicyName"); + Preconditions.checkNonEmptyString(database, "database"); + StringBuilder queryBuilder = new StringBuilder("DROP RETENTION POLICY \""); + queryBuilder.append(rpName) + .append("\" ON \"") + .append(database) + .append("\""); + execute(this.influxDBService.postQuery(this.username, this.password, + Query.encode(queryBuilder.toString()))); + } } diff --git a/src/main/java/org/influxdb/impl/Preconditions.java b/src/main/java/org/influxdb/impl/Preconditions.java index f2c21a796..be8acd2a9 100644 --- a/src/main/java/org/influxdb/impl/Preconditions.java +++ b/src/main/java/org/influxdb/impl/Preconditions.java @@ -35,4 +35,17 @@ public static void checkPositiveNumber(final Number number, final String name) t throw new IllegalArgumentException("Expecting a positive number for " + name); } } + + /** + * Enforces that the 
duration is a valid influxDB duration. + * @param duration the duration to test + * @param name variable name for reporting + * @throws IllegalArgumentException + */ + public static void checkDuration(final String duration, final String name) throws IllegalArgumentException { + if (!duration.matches("(\\d+[wdmhs])+")) { + throw new IllegalArgumentException("Invalid InfluxDB duration: " + duration + + "for " + name); + } + } } diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index 4b486d6db..f5a8bca06 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -715,4 +715,34 @@ public void testFlushThrowsIfBatchingIsNotEnabled() { this.influxDB.flush(); } + /** + * Test creation and deletion of retention policies + */ + @Test + public void testCreateDropRetentionPolicies() { + String dbName = "rpTest_" + System.currentTimeMillis(); + this.influxDB.createDatabase(dbName); + + this.influxDB.createRetentionPolicy("testRP1", dbName, "30h", 2, false); + this.influxDB.createRetentionPolicy("testRP2", dbName, "10d", "20m", 2, false); + this.influxDB.createRetentionPolicy("testRP3", dbName, "2d4w", "20m", 2); + + Query query = new Query("SHOW RETENTION POLICIES", dbName); + QueryResult result = this.influxDB.query(query); + Assert.assertNull(result.getError()); + List> retentionPolicies = result.getResults().get(0).getSeries().get(0).getValues(); + Assert.assertTrue(retentionPolicies.get(1).contains("testRP1")); + Assert.assertTrue(retentionPolicies.get(2).contains("testRP2")); + Assert.assertTrue(retentionPolicies.get(3).contains("testRP3")); + + this.influxDB.dropRetentionPolicy("testRP1", dbName); + this.influxDB.dropRetentionPolicy("testRP2", dbName); + this.influxDB.dropRetentionPolicy("testRP3", dbName); + + result = this.influxDB.query(query); + Assert.assertNull(result.getError()); + retentionPolicies = result.getResults().get(0).getSeries().get(0).getValues(); + Assert.assertTrue(retentionPolicies.size() == 1); + } + } From b8ad9de46103dc7594c0dfd6069deba0621675dc Mon Sep 17 00:00:00 2001 From: gurlwhocodes Date: Thu, 6 Jul 2017 20:29:14 -0700 Subject: [PATCH 027/148] Updating Changelog and Readme --- CHANGELOG.md | 3 +++ README.md | 22 +++++++++++++++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f40a782f..90f66071a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,9 @@ - InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345) - InfluxDB will now handle the timestamp on its own if none is provided [PR#350](https://github.com/influxdata/influxdb-java/pull/350) +#### Features + + - API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351) ## v2.7 [2017-06-26] diff --git a/README.md b/README.md index 6f0519a9d..e47dd0c57 100644 --- a/README.md +++ b/README.md @@ -20,11 +20,13 @@ All low level REST Api calls are available. 
InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); String dbName = "aTimeSeries"; influxDB.createDatabase(dbName); +String rpName = "aRetentionPolicy"; +influxDB.createRetentionPolicy(rpName, dbName, "30d", "30m", 2, true); BatchPoints batchPoints = BatchPoints .database(dbName) .tag("async", "true") - .retentionPolicy("autogen") + .retentionPolicy(rpName) .consistency(ConsistencyLevel.ALL) .build(); Point point1 = Point.measurement("cpu") @@ -43,9 +45,13 @@ batchPoints.point(point2); influxDB.write(batchPoints); Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query); +influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); ``` -Note : If you are using influxdb < 1.0.0, you should use 'default' instead of 'autogen' +Note: +* APIs to create and drop retention policies are supported only in versions > 2.7 +* If you are using influxdb < 2.8, you should use retention policy: 'autogen' +* If you are using influxdb < 1.0.0, you should use 'default' instead of 'autogen' If your application produces only single Points, you can enable the batching functionality of influxdb-java: @@ -53,6 +59,8 @@ If your application produces only single Points, you can enable the batching fun InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); String dbName = "aTimeSeries"; influxDB.createDatabase(dbName); +String rpName = "aRetentionPolicy"; +influxDB.createRetentionPolicy(rpName, dbName, "30d", "30m", 2, true); // Flush every 2000 Points, at least every 100ms influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); @@ -69,10 +77,11 @@ Point point2 = Point.measurement("disk") .addField("free", 1L) .build(); -influxDB.write(dbName, "autogen", point1); -influxDB.write(dbName, "autogen", point2); +influxDB.write(dbName, rpName, point1); +influxDB.write(dbName, rpName, point2); Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query); +influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); ``` Note that the batching functionality creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. 
To do so simply call: ```influxDB.close()``` @@ -85,7 +94,9 @@ InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "r String dbName = "aTimeSeries"; influxDB.createDatabase(dbName); influxDB.setDatabase(dbName); -influxDB.setRetentionPolicy("autogen"); +String rpName = "aRetentionPolicy"; +influxDB.createRetentionPolicy(rpName, dbName, "30d", "30m", 2, true); +influxDB.setRetentionPolicy(rpName); // Flush every 2000 Points, at least every 100ms influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); @@ -105,6 +116,7 @@ influxDB.write(Point.measurement("disk") Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query); +influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); ``` From ee6445469743cea4327de0fc3091da08556bb800 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sat, 29 Jul 2017 10:21:51 +0200 Subject: [PATCH 028/148] Expand test suite to use jdk8 and jdk9 for unit and integration tests --- CHANGELOG.md | 4 ++++ README.md | 4 ++-- compile-and-test.sh | 9 ++++++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90f66071a..4f197be8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,10 @@ - API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351) +#### Improvements + + - Build: all unit and integration test are now running with jdk8 and jdk9. + ## v2.7 [2017-06-26] #### Features diff --git a/README.md b/README.md index e47dd0c57..60b9d77b5 100644 --- a/README.md +++ b/README.md @@ -249,8 +249,8 @@ For version change history have a look at [ChangeLog](https://github.com/influxd ### Build Requirements -* Java 1.8+ -* Maven 3.0+ +* Java 1.8+ (tested with jdk8 and jdk9) +* Maven 3.0+ (tested with maven 3.5.0) * Docker daemon running Then you can build influxdb-java with all tests with: diff --git a/compile-and-test.sh b/compile-and-test.sh index 5aa0b11a9..b89a52320 100755 --- a/compile-and-test.sh +++ b/compile-and-test.sh @@ -6,6 +6,12 @@ set -e INFLUXDB_VERSIONS="1.3 1.2 1.1" +JAVA_VERSIONS="3-jdk-8 3-jdk-9" + + +for java_version in ${JAVA_VERSIONS} +do + echo "Run tests with maven:${java_version}" for version in ${INFLUXDB_VERSIONS} do echo "Tesing againts influxdb ${version}" @@ -26,7 +32,8 @@ do --workdir /usr/src/mymaven \ --link=influxdb \ --env INFLUXDB_IP=influxdb \ - maven:alpine mvn clean install + maven:${java_version} mvn clean install docker kill influxdb || true done +done From d4c74eeba2bf158420cab4e7d30efe51e672cb37 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Thu, 17 Aug 2017 13:46:38 +0200 Subject: [PATCH 029/148] Fixed DateTimeFormatterBuilder to properly parse nanoseconds DateTimeFormatterBuilder was using a wrong value for the max width of a fraction and an incorrect ChronoField. Now it is expected to have up to 9 digits of ns correctly parsed. 
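For illustration, a self-contained sketch of the corrected parsing (it mirrors the formatter changed below; the class and variable names exist only for this example):

```java
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;

public class NanoTimestampParseExample {
  // NANO_OF_SECOND with a max width of 9 accepts the full nanosecond fraction returned by InfluxDB.
  private static final DateTimeFormatter FORMATTER = new DateTimeFormatterBuilder()
      .appendPattern("yyyy-MM-dd'T'HH:mm:ss")
      .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
      .appendPattern("X")
      .toFormatter();

  public static void main(String[] args) {
    Instant parsed = Instant.from(FORMATTER.parse("2000-01-01T00:00:00.000000001Z"));
    System.out.println(parsed.getNano()); // prints 1
  }
}
```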
--- .../influxdb/impl/InfluxDBResultMapper.java | 4 ++-- .../impl/InfluxDBResultMapperTest.java | 21 +++++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index 75884eed7..31e5760ed 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -52,7 +52,7 @@ public class InfluxDBResultMapper { ConcurrentMap> CLASS_FIELD_CACHE = new ConcurrentHashMap<>(); private static final int FRACTION_MIN_WIDTH = 0; - private static final int FRACTION_MAX_WIDTH = 6; + private static final int FRACTION_MAX_WIDTH = 9; private static final boolean ADD_DECIMAL_POINT = true; /** @@ -61,7 +61,7 @@ public class InfluxDBResultMapper { */ private static final DateTimeFormatter ISO8601_FORMATTER = new DateTimeFormatterBuilder() .appendPattern("yyyy-MM-dd'T'HH:mm:ss") - .appendFraction(ChronoField.MICRO_OF_SECOND, FRACTION_MIN_WIDTH, FRACTION_MAX_WIDTH, ADD_DECIMAL_POINT) + .appendFraction(ChronoField.NANO_OF_SECOND, FRACTION_MIN_WIDTH, FRACTION_MAX_WIDTH, ADD_DECIMAL_POINT) .appendPattern("X") .toFormatter(); diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index 0c9b86a05..e31333a2c 100644 --- a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -300,6 +300,27 @@ public void testToPOJO_QueryResultCreatedByGroupByClause() { assertEquals("field 'deviceOsVersion' does not match", "9.3.5", secondGroupByEntry.deviceOsVersion); } + @Test + public void testToPOJO_ticket363() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List firstSeriesResult = Arrays.asList("2000-01-01T00:00:00.000000001Z"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... 
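+ // The parsed Instant must keep the full 9-digit fraction, i.e. exactly 1 nanosecond here.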
+ assertEquals("incorrect number of elemets", 1, result.size()); + assertEquals("incorrect value for the nanoseconds field", 1, result.get(0).time.getNano()); + } + @Measurement(name = "CustomMeasurement") static class MyCustomMeasurement { From 03be128f2696b5b0d6b212b31eab02971979c1b3 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 22 Aug 2017 08:52:20 +0200 Subject: [PATCH 030/148] Update guava used in tests from 22.0 -> 23.0 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1808de999..2b087a03b 100644 --- a/pom.xml +++ b/pom.xml @@ -230,7 +230,7 @@ com.google.guava guava - 22.0 + 23.0 test From a28d681beff68d22533930d644feb1ee69008997 Mon Sep 17 00:00:00 2001 From: Shani Elharrar Date: Sat, 26 Aug 2017 12:25:23 +0300 Subject: [PATCH 031/148] InfluxDB: Add Query with Callbacks --- src/main/java/org/influxdb/InfluxDB.java | 16 +++++- .../java/org/influxdb/impl/InfluxDBImpl.java | 45 ++++++++++++---- src/test/java/org/influxdb/AsyncResult.java | 51 +++++++++++++++++++ src/test/java/org/influxdb/InfluxDBTest.java | 19 +++++++ 4 files changed, 121 insertions(+), 10 deletions(-) create mode 100644 src/test/java/org/influxdb/AsyncResult.java diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index 9e583f51c..cb80442bc 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -301,6 +301,20 @@ public void write(final String database, final String retentionPolicy, */ public QueryResult query(final Query query); + /** + * Execute a query against a database. + * + * One of the consumers will be executed. + * + * @param query + * the query to execute. + * @param onSuccess + * the consumer to invoke when result is received + * @param onFailure + * the consumer to invoke when error is thrown + */ + public void query(final Query query, final Consumer onSuccess, final Consumer onFailure); + /** * Execute a streaming query against a database. * @@ -311,7 +325,7 @@ public void write(final String database, final String retentionPolicy, * @param consumer * the consumer to invoke for each received QueryResult */ - public void query(Query query, int chunkSize, Consumer consumer); + public void query(Query query, int chunkSize, Consumer consumer); /** * Execute a query against a database. 
diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index d0ea19fd2..070a6dbe8 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -379,15 +379,26 @@ public void write(final int udpPort, final List records) { */ @Override public QueryResult query(final Query query) { - Call call; - if (query.requiresPost()) { - call = this.influxDBService.postQuery(this.username, - this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); - } else { - call = this.influxDBService.query(this.username, - this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); - } - return execute(call); + return execute(callQuery(query)); + } + + /** + * {@inheritDoc} + */ + @Override + public void query(final Query query, final Consumer onSuccess, final Consumer onFailure) { + final Call call = callQuery(query); + call.enqueue(new Callback() { + @Override + public void onResponse(final Call call, final Response response) { + onSuccess.accept(response.body()); + } + + @Override + public void onFailure(final Call call, final Throwable throwable) { + onFailure.accept(throwable); + } + }); } /** @@ -501,6 +512,22 @@ public boolean databaseExists(final String name) { return false; } + /** + * Calls the influxDBService for the query. + */ + private Call callQuery(final Query query) { + Call call; + if (query.requiresPost()) { + call = this.influxDBService.postQuery(this.username, + this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); + } else { + call = this.influxDBService.query(this.username, + this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); + } + return call; + } + + private T execute(final Call call) { try { Response response = call.execute(); diff --git a/src/test/java/org/influxdb/AsyncResult.java b/src/test/java/org/influxdb/AsyncResult.java new file mode 100644 index 000000000..a89c9a376 --- /dev/null +++ b/src/test/java/org/influxdb/AsyncResult.java @@ -0,0 +1,51 @@ +package org.influxdb; + +import org.influxdb.dto.QueryResult; + +import java.util.function.Consumer; + +public class AsyncResult { + + private final Object syncObject = new Object(); + + private boolean gotResult = false; + private T result = null; + private Throwable throwable = null; + + T result() throws Throwable { + while (!this.gotResult) { + synchronized (this.syncObject) { + this.syncObject.wait(); + } + } + + if (this.throwable != null) { + throw this.throwable; + } + + return this.result; + } + + public final Consumer resultConsumer = new Consumer() { + @Override + public void accept(T t) { + synchronized (syncObject) { + result = t; + gotResult = true; + syncObject.notifyAll(); + } + } + }; + + public final Consumer errorConsumer = new Consumer() { + @Override + public void accept(Throwable t) { + synchronized (syncObject) { + throwable = t; + gotResult = true; + syncObject.notifyAll(); + } + } + }; + +} diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index f5a8bca06..d3b973ff7 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -107,6 +107,25 @@ public void testQuery() { this.influxDB.query(new Query("DROP DATABASE mydb2", "mydb")); } + /** + * Tests for callback query. 
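+ * Any error from either query is funneled into the shared error consumer and rethrown by result().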
+ */ + @Test + public void testCallbackQuery() throws Throwable { + final AsyncResult result = new AsyncResult<>(); + final Consumer firstQueryConsumer = new Consumer() { + @Override + public void accept(QueryResult queryResult) { + influxDB.query(new Query("DROP DATABASE mydb2", "mydb"), result.resultConsumer, result.errorConsumer); + } + }; + + this.influxDB.query(new Query("CREATE DATABASE mydb2", "mydb"), firstQueryConsumer, result.errorConsumer); + + // Will throw exception in case of error. + result.result(); + } + /** * Test that describe Databases works. */ From debbe29c27f10e2f6396a7b6e330e933e6544f94 Mon Sep 17 00:00:00 2001 From: Shani Elharrar Date: Tue, 29 Aug 2017 08:26:20 +0300 Subject: [PATCH 032/148] Added changelog and readme entries about query callback support --- CHANGELOG.md | 1 + README.md | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f197be8c..27404396c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ #### Features - API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351) + - API: add InfluxDB#query that uses callbacks #### Improvements diff --git a/README.md b/README.md index 60b9d77b5..ce00af547 100644 --- a/README.md +++ b/README.md @@ -227,6 +227,19 @@ List cpuList = resultMapper.toPOJO(queryResult, Cpu.class); - A Class field annotated with _@Column(..., tag = true)_ (i.e. a [InfluxDB Tag](https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value)) must be declared as _String_. -- _Note: With the current released version (2.7), InfluxDBResultMapper does not support QueryResult created by queries using the "GROUP BY" clause. This was fixed by [PR #345](https://github.com/influxdata/influxdb-java/pull/345)._ +#### Query using Callbacks (version 2.8+ required) + +influxdb-java now supports returning results of a query via callbacks. Only one +of the following consumers are going to be called once : + +```java +this.influxDB.query(new Query("SELECT idle FROM cpu", dbName), queryResult -> { + // Do something with the result... +}, throwable -> { + // Do something with the error... 
+}); +``` + ### Other Usages: For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java") From 7c675992d9ff9bd3d65ef93de85776227b57adbb Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 30 Aug 2017 07:39:42 +0200 Subject: [PATCH 033/148] Update mockite from 2.8.47 -> 2.9.0 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 2b087a03b..0e18e1838 100644 --- a/pom.xml +++ b/pom.xml @@ -224,7 +224,7 @@ org.mockito mockito-core - 2.8.47 + 2.9.0 test From d3116547a5f737f42c7098dd1db99367f746b7e1 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 30 Aug 2017 07:50:31 +0200 Subject: [PATCH 034/148] silent 3 javadoc warnings --- src/main/java/org/influxdb/InfluxDB.java | 1 + src/main/java/org/influxdb/impl/InfluxDBResultMapper.java | 1 + src/main/java/org/influxdb/impl/Preconditions.java | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index cb80442bc..3714eb131 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -438,6 +438,7 @@ public void createRetentionPolicy(final String rpName, final String database, fi * @param rpName the name of the retentionPolicy(rp) * @param database the name of the database * @param duration the duration of the rp + * @param shardDuration the shardDuration * @param replicationFactor the replicationFactor of the rp */ public void createRetentionPolicy(final String rpName, final String database, final String duration, diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index 31e5760ed..2188bcd18 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -74,6 +74,7 @@ public class InfluxDBResultMapper { * * @param queryResult the InfluxDB result object * @param clazz the Class that will be used to hold your measurement data + * @param the target type * @return a {@link List} of objects from the same Class passed as parameter and sorted on the * same order as received from InfluxDB. * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, diff --git a/src/main/java/org/influxdb/impl/Preconditions.java b/src/main/java/org/influxdb/impl/Preconditions.java index be8acd2a9..4a3297db6 100644 --- a/src/main/java/org/influxdb/impl/Preconditions.java +++ b/src/main/java/org/influxdb/impl/Preconditions.java @@ -40,7 +40,7 @@ public static void checkPositiveNumber(final Number number, final String name) t * Enforces that the duration is a valid influxDB duration. * @param duration the duration to test * @param name variable name for reporting - * @throws IllegalArgumentException + * @throws IllegalArgumentException if the given duration is not valid. 
*/ public static void checkDuration(final String duration, final String name) throws IllegalArgumentException { if (!duration.matches("(\\d+[wdmhs])+")) { From af84523dc0917bf36a28097e29a19244d5b51a91 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Thu, 31 Aug 2017 15:21:36 +0200 Subject: [PATCH 035/148] add a format sources helper for later reformatting according google coding style rules --- format-sources.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100755 format-sources.sh diff --git a/format-sources.sh b/format-sources.sh new file mode 100755 index 000000000..98d0bc92f --- /dev/null +++ b/format-sources.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +wget https://github.com/google/google-java-format/releases/download/google-java-format-1.4/google-java-format-1.4-all-deps.jar + +JAVA_FILES=$(find src/ -name "*.java") + +for JAVA_FILE in ${JAVA_FILES} +do + echo "formatting ${JAVA_FILE}" + docker run -it --rm \ + -v $PWD:/mnt \ + openjdk java -jar /mnt/google-java-format-1.4-all-deps.jar -r /mnt/${JAVA_FILE} +done From 4f9e0b44bf358c3612b9ef092b350dc537ead440 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 5 Sep 2017 07:58:23 +0200 Subject: [PATCH 036/148] Update okhttp from 3.8.1 -> 3.9.0 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 0e18e1838..fb62bd6e5 100644 --- a/pom.xml +++ b/pom.xml @@ -248,12 +248,12 @@ com.squareup.okhttp3 okhttp - 3.8.1 + 3.9.0 com.squareup.okhttp3 logging-interceptor - 3.8.1 + 3.9.0 From e864c41932fb75c8824ecec23b91c9e1c3fef011 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Fri, 15 Sep 2017 13:36:05 +0200 Subject: [PATCH 037/148] update mockito from 2.9.0 -> 2.10.0 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fb62bd6e5..26a189acd 100644 --- a/pom.xml +++ b/pom.xml @@ -224,7 +224,7 @@ org.mockito mockito-core - 2.9.0 + 2.10.0 test From 2b75c035ff76a4fce838803f441f3434c849a266 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Mon, 25 Sep 2017 21:49:25 +0200 Subject: [PATCH 038/148] Add proper license dates, fixes #374 --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 766a0a595..f21351ced 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) {{{year}}} {{{fullname}}} +Copyright (c) 2014-2017 Stefan Majer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 93af77ef32c121968a941b5c57167bb7deb3862c Mon Sep 17 00:00:00 2001 From: Abhi Auradkar Date: Thu, 12 Oct 2017 21:13:33 +0530 Subject: [PATCH 039/148] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ce00af547..329c66ea4 100644 --- a/README.md +++ b/README.md @@ -140,7 +140,7 @@ influxDB.enableGzip() #### UDP's support (version 2.5+ required): -influxdb-java client support udp protocol now. you can call followed methods directly to write through UDP. +influxdb-java client support udp protocol now. you can call following methods directly to write through UDP. 
```java public void write(final int udpPort, final String records); public void write(final int udpPort, final List records); From 0129cf7a3c89df954e08fe352ce526793bae0aa6 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 18 Oct 2017 21:35:47 +0200 Subject: [PATCH 040/148] Update mockito from 2.10 -> 2.11 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 26a189acd..1b6f64b01 100644 --- a/pom.xml +++ b/pom.xml @@ -224,7 +224,7 @@ org.mockito mockito-core - 2.10.0 + 2.11.0 test From f94690969593b5fe7fbad45830c97886c67d37ae Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sun, 29 Oct 2017 12:00:25 +0100 Subject: [PATCH 041/148] Update guava from 23.0 to 23.3-jre --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1b6f64b01..e89b968f9 100644 --- a/pom.xml +++ b/pom.xml @@ -230,7 +230,7 @@ com.google.guava guava - 23.0 + 23.3-jre test From a382ad6f6d1ca88fba920d2bf4a9a4940eda30df Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 14 Nov 2017 21:15:06 +0100 Subject: [PATCH 042/148] Add influxdb-1.4 to tests, use smaller maven images --- compile-and-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/compile-and-test.sh b/compile-and-test.sh index b89a52320..67323c40f 100755 --- a/compile-and-test.sh +++ b/compile-and-test.sh @@ -4,9 +4,9 @@ # set -e -INFLUXDB_VERSIONS="1.3 1.2 1.1" +INFLUXDB_VERSIONS="1.4 1.3 1.2 1.1" -JAVA_VERSIONS="3-jdk-8 3-jdk-9" +JAVA_VERSIONS="3-jdk-8-alpine 3-jdk-9-slim" for java_version in ${JAVA_VERSIONS} From 074ccb3f2660f631d80080fe6a72554d66e5301f Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 14 Nov 2017 21:17:33 +0100 Subject: [PATCH 043/148] Update test dependencies guava from 23.3 -> 23.4, mockito from 2.11 -> 2.12 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index e89b968f9..eaca3d712 100644 --- a/pom.xml +++ b/pom.xml @@ -224,13 +224,13 @@ org.mockito mockito-core - 2.11.0 + 2.12.0 test com.google.guava guava - 23.3-jre + 23.4-jre test From 525efb6f7d9aa2473b8ef62f153800acad602b07 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 15 Nov 2017 08:40:05 +0100 Subject: [PATCH 044/148] Update findbugs from 3.0.4 -> 3.0.5 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index eaca3d712..9a1024d9c 100644 --- a/pom.xml +++ b/pom.xml @@ -42,7 +42,7 @@ org.codehaus.mojo findbugs-maven-plugin - 3.0.4 + 3.0.5 true From e852a6add778080d6be4f64b05141bb502427d89 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Fri, 17 Nov 2017 13:26:10 +0100 Subject: [PATCH 045/148] Update maven compile plugin from 3.6.1 -> 3.7.0, maven javadoc plugin from 2.10.4 -> 3.0.0-M1 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 9a1024d9c..2f8f644a5 100644 --- a/pom.xml +++ b/pom.xml @@ -70,7 +70,7 @@ org.apache.maven.plugins maven-compiler-plugin - 3.6.1 + 3.7.0 1.8 1.8 @@ -143,7 +143,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 2.10.4 + 3.0.0-M1 attach-javadocs From 8098e1ef87fcd10661fe0452dd41cd90717e356a Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Mon, 20 Nov 2017 10:42:18 +0100 Subject: [PATCH 046/148] Update okhttp from 3.9.0 -> 3.9.1 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 2f8f644a5..a4e09ce9e 100644 --- a/pom.xml +++ b/pom.xml @@ -248,12 +248,12 @@ com.squareup.okhttp3 okhttp - 3.9.0 + 3.9.1 
com.squareup.okhttp3 logging-interceptor - 3.9.0 + 3.9.1 From 619af3480ee2d8c2110d8eab85edbf232f3249a3 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Mon, 20 Nov 2017 18:51:07 +0100 Subject: [PATCH 047/148] Migrate to JUnit5 --- pom.xml | 18 +- .../org/influxdb/InfluxDBFactoryTest.java | 97 ++++---- src/test/java/org/influxdb/InfluxDBTest.java | 210 ++++++++++-------- .../java/org/influxdb/PerformanceTests.java | 31 +-- src/test/java/org/influxdb/TicketTest.java | 12 +- .../java/org/influxdb/dto/BatchPointTest.java | 2 +- src/test/java/org/influxdb/dto/PointTest.java | 35 ++- src/test/java/org/influxdb/dto/QueryTest.java | 2 +- .../org/influxdb/impl/BatchProcessorTest.java | 25 ++- .../influxdb/impl/ChunkingExceptionTest.java | 8 +- .../impl/InfluxDBResultMapperTest.java | 95 ++++---- .../org/influxdb/impl/PreconditionsTest.java | 39 ++-- .../java/org/influxdb/impl/TimeUtilTest.java | 22 +- 13 files changed, 336 insertions(+), 260 deletions(-) diff --git a/pom.xml b/pom.xml index a4e09ce9e..979f63a01 100644 --- a/pom.xml +++ b/pom.xml @@ -210,9 +210,21 @@ - junit - junit - 4.12 + org.junit.jupiter + junit-jupiter-engine + 5.0.2 + test + + + org.junit.platform + junit-platform-runner + 1.0.2 + test + + + org.hamcrest + hamcrest-all + 1.3 test diff --git a/src/test/java/org/influxdb/InfluxDBFactoryTest.java b/src/test/java/org/influxdb/InfluxDBFactoryTest.java index ec1455c1c..99291b522 100644 --- a/src/test/java/org/influxdb/InfluxDBFactoryTest.java +++ b/src/test/java/org/influxdb/InfluxDBFactoryTest.java @@ -1,46 +1,51 @@ -package org.influxdb; - -import org.influxdb.dto.Pong; -import org.junit.Assert; -import org.junit.Test; - -import okhttp3.OkHttpClient; - -/** - * Test the InfluxDB Factory API. - * - * @author fujian1115 [at] gmail.com - * - */ -public class InfluxDBFactoryTest { - - /** - * Test for a {@link InfluxDBFactory #connect(String)}. - */ - @Test - public void testCreateInfluxDBInstanceWithoutUserNameAndPassword() { - InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true)); - verifyInfluxDBInstance(influxDB); - } - - private void verifyInfluxDBInstance(InfluxDB influxDB) { - Assert.assertNotNull(influxDB); - Pong pong = influxDB.ping(); - Assert.assertNotNull(pong); - Assert.assertNotEquals(pong.getVersion(), "unknown"); - } - - /** - * Test for a {@link InfluxDBFactory #connect(String, okhttp3.OkHttpClient.Builder)}. - */ - @Test - public void testCreateInfluxDBInstanceWithClientAndWithoutUserNameAndPassword() { - InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), new OkHttpClient.Builder()); - verifyInfluxDBInstance(influxDB); - } - - @Test(expected = IllegalArgumentException.class) - public void shouldThrowIllegalArgumentWithInvalidUrl() { - InfluxDBFactory.connect("invalidUrl"); - } -} +package org.influxdb; + +import org.influxdb.dto.Pong; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import okhttp3.OkHttpClient; + +/** + * Test the InfluxDB Factory API. + * + * @author fujian1115 [at] gmail.com + * + */ +@RunWith(JUnitPlatform.class) +public class InfluxDBFactoryTest { + + /** + * Test for a {@link InfluxDBFactory #connect(String)}. 
+ */ + @Test + public void testCreateInfluxDBInstanceWithoutUserNameAndPassword() { + InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true)); + verifyInfluxDBInstance(influxDB); + } + + private void verifyInfluxDBInstance(InfluxDB influxDB) { + Assertions.assertNotNull(influxDB); + Pong pong = influxDB.ping(); + Assertions.assertNotNull(pong); + Assertions.assertNotEquals(pong.getVersion(), "unknown"); + } + + /** + * Test for a {@link InfluxDBFactory #connect(String, okhttp3.OkHttpClient.Builder)}. + */ + @Test + public void testCreateInfluxDBInstanceWithClientAndWithoutUserNameAndPassword() { + InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), new OkHttpClient.Builder()); + verifyInfluxDBInstance(influxDB); + } + + @Test + public void testShouldThrowIllegalArgumentWithInvalidUrl() { + Assertions.assertThrows(IllegalArgumentException.class,() -> { + InfluxDBFactory.connect("invalidUrl"); + }); + } +} diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index d3b973ff7..cd505dd63 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -19,11 +19,12 @@ import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; import org.influxdb.impl.InfluxDBImpl; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; import com.google.common.util.concurrent.Uninterruptibles; @@ -33,20 +34,20 @@ * @author stefan.majer [at] gmail.com * */ +@RunWith(JUnitPlatform.class) public class InfluxDBTest { private InfluxDB influxDB; private final static int UDP_PORT = 8089; private final static String UDP_DATABASE = "udp"; - @Rule public final ExpectedException exception = ExpectedException.none(); /** * Create a influxDB connection before all tests start. * * @throws InterruptedException * @throws IOException */ - @Before + @BeforeEach public void setUp() throws InterruptedException, IOException { this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); boolean influxDBstarted = false; @@ -73,8 +74,8 @@ public void setUp() throws InterruptedException, IOException { /** * delete UDP database after all tests end. 
*/ - //@After - public void clearup(){ + @AfterEach + public void cleanup(){ this.influxDB.deleteDatabase(UDP_DATABASE); } @@ -84,8 +85,8 @@ public void clearup(){ @Test public void testPing() { Pong result = this.influxDB.ping(); - Assert.assertNotNull(result); - Assert.assertNotEquals(result.getVersion(), "unknown"); + Assertions.assertNotNull(result); + Assertions.assertNotEquals(result.getVersion(), "unknown"); } /** @@ -94,8 +95,8 @@ public void testPing() { @Test public void testVersion() { String version = this.influxDB.version(); - Assert.assertNotNull(version); - Assert.assertFalse(version.contains("unknown")); + Assertions.assertNotNull(version); + Assertions.assertFalse(version.contains("unknown")); } /** @@ -135,8 +136,8 @@ public void testDescribeDatabases() { this.influxDB.createDatabase(dbName); this.influxDB.describeDatabases(); List result = this.influxDB.describeDatabases(); - Assert.assertNotNull(result); - Assert.assertTrue(result.size() > 0); + Assertions.assertNotNull(result); + Assertions.assertTrue(result.size() > 0); boolean found = false; for (String database : result) { if (database.equals(dbName)) { @@ -145,7 +146,7 @@ public void testDescribeDatabases() { } } - Assert.assertTrue("It is expected that describeDataBases contents the newly create database.", found); + Assertions.assertTrue(found, "It is expected that describeDataBases contents the newly create database."); this.influxDB.deleteDatabase(dbName); } @@ -158,9 +159,9 @@ public void testDatabaseExists() { String notExistentdbName = "unittest_2"; this.influxDB.createDatabase(existentdbName); boolean checkDbExistence = this.influxDB.databaseExists(existentdbName); - Assert.assertTrue("It is expected that databaseExists return true for " + existentdbName + " database", checkDbExistence); + Assertions.assertTrue(checkDbExistence, "It is expected that databaseExists return true for " + existentdbName + " database"); checkDbExistence = this.influxDB.databaseExists(notExistentdbName); - Assert.assertFalse("It is expected that databaseExists return false for " + notExistentdbName + " database", checkDbExistence); + Assertions.assertFalse(checkDbExistence, "It is expected that databaseExists return false for " + notExistentdbName + " database"); this.influxDB.deleteDatabase(existentdbName); } @@ -186,7 +187,7 @@ public void testWrite() { this.influxDB.write(batchPoints); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); this.influxDB.deleteDatabase(dbName); } @@ -202,7 +203,7 @@ public void testSyncWritePointThroughUDP() { Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); } /** @@ -212,14 +213,14 @@ public void testSyncWritePointThroughUDP() { public void testAsyncWritePointThroughUDP() { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); try{ - Assert.assertTrue(this.influxDB.isBatchEnabled()); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); String measurement = TestUtils.getRandomMeasurement(); Point point = 
Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); this.influxDB.write(UDP_PORT, point); Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); }finally{ this.influxDB.disableBatch(); } @@ -229,15 +230,17 @@ public void testAsyncWritePointThroughUDP() { /** * Test the implementation of {@link InfluxDB#write(int, Point)}'s async support. */ - @Test(expected = RuntimeException.class) + @Test public void testAsyncWritePointThroughUDPFail() { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); try{ - Assert.assertTrue(this.influxDB.isBatchEnabled()); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); String measurement = TestUtils.getRandomMeasurement(); Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); Thread.currentThread().interrupt(); - this.influxDB.write(UDP_PORT, point); + Assertions.assertThrows(RuntimeException.class, () -> { + this.influxDB.write(UDP_PORT, point); + }); }finally{ this.influxDB.disableBatch(); } @@ -254,7 +257,7 @@ public void testWriteStringData() { this.influxDB.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, "cpu,atag=test idle=90,usertime=9,system=1"); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); this.influxDB.deleteDatabase(dbName); } @@ -271,7 +274,7 @@ public void testWriteStringDataSimple() { this.influxDB.write("cpu,atag=test idle=90,usertime=9,system=1"); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); this.influxDB.deleteDatabase(dbName); } @@ -286,7 +289,7 @@ public void testWriteStringDataThroughUDP() { Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); } /** @@ -302,10 +305,10 @@ public void testWriteMultipleStringDataThroughUDP() { Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(3, result.getResults().get(0).getSeries().size()); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(3, result.getResults().get(0).getSeries().size()); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), 
"test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); } /** @@ -323,10 +326,10 @@ public void testWriteMultipleStringDataLinesThroughUDP() { Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(3, result.getResults().get(0).getSeries().size()); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(3, result.getResults().get(0).getSeries().size()); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); } /** @@ -335,8 +338,8 @@ public void testWriteMultipleStringDataLinesThroughUDP() { * The message is larger than the maximum supported by the underlying transport: Datagram send failed * @throws Exception */ - @Test(expected = RuntimeException.class) - public void writeMultipleStringDataLinesOverUDPLimit() throws Exception { + @Test + public void testWriteMultipleStringDataLinesOverUDPLimit() throws Exception { //prepare data List lineProtocols = new ArrayList(); int i = 0; @@ -351,7 +354,9 @@ public void writeMultipleStringDataLinesOverUDPLimit() throws Exception { } } //write batch of string which size is over 64K - this.influxDB.write(UDP_PORT, lineProtocols); + Assertions.assertThrows(RuntimeException.class, () -> { + this.influxDB.write(UDP_PORT, lineProtocols); + }); } /** @@ -367,10 +372,10 @@ public void testWriteMultipleStringData() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); this.influxDB.deleteDatabase(dbName); } @@ -389,10 +394,10 @@ public void testWriteMultipleStringDataSimple() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + 
Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); this.influxDB.deleteDatabase(dbName); } @@ -413,10 +418,10 @@ public void testWriteMultipleStringDataLines() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); this.influxDB.deleteDatabase(dbName); } @@ -439,10 +444,10 @@ public void testWriteMultipleStringDataLinesSimple() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); this.influxDB.deleteDatabase(dbName); } @@ -455,17 +460,19 @@ public void testCreateNumericNamedDatabase() { this.influxDB.createDatabase(numericDbName); List result = this.influxDB.describeDatabases(); - Assert.assertTrue(result.contains(numericDbName)); + Assertions.assertTrue(result.contains(numericDbName)); this.influxDB.deleteDatabase(numericDbName); } /** * Test that creating database which name is empty will throw expected exception */ - @Test(expected = IllegalArgumentException.class) + @Test public void testCreateEmptyNamedDatabase() { String emptyName = ""; - this.influxDB.createDatabase(emptyName); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + this.influxDB.createDatabase(emptyName); + }); } /** @@ -477,7 +484,7 @@ public void testCreateDatabaseWithNameContainHyphen() { this.influxDB.createDatabase(databaseName); try { List result = this.influxDB.describeDatabases(); - Assert.assertTrue(result.contains(databaseName)); + Assertions.assertTrue(result.contains(databaseName)); } finally { this.influxDB.deleteDatabase(databaseName); } @@ -488,13 +495,13 @@ public void testCreateDatabaseWithNameContainHyphen() { */ @Test public 
void testIsBatchEnabled() { - Assert.assertFalse(this.influxDB.isBatchEnabled()); + Assertions.assertFalse(this.influxDB.isBatchEnabled()); this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); - Assert.assertTrue(this.influxDB.isBatchEnabled()); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); this.influxDB.disableBatch(); - Assert.assertFalse(this.influxDB.isBatchEnabled()); + Assertions.assertFalse(this.influxDB.isBatchEnabled()); } /** @@ -521,29 +528,35 @@ public Thread newThread(Runnable r) { } } - Assert.assertTrue(existThreadWithSettedName); + Assertions.assertTrue(existThreadWithSettedName); this.influxDB.disableBatch(); } - @Test(expected = NullPointerException.class) + @Test public void testBatchEnabledWithThreadFactoryIsNull() { - this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, null); + Assertions.assertThrows(NullPointerException.class, () -> { + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, null); + }); } /** * Test the implementation of {@link InfluxDBImpl#InfluxDBImpl(String, String, String, okhttp3.OkHttpClient.Builder)}. */ - @Test(expected = RuntimeException.class) + @Test public void testWrongHostForInfluxdb(){ String errorHost = "10.224.2.122_error_host"; - InfluxDBFactory.connect("http://" + errorHost + ":" + TestUtils.getInfluxPORT(true)); + Assertions.assertThrows(RuntimeException.class, () -> { + InfluxDBFactory.connect("http://" + errorHost + ":" + TestUtils.getInfluxPORT(true)); + }); } - @Test(expected = IllegalStateException.class) + @Test public void testBatchEnabledTwice() { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); try{ - this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); + Assertions.assertThrows(IllegalStateException.class, () -> { + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); + }); } finally { this.influxDB.disableBatch(); } @@ -556,9 +569,9 @@ public void testBatchEnabledTwice() { public void testCloseInfluxDBClient() { InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); influxDB.enableBatch(1, 1, TimeUnit.SECONDS); - Assert.assertTrue(influxDB.isBatchEnabled()); + Assertions.assertTrue(influxDB.isBatchEnabled()); influxDB.close(); - Assert.assertFalse(influxDB.isBatchEnabled()); + Assertions.assertFalse(influxDB.isBatchEnabled()); } /** @@ -582,10 +595,10 @@ public void testWriteEnableGzip() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = influxDBForTestGzip.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); + Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); } finally { influxDBForTestGzip.deleteDatabase(dbName); influxDBForTestGzip.close(); @@ -601,11 +614,11 @@ public void testWriteEnableGzipAndDisableGzip() { InfluxDB influxDBForTestGzip = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + 
TestUtils.getInfluxPORT(true), "admin", "admin"); try { //test default: gzip is disable - Assert.assertFalse(influxDBForTestGzip.isGzipEnabled()); + Assertions.assertFalse(influxDBForTestGzip.isGzipEnabled()); influxDBForTestGzip.enableGzip(); - Assert.assertTrue(influxDBForTestGzip.isGzipEnabled()); + Assertions.assertTrue(influxDBForTestGzip.isGzipEnabled()); influxDBForTestGzip.disableGzip(); - Assert.assertFalse(influxDBForTestGzip.isGzipEnabled()); + Assertions.assertFalse(influxDBForTestGzip.isGzipEnabled()); } finally { influxDBForTestGzip.close(); } @@ -646,19 +659,19 @@ public void accept(QueryResult result) { this.influxDB.deleteDatabase(dbName); QueryResult result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); + Assertions.assertNotNull(result); System.out.println(result); - Assert.assertEquals(2, result.getResults().get(0).getSeries().get(0).getValues().size()); + Assertions.assertEquals(2, result.getResults().get(0).getSeries().get(0).getValues().size()); result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); + Assertions.assertNotNull(result); System.out.println(result); - Assert.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); + Assertions.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); + Assertions.assertNotNull(result); System.out.println(result); - Assert.assertEquals("DONE", result.getError()); + Assertions.assertEquals("DONE", result.getError()); } /** @@ -682,7 +695,7 @@ public void accept(QueryResult result) { } }); this.influxDB.deleteDatabase(dbName); - Assert.assertFalse(countDownLatch.await(10, TimeUnit.SECONDS)); + Assertions.assertFalse(countDownLatch.await(10, TimeUnit.SECONDS)); } /** @@ -694,14 +707,15 @@ public void testChunkingOldVersion() throws InterruptedException { if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { - this.exception.expect(RuntimeException.class); + Assertions.assertThrows(RuntimeException.class, () -> { String dbName = "write_unittest_" + System.currentTimeMillis(); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); this.influxDB.query(query, 10, new Consumer() { @Override public void accept(QueryResult result) { } - }); + }); + }); } } @@ -721,17 +735,19 @@ public void testFlushPendingWritesWhenBatchingEnabled() { Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); } finally { this.influxDB.deleteDatabase(dbName); this.influxDB.disableBatch(); } } - @Test(expected = IllegalStateException.class) + @Test public void testFlushThrowsIfBatchingIsNotEnabled() { - Assert.assertFalse(this.influxDB.isBatchEnabled()); - this.influxDB.flush(); + Assertions.assertFalse(this.influxDB.isBatchEnabled()); + Assertions.assertThrows(IllegalStateException.class, () -> { + this.influxDB.flush(); + }); } /** @@ -748,20 +764,20 @@ public void testCreateDropRetentionPolicies() { Query query = new Query("SHOW RETENTION POLICIES", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertNull(result.getError()); + Assertions.assertNull(result.getError()); List> retentionPolicies = result.getResults().get(0).getSeries().get(0).getValues(); - 
Assert.assertTrue(retentionPolicies.get(1).contains("testRP1")); - Assert.assertTrue(retentionPolicies.get(2).contains("testRP2")); - Assert.assertTrue(retentionPolicies.get(3).contains("testRP3")); + Assertions.assertTrue(retentionPolicies.get(1).contains("testRP1")); + Assertions.assertTrue(retentionPolicies.get(2).contains("testRP2")); + Assertions.assertTrue(retentionPolicies.get(3).contains("testRP3")); this.influxDB.dropRetentionPolicy("testRP1", dbName); this.influxDB.dropRetentionPolicy("testRP2", dbName); this.influxDB.dropRetentionPolicy("testRP3", dbName); result = this.influxDB.query(query); - Assert.assertNull(result.getError()); + Assertions.assertNull(result.getError()); retentionPolicies = result.getResults().get(0).getSeries().get(0).getValues(); - Assert.assertTrue(retentionPolicies.size() == 1); + Assertions.assertTrue(retentionPolicies.size() == 1); } } diff --git a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java index ec783abb8..98827e397 100644 --- a/src/test/java/org/influxdb/PerformanceTests.java +++ b/src/test/java/org/influxdb/PerformanceTests.java @@ -4,16 +4,19 @@ import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; +@RunWith(JUnitPlatform.class) public class PerformanceTests { private InfluxDB influxDB; private final static int COUNT = 1; @@ -23,7 +26,7 @@ public class PerformanceTests { private final static int UDP_PORT = 8089; private final static String UDP_DATABASE = "udp"; - @Before + @BeforeEach public void setUp() { this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "root", "root"); this.influxDB.setLogLevel(LogLevel.NONE); @@ -33,13 +36,13 @@ public void setUp() { /** * delete UDP database after all tests end. 
*/ - @After - public void clearup(){ + @AfterEach + public void cleanup(){ this.influxDB.deleteDatabase(UDP_DATABASE); } @Test - public void writeSinglePointPerformance() { + public void testWriteSinglePointPerformance() { String dbName = "write_" + System.currentTimeMillis(); this.influxDB.createDatabase(dbName); this.influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); @@ -57,9 +60,9 @@ public void writeSinglePointPerformance() { this.influxDB.deleteDatabase(dbName); } - @Ignore + @Disabled @Test - public void writePerformance() { + public void testWritePerformance() { String dbName = "writepoints_" + System.currentTimeMillis(); this.influxDB.createDatabase(dbName); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); @@ -89,7 +92,7 @@ public void writePerformance() { } @Test - public void maxWritePointsPerformance() { + public void testMaxWritePointsPerformance() { String dbName = "d"; this.influxDB.createDatabase(dbName); this.influxDB.enableBatch(100000, 60, TimeUnit.SECONDS); @@ -105,7 +108,7 @@ public void maxWritePointsPerformance() { } @Test - public void writeCompareUDPPerformanceForBatchWithSinglePoints() { + public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { //prepare data List lineProtocols = new ArrayList(); for (int i = 0; i < 1000; i++) { @@ -128,7 +131,7 @@ public void writeCompareUDPPerformanceForBatchWithSinglePoints() { long elapsedForSingleWrite = watch.elapsed(TimeUnit.MILLISECONDS); System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); - Assert.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); + Assertions.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); } } diff --git a/src/test/java/org/influxdb/TicketTest.java b/src/test/java/org/influxdb/TicketTest.java index cd73b3ac0..828b30a8e 100644 --- a/src/test/java/org/influxdb/TicketTest.java +++ b/src/test/java/org/influxdb/TicketTest.java @@ -9,15 +9,19 @@ import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; import org.influxdb.dto.Pong; -import org.junit.Before; -import org.junit.Test; - +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; /** * Test the InfluxDB API. 
* * @author stefan.majer [at] gmail.com * */ +@DisplayName("Test for github issues") +@RunWith(JUnitPlatform.class) public class TicketTest { private InfluxDB influxDB; @@ -28,7 +32,7 @@ public class TicketTest { * @throws InterruptedException * @throws IOException */ - @Before + @BeforeEach public void setUp() throws InterruptedException, IOException { this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); boolean influxDBstarted = false; diff --git a/src/test/java/org/influxdb/dto/BatchPointTest.java b/src/test/java/org/influxdb/dto/BatchPointTest.java index 5cef6653c..a86029c34 100644 --- a/src/test/java/org/influxdb/dto/BatchPointTest.java +++ b/src/test/java/org/influxdb/dto/BatchPointTest.java @@ -9,7 +9,7 @@ import java.util.concurrent.TimeUnit; import org.influxdb.InfluxDB; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class BatchPointTest { diff --git a/src/test/java/org/influxdb/dto/PointTest.java b/src/test/java/org/influxdb/dto/PointTest.java index 1ea7ae8d4..704a2baaf 100644 --- a/src/test/java/org/influxdb/dto/PointTest.java +++ b/src/test/java/org/influxdb/dto/PointTest.java @@ -11,7 +11,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Assertions; /** * Test for the Point DTO. @@ -28,7 +29,7 @@ public class PointTest { * */ @Test - public void lineProtocol() { + public void testLineProtocol() { Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1"); @@ -195,33 +196,43 @@ public void testIgnoreNullPointerValue() { /** * Tests for issue #110 */ - @Test(expected = NullPointerException.class) + @Test public void testAddingTagsWithNullNameThrowsAnError() { - Point.measurement("dontcare").tag(null, "DontCare"); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag(null, "DontCare"); + }); } - @Test(expected = NullPointerException.class) + @Test public void testAddingTagsWithNullValueThrowsAnError() { - Point.measurement("dontcare").tag("DontCare", null); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag("DontCare", null); + }); } - @Test(expected = NullPointerException.class) + @Test public void testAddingMapOfTagsWithNullNameThrowsAnError() { Map map = new HashMap<>(); map.put(null, "DontCare"); - Point.measurement("dontcare").tag(map); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag(map); + }); } - @Test(expected = NullPointerException.class) + @Test public void testAddingMapOfTagsWithNullValueThrowsAnError() { Map map = new HashMap<>(); map.put("DontCare", null); - Point.measurement("dontcare").tag(map); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag(map); + }); } - @Test(expected = RuntimeException.class) + @Test public void testNullValueThrowsExceptionViaAddField() { - Point.measurement("dontcare").addField("field", (String) null); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").addField("field", (String) null); + }); } @Test diff --git a/src/test/java/org/influxdb/dto/QueryTest.java b/src/test/java/org/influxdb/dto/QueryTest.java index 5231faa9b..40987e1a5 100644 --- 
a/src/test/java/org/influxdb/dto/QueryTest.java +++ b/src/test/java/org/influxdb/dto/QueryTest.java @@ -2,7 +2,7 @@ import static org.assertj.core.api.Assertions.assertThat; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; diff --git a/src/test/java/org/influxdb/impl/BatchProcessorTest.java b/src/test/java/org/influxdb/impl/BatchProcessorTest.java index b43a6a322..216c1d7a1 100644 --- a/src/test/java/org/influxdb/impl/BatchProcessorTest.java +++ b/src/test/java/org/influxdb/impl/BatchProcessorTest.java @@ -1,6 +1,5 @@ package org.influxdb.impl; -import static org.hamcrest.CoreMatchers.hasItems; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -8,6 +7,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.hamcrest.MockitoHamcrest.argThat; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.concurrent.TimeUnit; @@ -16,7 +16,8 @@ import org.influxdb.InfluxDB; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; public class BatchProcessorTest { @@ -60,7 +61,7 @@ public void testSchedulerExceptionHandlingCallback() throws InterruptedException batchProcessor.put(batchEntry1); Thread.sleep(200); // wait for scheduler - verify(mockHandler, times(1)).accept(argThat(hasItems(point, point)), any(RuntimeException.class)); + verify(mockHandler, times(1)).accept(argThat(Matchers.hasItems(point, point)), any(RuntimeException.class)); } @Test @@ -107,24 +108,30 @@ public void testFlushWritesBufferedPointsAndDoesNotShutdownScheduler() throws In verifyNoMoreInteractions(mockInfluxDB); } - @Test(expected = IllegalArgumentException.class) + @Test public void testActionsIsZero() throws InterruptedException, IOException { InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); - BatchProcessor.builder(mockInfluxDB).actions(0) + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchProcessor.builder(mockInfluxDB).actions(0) .interval(1, TimeUnit.NANOSECONDS).build(); + }); } - @Test(expected = IllegalArgumentException.class) + @Test public void testIntervalIsZero() throws InterruptedException, IOException { InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); - BatchProcessor.builder(mockInfluxDB).actions(1) + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchProcessor.builder(mockInfluxDB).actions(1) .interval(0, TimeUnit.NANOSECONDS).build(); + }); } - @Test(expected = NullPointerException.class) + @Test public void testInfluxDBIsNull() throws InterruptedException, IOException { InfluxDB mockInfluxDB = null; - BatchProcessor.builder(mockInfluxDB).actions(1) + Assertions.assertThrows(NullPointerException.class, () -> { + BatchProcessor.builder(mockInfluxDB).actions(1) .interval(1, TimeUnit.NANOSECONDS).build(); + }); } } diff --git a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java index b8e892fa6..f2865e0cd 100644 --- a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java +++ b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java @@ -18,8 +18,8 @@ import org.influxdb.TestUtils; import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; -import org.junit.Assert; -import org.junit.Test; +import 
org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import com.squareup.moshi.JsonAdapter; @@ -82,8 +82,8 @@ public void accept(QueryResult result) { callback.onResponse(call, Response.success(responseBody)); QueryResult result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); - Assert.assertEquals(message, result.getError()); + Assertions.assertNotNull(result); + Assertions.assertEquals(message, result.getError()); } } diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index e31333a2c..c11b2b15a 100644 --- a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -20,9 +20,6 @@ */ package org.influxdb.impl; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - import java.time.Instant; import java.util.Arrays; import java.util.Date; @@ -38,7 +35,8 @@ import org.influxdb.annotation.Measurement; import org.influxdb.dto.QueryResult; import org.influxdb.impl.InfluxDBResultMapper; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; /** * @author fmachado @@ -68,23 +66,27 @@ public void testToPOJO_HappyPath() { List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); // Then... - assertEquals("there must be one entry in the result list", 1, myList.size()); + Assertions.assertEquals(1, myList.size(), "there must be one entry in the result list"); } - @Test(expected = IllegalArgumentException.class) + @Test public void testThrowExceptionIfMissingAnnotation() { - mapper.throwExceptionIfMissingAnnotation(String.class); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + mapper.throwExceptionIfMissingAnnotation(String.class); + }); } - @Test(expected = InfluxDBMapperException.class) + @Test public void testThrowExceptionIfError_InfluxQueryResultHasError() { QueryResult queryResult = new QueryResult(); queryResult.setError("main queryresult error"); - mapper.throwExceptionIfResultWithError(queryResult); + Assertions.assertThrows(InfluxDBMapperException.class, () -> { + mapper.throwExceptionIfResultWithError(queryResult); + }); } - @Test(expected = InfluxDBMapperException.class) + @Test public void testThrowExceptionIfError_InfluxQueryResultSeriesHasError() { QueryResult queryResult = new QueryResult(); @@ -93,12 +95,14 @@ public void testThrowExceptionIfError_InfluxQueryResultSeriesHasError() { queryResult.setResults(Arrays.asList(seriesResult)); - mapper.throwExceptionIfResultWithError(queryResult); + Assertions.assertThrows(InfluxDBMapperException.class, () -> { + mapper.throwExceptionIfResultWithError(queryResult); + }); } @Test public void testGetMeasurementName_testStateMeasurement() { - assertEquals("CustomMeasurement", mapper.getMeasurementName(MyCustomMeasurement.class)); + Assertions.assertEquals("CustomMeasurement", mapper.getMeasurementName(MyCustomMeasurement.class)); } @Test @@ -120,13 +124,13 @@ public void testParseSeriesAs_testTwoValidSeries() { mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); //Then... 
- assertTrue("there must be two series in the result list", result.size() == 2); + Assertions.assertTrue(result.size() == 2, "there must be two series in the result list"); - assertEquals("Field 'time' (1st series) is not valid", firstSeriesResult.get(0), result.get(0).time.toEpochMilli()); - assertEquals("Field 'uuid' (1st series) is not valid", firstSeriesResult.get(1), result.get(0).uuid); + Assertions.assertEquals(firstSeriesResult.get(0), result.get(0).time.toEpochMilli(), "Field 'time' (1st series) is not valid"); + Assertions.assertEquals(firstSeriesResult.get(1), result.get(0).uuid, "Field 'uuid' (1st series) is not valid"); - assertEquals("Field 'time' (2nd series) is not valid", secondSeriesResult.get(0), result.get(1).time.toEpochMilli()); - assertEquals("Field 'uuid' (2nd series) is not valid", secondSeriesResult.get(1), result.get(1).uuid); + Assertions.assertEquals(secondSeriesResult.get(0), result.get(1).time.toEpochMilli(), "Field 'time' (2nd series) is not valid"); + Assertions.assertEquals(secondSeriesResult.get(1), result.get(1).uuid, "Field 'uuid' (2nd series) is not valid"); } @Test @@ -161,27 +165,32 @@ public void testParseSeriesAs_testNonNullAndValidValues() { //Then... MyCustomMeasurement myObject = result.get(0); - assertEquals("field 'time' does not match", now.longValue(), myObject.time.toEpochMilli()); - assertEquals("field 'uuid' does not match", uuidAsString, myObject.uuid); + Assertions.assertEquals(now.longValue(), myObject.time.toEpochMilli(), "field 'time' does not match"); + Assertions.assertEquals(uuidAsString, myObject.uuid, "field 'uuid' does not match"); - assertEquals("field 'doubleObject' does not match", asDouble(seriesResult.get(2)), myObject.doubleObject); - assertEquals("field 'longObject' does not match", new Long(asDouble(seriesResult.get(3)).longValue()), myObject.longObject); - assertEquals("field 'integerObject' does not match", new Integer(asDouble(seriesResult.get(4)).intValue()), myObject.integerObject); + Assertions.assertEquals(asDouble(seriesResult.get(2)), myObject.doubleObject, "field 'doubleObject' does not match"); + Assertions.assertEquals(new Long(asDouble(seriesResult.get(3)).longValue()), myObject.longObject, "field 'longObject' does not match"); + Assertions.assertEquals(new Integer(asDouble(seriesResult.get(4)).intValue()), myObject.integerObject, "field 'integerObject' does not match"); - assertTrue("field 'doublePrimitive' does not match", - Double.compare(asDouble(seriesResult.get(5)).doubleValue(), myObject.doublePrimitive) == 0); + Assertions.assertTrue( + Double.compare(asDouble(seriesResult.get(5)).doubleValue(), myObject.doublePrimitive) == 0, + "field 'doublePrimitive' does not match"); - assertTrue("field 'longPrimitive' does not match", - Long.compare(asDouble(seriesResult.get(6)).longValue(), myObject.longPrimitive) == 0); + Assertions.assertTrue( + Long.compare(asDouble(seriesResult.get(6)).longValue(), myObject.longPrimitive) == 0, + "field 'longPrimitive' does not match"); - assertTrue("field 'integerPrimitive' does not match", - Integer.compare(asDouble(seriesResult.get(7)).intValue(), myObject.integerPrimitive) == 0); + Assertions.assertTrue( + Integer.compare(asDouble(seriesResult.get(7)).intValue(), myObject.integerPrimitive) == 0, + "field 'integerPrimitive' does not match"); - assertEquals("field 'booleanObject' does not match", - Boolean.valueOf(String.valueOf(seriesResult.get(8))), myObject.booleanObject); + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(8))), 
myObject.booleanObject, + "field 'booleanObject' does not match"); - assertEquals("field 'booleanPrimitive' does not match", - Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive); + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive, + "field 'booleanPrimitive' does not match"); } Double asDouble(Object obj) { @@ -205,10 +214,10 @@ public void testFieldValueModified_DateAsISO8601() { mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); //Then... - assertTrue(result.size() == 1); + Assertions.assertTrue(result.size() == 1); } - @Test(expected = InfluxDBMapperException.class) + @Test public void testUnsupportedField() { // Given... mapper.cacheMeasurementClass(MyPojoWithUnsupportedField.class); @@ -222,7 +231,9 @@ public void testUnsupportedField() { //When... List result = new LinkedList<>(); - mapper.parseSeriesAs(series, MyPojoWithUnsupportedField.class, result); + Assertions.assertThrows(InfluxDBMapperException.class, () -> { + mapper.parseSeriesAs(series, MyPojoWithUnsupportedField.class, result); + }); } /** @@ -243,7 +254,7 @@ public void testToPOJO_SeriesFromQueryResultIsNull() { List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); // Then... - assertTrue("there must NO entry in the result list", myList.isEmpty()); + Assertions.assertTrue( myList.isEmpty(), "there must NO entry in the result list"); } @Test @@ -292,12 +303,12 @@ public void testToPOJO_QueryResultCreatedByGroupByClause() { // Then... GroupByCarrierDeviceOS firstGroupByEntry = myList.get(0); - assertEquals("field 'carrier' does not match", "000/00", firstGroupByEntry.carrier); - assertEquals("field 'deviceOsVersion' does not match", "4.4.2", firstGroupByEntry.deviceOsVersion); + Assertions.assertEquals("000/00", firstGroupByEntry.carrier, "field 'carrier' does not match"); + Assertions.assertEquals("4.4.2", firstGroupByEntry.deviceOsVersion, "field 'deviceOsVersion' does not match"); GroupByCarrierDeviceOS secondGroupByEntry = myList.get(1); - assertEquals("field 'carrier' does not match", "000/01", secondGroupByEntry.carrier); - assertEquals("field 'deviceOsVersion' does not match", "9.3.5", secondGroupByEntry.deviceOsVersion); + Assertions.assertEquals("000/01", secondGroupByEntry.carrier, "field 'carrier' does not match"); + Assertions.assertEquals("9.3.5", secondGroupByEntry.deviceOsVersion, "field 'deviceOsVersion' does not match"); } @Test @@ -317,8 +328,8 @@ public void testToPOJO_ticket363() { mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); // Then... 
- assertEquals("incorrect number of elemets", 1, result.size()); - assertEquals("incorrect value for the nanoseconds field", 1, result.get(0).time.getNano()); + Assertions.assertEquals(1, result.size(), "incorrect number of elemets"); + Assertions.assertEquals(1, result.get(0).time.getNano(), "incorrect value for the nanoseconds field"); } @Measurement(name = "CustomMeasurement") diff --git a/src/test/java/org/influxdb/impl/PreconditionsTest.java b/src/test/java/org/influxdb/impl/PreconditionsTest.java index 774d76893..43a65f618 100644 --- a/src/test/java/org/influxdb/impl/PreconditionsTest.java +++ b/src/test/java/org/influxdb/impl/PreconditionsTest.java @@ -1,43 +1,52 @@ package org.influxdb.impl; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Assertions; public class PreconditionsTest { @Test - public void checkNonEmptyString1() { + public void testCheckNonEmptyString1() { final String string = "foo"; Preconditions.checkNonEmptyString(string, "string"); } - @Test(expected = IllegalArgumentException.class) - public void checkNonEmptyString2() { + @Test + public void testCheckNonEmptyString2() { final String string = ""; - Preconditions.checkNonEmptyString(string, "string"); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkNonEmptyString(string, "string"); + }); } - @Test(expected = IllegalArgumentException.class) - public void checkNonEmptyString3() { + @Test + public void testCheckNonEmptyString3() { final String string = null; - Preconditions.checkNonEmptyString(string, "string"); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkNonEmptyString(string, "string"); + }); } @Test - public void checkPositiveNumber1() { + public void testCheckPositiveNumber1() { final Number number = 42; Preconditions.checkPositiveNumber(number, "number"); } - @Test(expected = IllegalArgumentException.class) - public void checkPositiveNumber2() { + @Test + public void testCheckPositiveNumber2() { final Number number = 0; - Preconditions.checkPositiveNumber(number, "number"); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkPositiveNumber(number, "number"); + }); } - @Test(expected = IllegalArgumentException.class) - public void checkPositiveNumber3() { + @Test + public void testCheckPositiveNumber3() { final Number number = null; - Preconditions.checkPositiveNumber(number, "number"); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkPositiveNumber(number, "number"); + }); } } \ No newline at end of file diff --git a/src/test/java/org/influxdb/impl/TimeUtilTest.java b/src/test/java/org/influxdb/impl/TimeUtilTest.java index bb8d8e7b8..a41577c9f 100644 --- a/src/test/java/org/influxdb/impl/TimeUtilTest.java +++ b/src/test/java/org/influxdb/impl/TimeUtilTest.java @@ -1,23 +1,21 @@ package org.influxdb.impl; -import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import org.junit.jupiter.api.Test; public class TimeUtilTest { @Test - public void toInfluxDBTimeFormatTest() throws Exception { - assertThat(TimeUtil.toInfluxDBTimeFormat(1477896740020L), is(equalTo("2016-10-31T06:52:20.020Z"))); - assertThat(TimeUtil.toInfluxDBTimeFormat(1477932740005L), is(equalTo("2016-10-31T16:52:20.005Z"))); + public void testToInfluxDBTimeFormatTest() throws Exception { + 
assertThat(TimeUtil.toInfluxDBTimeFormat(1477896740020L)).isEqualTo("2016-10-31T06:52:20.020Z"); + assertThat(TimeUtil.toInfluxDBTimeFormat(1477932740005L)).isEqualTo("2016-10-31T16:52:20.005Z"); } @Test - public void fromInfluxDBTimeFormatTest() throws Exception { - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20.020Z"), is(equalTo(1477896740020L))); - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20.005Z"), is(equalTo(1477932740005L))); - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20Z"), is(equalTo(1477932740000L))); - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20Z"), is(equalTo(1477896740000L))); + public void testFromInfluxDBTimeFormatTest() throws Exception { + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20.020Z")).isEqualTo(1477896740020L); + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20.005Z")).isEqualTo(1477932740005L); + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20Z")).isEqualTo(1477932740000L); + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20Z")).isEqualTo(1477896740000L); } } From bde2b4d7a575bc170373621d4b71db2ec8834b60 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Mon, 20 Nov 2017 20:57:45 +0100 Subject: [PATCH 048/148] Run all test with JUnitPlatform --- src/test/java/org/influxdb/dto/BatchPointTest.java | 4 +++- src/test/java/org/influxdb/dto/PointTest.java | 3 +++ src/test/java/org/influxdb/dto/QueryTest.java | 3 +++ src/test/java/org/influxdb/impl/BatchProcessorTest.java | 3 +++ src/test/java/org/influxdb/impl/ChunkingExceptionTest.java | 3 +++ src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java | 3 +++ src/test/java/org/influxdb/impl/PreconditionsTest.java | 3 +++ src/test/java/org/influxdb/impl/TimeUtilTest.java | 3 +++ 8 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/dto/BatchPointTest.java b/src/test/java/org/influxdb/dto/BatchPointTest.java index a86029c34..0cac6a64b 100644 --- a/src/test/java/org/influxdb/dto/BatchPointTest.java +++ b/src/test/java/org/influxdb/dto/BatchPointTest.java @@ -10,8 +10,10 @@ import org.influxdb.InfluxDB; import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; - +@RunWith(JUnitPlatform.class) public class BatchPointTest { @Test diff --git a/src/test/java/org/influxdb/dto/PointTest.java b/src/test/java/org/influxdb/dto/PointTest.java index 704a2baaf..e828fd9cc 100644 --- a/src/test/java/org/influxdb/dto/PointTest.java +++ b/src/test/java/org/influxdb/dto/PointTest.java @@ -13,6 +13,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Assertions; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; /** * Test for the Point DTO. 
@@ -20,6 +22,7 @@ * @author stefan.majer [at] gmail.com * */ +@RunWith(JUnitPlatform.class) public class PointTest { /** diff --git a/src/test/java/org/influxdb/dto/QueryTest.java b/src/test/java/org/influxdb/dto/QueryTest.java index 40987e1a5..d5d67f35d 100644 --- a/src/test/java/org/influxdb/dto/QueryTest.java +++ b/src/test/java/org/influxdb/dto/QueryTest.java @@ -3,6 +3,8 @@ import static org.assertj.core.api.Assertions.assertThat; import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; @@ -15,6 +17,7 @@ * @author jord [at] moz.com * */ +@RunWith(JUnitPlatform.class) public class QueryTest { /** diff --git a/src/test/java/org/influxdb/impl/BatchProcessorTest.java b/src/test/java/org/influxdb/impl/BatchProcessorTest.java index 216c1d7a1..c30c3b388 100644 --- a/src/test/java/org/influxdb/impl/BatchProcessorTest.java +++ b/src/test/java/org/influxdb/impl/BatchProcessorTest.java @@ -18,7 +18,10 @@ import org.influxdb.dto.Point; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +@RunWith(JUnitPlatform.class) public class BatchProcessorTest { @Test diff --git a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java index f2865e0cd..c81189b92 100644 --- a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java +++ b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java @@ -20,6 +20,8 @@ import org.influxdb.dto.QueryResult; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import com.squareup.moshi.JsonAdapter; @@ -32,6 +34,7 @@ import retrofit2.Callback; import retrofit2.Response; +@RunWith(JUnitPlatform.class) public class ChunkingExceptionTest { @Test diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java index c11b2b15a..688ab9387 100644 --- a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -37,10 +37,13 @@ import org.influxdb.impl.InfluxDBResultMapper; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; /** * @author fmachado */ +@RunWith(JUnitPlatform.class) public class InfluxDBResultMapperTest { InfluxDBResultMapper mapper = new InfluxDBResultMapper(); diff --git a/src/test/java/org/influxdb/impl/PreconditionsTest.java b/src/test/java/org/influxdb/impl/PreconditionsTest.java index 43a65f618..c0ba82add 100644 --- a/src/test/java/org/influxdb/impl/PreconditionsTest.java +++ b/src/test/java/org/influxdb/impl/PreconditionsTest.java @@ -2,7 +2,10 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Assertions; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +@RunWith(JUnitPlatform.class) public class PreconditionsTest { @Test diff --git a/src/test/java/org/influxdb/impl/TimeUtilTest.java b/src/test/java/org/influxdb/impl/TimeUtilTest.java index a41577c9f..82efe041d 100644 --- a/src/test/java/org/influxdb/impl/TimeUtilTest.java +++ b/src/test/java/org/influxdb/impl/TimeUtilTest.java @@ -3,7 +3,10 @@ import static 
org.assertj.core.api.Assertions.assertThat; import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +@RunWith(JUnitPlatform.class) public class TimeUtilTest { @Test public void testToInfluxDBTimeFormatTest() throws Exception { From 0710fa3b925f09c0d5eacfb57f1ca595f39ed6d5 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 21 Nov 2017 08:04:05 +0100 Subject: [PATCH 049/148] Add Changelog entry, fix markdownlint issues in CHANGELOG.md --- CHANGELOG.md | 139 ++++++++++++++++++++++++++------------------------- 1 file changed, 72 insertions(+), 67 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27404396c..31dfaa616 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,115 +1,120 @@ +# Changelog + ## 2.8 [unreleased] -#### Fixes +### Fixes - - InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345) - - InfluxDB will now handle the timestamp on its own if none is provided [PR#350](https://github.com/influxdata/influxdb-java/pull/350) +- InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345) +- InfluxDB will now handle the timestamp on its own if none is provided [PR#350](https://github.com/influxdata/influxdb-java/pull/350) -#### Features +### Features - - API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351) - - API: add InfluxDB#query that uses callbacks +- API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351) +- API: add InfluxDB#query that uses callbacks -#### Improvements +### Improvements - - Build: all unit and integration test are now running with jdk8 and jdk9. +- Build: all unit and integration test are now running with jdk8 and jdk9. +- Test: migration to junit5 ## v2.7 [2017-06-26] -#### Features +### Features - - Simplify write() methods for use cases writing all points to same database and retention policy [PR #327](https://github.com/influxdata/influxdb-java/pull/327) - - QueryResult to Object mapper added [PR #341](https://github.com/influxdata/influxdb-java/pull/341) +- Simplify write() methods for use cases writing all points to same database and retention policy [PR #327](https://github.com/influxdata/influxdb-java/pull/327) +- QueryResult to Object mapper added [PR #341](https://github.com/influxdata/influxdb-java/pull/341) -#### Fixes +### Fixes - - Replace RuntimeException with InfluxDBException [Issue #323](https://github.com/influxdata/influxdb-java/issues/323) +- Replace RuntimeException with InfluxDBException [Issue #323](https://github.com/influxdata/influxdb-java/issues/323) -#### Improvements +### Improvements - - Significant (~35%) performance improvements for write speed with less memory footprint. [PR #330](https://github.com/influxdata/influxdb-java/pull/330) - - Drop guava runtime dependency which reduces jar size from 1MB -> 49KB [PR #322](https://github.com/influxdata/influxdb-java/pull/322) +- Significant (~35%) performance improvements for write speed with less memory footprint. 
[PR #330](https://github.com/influxdata/influxdb-java/pull/330) +- Drop guava runtime dependency which reduces jar size from 1MB -> 49KB [PR #322](https://github.com/influxdata/influxdb-java/pull/322) ## v2.6 [2017-06-08] -#### Features +### Features - - Switch to Java 1.8 - - Support chunking - - Add a databaseExists method to InfluxDB interface - - [Issue #289](https://github.com/influxdata/influxdb-java/issues/289) Batching enhancements: Pending asynchronous writes can be explicitly flushed via `InfluxDB.flush()`. - - Add a listener to notify asynchronous errors during batch flushes [PR #318](https://github.com/influxdata/influxdb-java/pull/318). +- Switch to Java 1.8 +- Support chunking +- Add a databaseExists method to InfluxDB interface +- [Issue #289](https://github.com/influxdata/influxdb-java/issues/289) Batching enhancements: Pending asynchronous writes can be explicitly flushed via `InfluxDB.flush()`. +- Add a listener to notify asynchronous errors during batch flushes [PR #318](https://github.com/influxdata/influxdb-java/pull/318). -#### Fixes +### Fixes - - [Issue #263](https://github.com/influxdata/influxdb-java/issues/263) Add databaseExists method to InfluxDB interface. +- [Issue #263](https://github.com/influxdata/influxdb-java/issues/263) Add databaseExists method to InfluxDB interface. -#### Improvements +### Improvements - - Update retrofit from 2.1 to 2.2 - - Update slf4j from 1.7.22 to 1.7.24 - - Update okhttp3 from 3.5 to 3.6 - - automatically adjust batch processor capacity [PR #282](https://github.com/influxdata/influxdb-java/pull/282) +- Update retrofit from 2.1 to 2.2 +- Update slf4j from 1.7.22 to 1.7.24 +- Update okhttp3 from 3.5 to 3.6 +- automatically adjust batch processor capacity [PR #282](https://github.com/influxdata/influxdb-java/pull/282) ## v2.5 [2016-12-05] -#### Features +### Features - - Support writing by UDP protocal. - - Support gzip compress for http request body. - - Support setting thread factory for batch processor. - - Support chunking +- Support writing by UDP protocal. +- Support gzip compress for http request body. +- Support setting thread factory for batch processor. +- Support chunking -#### Fixes +### Fixes - - [Issue #162](https://github.com/influxdb/influxdb-java/issues/162) Write point using async batch mode with different rp will use same rp. - - [Issue #214](https://github.com/influxdb/influxdb-java/issues/214) Send multiple queries in one query statement will get only one result. - - Write can't be always async if batch is enabled. +- [Issue #162](https://github.com/influxdb/influxdb-java/issues/162) Write point using async batch mode with different rp will use same rp. +- [Issue #214](https://github.com/influxdb/influxdb-java/issues/214) Send multiple queries in one query statement will get only one result. +- Write can't be always async if batch is enabled. -#### Improvements +### Improvements - - Remove the limit for database name: not contain '-'. - - Support creating influxdb instance without username and password. - - Add time related util methods for converting influxdb timestamp or unix epoch time. - - correct exception type when disable batch twice. +- Remove the limit for database name: not contain '-'. +- Support creating influxdb instance without username and password. +- Add time related util methods for converting influxdb timestamp or unix epoch time. +- correct exception type when disable batch twice. ## v2.4 [2016-10-24] -#### Features - - now uses okhttp3 and retrofit2. 
As a result, you can now pass an OkHttpClient.Builder to the InfluxDBFactory.connect if you wish to add more interceptors, etc, to OkHttp. - - in InfluxDB 1.0.0, some queries now require a POST instead of GET. There is a flag on Query that allow this to be specified (default is still GET). +### Features + +- now uses okhttp3 and retrofit2. As a result, you can now pass an OkHttpClient.Builder to the InfluxDBFactory.connect if you wish to add more interceptors, etc, to OkHttp. +- in InfluxDB 1.0.0, some queries now require a POST instead of GET. There is a flag on Query that allow this to be specified (default is still GET). ## v2.2 [2016-04-11] -#### Features +### Features + +- Allow writing of pre-constructed line protocol strings - - Allow writing of pre-constructed line protocol strings +### Fixes -#### Fixes +- Correct escaping of database names for create and delete database actions +- Many bug fixes / improvements in general - - Correct escaping of database names for create and delete database actions - - Many bug fixes / improvements in general +### Other -#### Other - - Deprecated `field()` method in preference for `addField()` methods. +- Deprecated `field()` method in preference for `addField()` methods. ## v2.1 [2015-12-05] -#### Features +### Features - - Extensions to fluent builder classes - - Convenience methods for building Points - - Allow integer types as field values +- Extensions to fluent builder classes +- Convenience methods for building Points +- Allow integer types as field values -#### Fixes +### Fixes - - Fixed escaping of tag and field values - - Always uses nanosecond precision for time - - Uses NumberFormat class for safer formatting of large numbers. +- Fixed escaping of tag and field values +- Always uses nanosecond precision for time +- Uses NumberFormat class for safer formatting of large numbers. ## v2.0 [2015-07-17] -#### Features +### Features - Compatible with InfluxDB version 0.9+ - Support for lineprotocol @@ -121,7 +126,7 @@ No major functional changes or improvements. Mainly library updates and code str ## v1.3 [2014-10-22] -#### Features +### Features - Compatible with InfluxDB Version up to 0.8 - API: add a InfluxDB#createDatabase(DatabaseConfiguration) to be able to create a new Database with ShardSpaces defined. @@ -134,12 +139,12 @@ No major functional changes or improvements. Mainly library updates and code str ## v1.2 [2014-06-28] -#### Features +### Features - [Issue #2](https://github.com/influxdb/influxdb-java/issues/2) Implement the last missing api calls ( interfaces, sync, forceCompaction, servers, shards) - use (http://square.github.io/okhttp/, okhttp) instead of java builtin httpconnection to get failover for the http endpoint. -#### Tasks +### Tasks - [Issue #8](https://github.com/influxdb/influxdb-java/issues/8) Use com.github.docker-java which replaces com.kpelykh for Integration tests. - [Issue #6](https://github.com/influxdb/influxdb-java/issues/6) Update Retrofit to 1.6.0 @@ -148,7 +153,7 @@ No major functional changes or improvements. Mainly library updates and code str ## v1.1 [2014-05-31] -#### Features +### Features - Add InfluxDB#version() to get the InfluxDB Server version information. - Changed InfluxDB#createDatabase() to match [Issue #489](https://github.com/influxdb/influxdb/issues/489) without replicationFactor. @@ -156,4 +161,4 @@ No major functional changes or improvements. 
Mainly library updates and code str ## v1.0 [2014-05-6] - * Initial Release +- Initial Release From 0df2b2cf5d9d3e103dba0e93e7cc1858d5d37505 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 21 Nov 2017 10:53:30 +0100 Subject: [PATCH 050/148] assertEquals first argument is expected, second actual --- src/test/java/org/influxdb/InfluxDBTest.java | 42 ++++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index cd505dd63..0953f6390 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -306,9 +306,9 @@ public void testWriteMultipleStringDataThroughUDP() { QueryResult result = this.influxDB.query(query); Assertions.assertEquals(3, result.getResults().get(0).getSeries().size()); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); } /** @@ -327,9 +327,9 @@ public void testWriteMultipleStringDataLinesThroughUDP() { QueryResult result = this.influxDB.query(query); Assertions.assertEquals(3, result.getResults().get(0).getSeries().size()); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); } /** @@ -373,9 +373,9 @@ public void testWriteMultipleStringData() { QueryResult result = this.influxDB.query(query); Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); this.influxDB.deleteDatabase(dbName); } @@ -395,9 +395,9 @@ public void testWriteMultipleStringDataSimple() { QueryResult result = this.influxDB.query(query); Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - 
Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); this.influxDB.deleteDatabase(dbName); } @@ -419,9 +419,9 @@ public void testWriteMultipleStringDataLines() { QueryResult result = this.influxDB.query(query); Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); this.influxDB.deleteDatabase(dbName); } @@ -445,9 +445,9 @@ public void testWriteMultipleStringDataLinesSimple() { QueryResult result = this.influxDB.query(query); Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); this.influxDB.deleteDatabase(dbName); } @@ -596,9 +596,9 @@ public void testWriteEnableGzip() { QueryResult result = influxDBForTestGzip.query(query); Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assertions.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); } finally { influxDBForTestGzip.deleteDatabase(dbName); influxDBForTestGzip.close(); From 6f7181ae2844ce72ab46c321fa9170450e638c6b Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 22 Nov 2017 09:37:28 +0100 Subject: [PATCH 051/148] Fix single test which was failing on travis because it relied on order of execution previously --- src/test/java/org/influxdb/PerformanceTests.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java index 98827e397..47a80eb58 100644 --- a/src/test/java/org/influxdb/PerformanceTests.java +++ b/src/test/java/org/influxdb/PerformanceTests.java @@ -116,6 +116,10 @@ public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { lineProtocols.add(point.lineProtocol()); } + String dbName = "write_compare_udp_" + System.currentTimeMillis(); + this.influxDB.createDatabase(dbName); + this.influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); + //write batch of 1000 single string. Stopwatch watch = Stopwatch.createStarted(); this.influxDB.write(UDP_PORT, lineProtocols); @@ -127,6 +131,7 @@ public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { for (String lineProtocol: lineProtocols){ this.influxDB.write(UDP_PORT, lineProtocol); } + this.influxDB.deleteDatabase(dbName); long elapsedForSingleWrite = watch.elapsed(TimeUnit.MILLISECONDS); System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); From df4af4599d568d9b1a7acf731254acdb8214f558 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Thu, 23 Nov 2017 10:21:37 +0100 Subject: [PATCH 052/148] Get rid of guava even for tests --- pom.xml | 6 ----- src/test/java/org/influxdb/InfluxDBTest.java | 26 +++++++++---------- .../java/org/influxdb/PerformanceTests.java | 21 +++++++-------- 3 files changed, 22 insertions(+), 31 deletions(-) diff --git a/pom.xml b/pom.xml index 979f63a01..84e622574 100644 --- a/pom.xml +++ b/pom.xml @@ -239,12 +239,6 @@ 2.12.0 test - - com.google.guava - guava - 23.4-jre - test - com.squareup.retrofit2 retrofit diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index 0953f6390..e206ffaa0 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -26,8 +26,6 @@ import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; -import com.google.common.util.concurrent.Uninterruptibles; - /** * Test the InfluxDB API. * @@ -195,12 +193,12 @@ public void testWrite() { * Test the implementation of {@link InfluxDB#write(int, Point)}'s sync support. */ @Test - public void testSyncWritePointThroughUDP() { + public void testSyncWritePointThroughUDP() throws InterruptedException { this.influxDB.disableBatch(); String measurement = TestUtils.getRandomMeasurement(); Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); this.influxDB.write(UDP_PORT, point); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); @@ -210,14 +208,14 @@ public void testSyncWritePointThroughUDP() { * Test the implementation of {@link InfluxDB#write(int, Point)}'s async support. 
*/ @Test - public void testAsyncWritePointThroughUDP() { + public void testAsyncWritePointThroughUDP() throws InterruptedException { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); try{ Assertions.assertTrue(this.influxDB.isBatchEnabled()); String measurement = TestUtils.getRandomMeasurement(); Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); this.influxDB.write(UDP_PORT, point); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); @@ -282,11 +280,11 @@ public void testWriteStringDataSimple() { * Test writing to the database using string protocol through UDP. */ @Test - public void testWriteStringDataThroughUDP() { + public void testWriteStringDataThroughUDP() throws InterruptedException { String measurement = TestUtils.getRandomMeasurement(); this.influxDB.write(UDP_PORT, measurement + ",atag=test idle=90,usertime=9,system=1"); //write with UDP may be executed on server after query with HTTP. so sleep 2s to handle this case - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); @@ -296,12 +294,12 @@ public void testWriteStringDataThroughUDP() { * Test writing multiple records to the database using string protocol through UDP. */ @Test - public void testWriteMultipleStringDataThroughUDP() { + public void testWriteMultipleStringDataThroughUDP() throws InterruptedException { String measurement = TestUtils.getRandomMeasurement(); this.influxDB.write(UDP_PORT, measurement + ",atag=test1 idle=100,usertime=10,system=1\n" + measurement + ",atag=test2 idle=200,usertime=20,system=2\n" + measurement + ",atag=test3 idle=300,usertime=30,system=3"); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); @@ -315,14 +313,14 @@ public void testWriteMultipleStringDataThroughUDP() { * Test writing multiple separate records to the database using string protocol through UDP. 
*/ @Test - public void testWriteMultipleStringDataLinesThroughUDP() { + public void testWriteMultipleStringDataLinesThroughUDP() throws InterruptedException { String measurement = TestUtils.getRandomMeasurement(); this.influxDB.write(UDP_PORT, Arrays.asList( measurement + ",atag=test1 idle=100,usertime=10,system=1", measurement + ",atag=test2 idle=200,usertime=20,system=2", measurement + ",atag=test3 idle=300,usertime=30,system=3" )); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); QueryResult result = this.influxDB.query(query); @@ -646,7 +644,7 @@ public void testChunking() throws InterruptedException { batchPoints.point(point3); this.influxDB.write(batchPoints); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); final BlockingQueue queue = new LinkedBlockingQueue<>(); Query query = new Query("SELECT * FROM disk", dbName); this.influxDB.query(query, 2, new Consumer() { @@ -655,7 +653,7 @@ public void accept(QueryResult result) { queue.add(result); }}); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + Thread.sleep(2000); this.influxDB.deleteDatabase(dbName); QueryResult result = queue.poll(20, TimeUnit.SECONDS); diff --git a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java index 47a80eb58..e7c363d0e 100644 --- a/src/test/java/org/influxdb/PerformanceTests.java +++ b/src/test/java/org/influxdb/PerformanceTests.java @@ -1,6 +1,5 @@ package org.influxdb; -import com.google.common.base.Stopwatch; import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; @@ -47,7 +46,7 @@ public void testWriteSinglePointPerformance() { this.influxDB.createDatabase(dbName); this.influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - Stopwatch watch = Stopwatch.createStarted(); + long start = System.currentTimeMillis(); for (int j = 0; j < SINGLE_POINT_COUNT; j++) { Point point = Point.measurement("cpu") .addField("idle", (double) j) @@ -56,7 +55,7 @@ public void testWriteSinglePointPerformance() { this.influxDB.write(dbName, rp, point); } this.influxDB.disableBatch(); - System.out.println("Single Point Write for " + SINGLE_POINT_COUNT + " writes of Points took:" + watch); + System.out.println("Single Point Write for " + SINGLE_POINT_COUNT + " writes of Points took:" + (System.currentTimeMillis() - start)); this.influxDB.deleteDatabase(dbName); } @@ -67,7 +66,7 @@ public void testWritePerformance() { this.influxDB.createDatabase(dbName); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - Stopwatch watch = Stopwatch.createStarted(); + long start = System.currentTimeMillis(); for (int i = 0; i < COUNT; i++) { BatchPoints batchPoints = BatchPoints @@ -87,7 +86,7 @@ public void testWritePerformance() { this.influxDB.write(batchPoints); } - System.out.println("WritePoints for " + COUNT + " writes of " + POINT_COUNT + " Points took:" + watch); + System.out.println("WritePoints for " + COUNT + " writes of " + POINT_COUNT + " Points took:" + (System.currentTimeMillis() - start)); this.influxDB.deleteDatabase(dbName); } @@ -98,12 +97,12 @@ public void testMaxWritePointsPerformance() { this.influxDB.enableBatch(100000, 60, TimeUnit.SECONDS); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - Stopwatch watch = Stopwatch.createStarted(); + 
long start = System.currentTimeMillis(); for (int i = 0; i < 2000000; i++) { Point point = Point.measurement("s").addField("v", 1.0).build(); this.influxDB.write(dbName, rp, point); } - System.out.println("5Mio points:" + watch); + System.out.println("5Mio points:" + (System.currentTimeMillis() - start)); this.influxDB.deleteDatabase(dbName); } @@ -121,19 +120,19 @@ public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { this.influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); //write batch of 1000 single string. - Stopwatch watch = Stopwatch.createStarted(); + long start = System.currentTimeMillis(); this.influxDB.write(UDP_PORT, lineProtocols); - long elapsedForBatchWrite = watch.elapsed(TimeUnit.MILLISECONDS); + long elapsedForBatchWrite = System.currentTimeMillis() - start; System.out.println("performance(ms):write udp with batch of 1000 string:" + elapsedForBatchWrite); //write 1000 single string by udp. - watch = Stopwatch.createStarted(); + start = System.currentTimeMillis(); for (String lineProtocol: lineProtocols){ this.influxDB.write(UDP_PORT, lineProtocol); } this.influxDB.deleteDatabase(dbName); - long elapsedForSingleWrite = watch.elapsed(TimeUnit.MILLISECONDS); + long elapsedForSingleWrite = System.currentTimeMillis() - start; System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); Assertions.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); From ca76250e7a3b6b3ebd17f0eb658af9010253affc Mon Sep 17 00:00:00 2001 From: ivankudibal Date: Wed, 6 Dec 2017 00:08:15 +0100 Subject: [PATCH 053/148] release 2.8 --- pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index 84e622574..a48133680 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ org.influxdb influxdb-java jar - 2.8-SNAPSHOT + 2.8 influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org @@ -153,7 +153,7 @@ - org.apache.maven.plugins maven-gpg-plugin @@ -168,7 +168,7 @@ ---> + org.jacoco jacoco-maven-plugin From 563c29d0c81683406b565f89e951e53de35540c8 Mon Sep 17 00:00:00 2001 From: ivankudibal Date: Wed, 6 Dec 2017 00:09:20 +0100 Subject: [PATCH 054/148] ready for 2.9-SNAPSHOT --- pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index a48133680..bd92d18d7 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ org.influxdb influxdb-java jar - 2.8 + 2.9-SNAPSHOT influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org @@ -153,7 +153,7 @@ - + +--> org.jacoco jacoco-maven-plugin From 70430d65ff6eed07178b4b2ca9226fdc5644d3b9 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Thu, 7 Dec 2017 08:59:34 +0100 Subject: [PATCH 055/148] Mention released version --- CHANGELOG.md | 5 ++++- README.md | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31dfaa616..3ef151b3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,9 @@ # Changelog -## 2.8 [unreleased] +## 2.9 [unreleased] + + +## 2.8 [2017-12-06] ### Fixes diff --git a/README.md b/README.md index 329c66ea4..9f88ddbbb 100644 --- a/README.md +++ b/README.md @@ -250,7 +250,7 @@ The latest version for maven dependence: org.influxdb influxdb-java - 2.7 + 2.8 ``` Or when using with gradle: From 9f379e2972dfd58e4aef07a3a8ca9e6c08bf0fcf Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Thu, 7 Dec 2017 09:01:23 +0100 Subject: [PATCH 056/148] Mention released version --- README.md | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9f88ddbbb..df5069b83 100644 --- a/README.md +++ b/README.md @@ -255,7 +255,7 @@ The latest version for maven dependence: ``` Or when using with gradle: ```groovy -compile 'org.influxdb:influxdb-java:2.7' +compile 'org.influxdb:influxdb-java:2.8' ``` For version change history have a look at [ChangeLog](https://github.com/influxdata/influxdb-java/blob/master/CHANGELOG.md). From 7605a843cfb855d789ebc85fff4ccf2e771fa8f1 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Wed, 3 Jan 2018 15:07:08 +0100 Subject: [PATCH 057/148] Added 'deprecated' annotations to javadoc and methods --- src/main/java/org/influxdb/InfluxDB.java | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index 3714eb131..ebc92ccad 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -342,7 +342,10 @@ public void write(final String database, final String retentionPolicy, * * @param name * the name of the new database. + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * CREATE DATABASE query. */ + @Deprecated public void createDatabase(final String name); /** @@ -350,14 +353,20 @@ public void write(final String database, final String retentionPolicy, * * @param name * the name of the database to delete. + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * DROP DATABASE query. */ + @Deprecated public void deleteDatabase(final String name); /** * Describe all available databases. * * @return a List of all Database names. + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * SHOW DATABASES query. */ + @Deprecated public List describeDatabases(); /** @@ -367,6 +376,8 @@ public void write(final String database, final String retentionPolicy, * the name of the database to search. * * @return true if the database exists or false if it doesn't exist + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * SHOW DATABASES query and inspect the result. */ public boolean databaseExists(final String name); @@ -418,7 +429,10 @@ public void write(final String database, final String retentionPolicy, * @param shardDuration the shardDuration * @param replicationFactor the replicationFactor of the rp * @param isDefault if the rp is the default rp for the database or not + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * CREATE RETENTION POLICY query. */ + @Deprecated public void createRetentionPolicy(final String rpName, final String database, final String duration, final String shardDuration, final int replicationFactor, final boolean isDefault); @@ -429,7 +443,10 @@ public void createRetentionPolicy(final String rpName, final String database, fi * @param duration the duration of the rp * @param replicationFactor the replicationFactor of the rp * @param isDefault if the rp is the default rp for the database or not + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * CREATE RETENTION POLICY query. 
*/ + @Deprecated public void createRetentionPolicy(final String rpName, final String database, final String duration, final int replicationFactor, final boolean isDefault); @@ -440,7 +457,10 @@ public void createRetentionPolicy(final String rpName, final String database, fi * @param duration the duration of the rp * @param shardDuration the shardDuration * @param replicationFactor the replicationFactor of the rp + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * CREATE RETENTION POLICY query. */ + @Deprecated public void createRetentionPolicy(final String rpName, final String database, final String duration, final String shardDuration, final int replicationFactor); @@ -448,6 +468,9 @@ public void createRetentionPolicy(final String rpName, final String database, fi * Drops a retentionPolicy in a database. * @param rpName the name of the retentionPolicy * @param database the name of the database + * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * DROP RETENTION POLICY query. */ + @Deprecated public void dropRetentionPolicy(final String rpName, final String database); } From e6bc0a219378942c9d549ef6b7c82ac057274080 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Wed, 3 Jan 2018 15:14:45 +0100 Subject: [PATCH 058/148] Added missing deprecated annotation. --- src/main/java/org/influxdb/InfluxDB.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index ebc92ccad..a5fa5d2d6 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -379,6 +379,7 @@ public void write(final String database, final String retentionPolicy, * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a * SHOW DATABASES query and inspect the result. */ + @Deprecated public boolean databaseExists(final String name); /** From eefd2aba00f9897b5197aab336cae67c8eb955c0 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Thu, 4 Jan 2018 13:41:35 +0100 Subject: [PATCH 059/148] added "removed in 3.0" to deprecated annotations --- src/main/java/org/influxdb/InfluxDB.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index a5fa5d2d6..c1359ba23 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -342,7 +342,7 @@ public void write(final String database, final String retentionPolicy, * * @param name * the name of the new database. - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized * CREATE DATABASE query. */ @Deprecated @@ -353,7 +353,7 @@ public void write(final String database, final String retentionPolicy, * * @param name * the name of the database to delete. - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a * DROP DATABASE query. */ @Deprecated @@ -363,7 +363,7 @@ public void write(final String database, final String retentionPolicy, * Describe all available databases. * * @return a List of all Database names. 
- * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a * SHOW DATABASES query. */ @Deprecated @@ -376,7 +376,7 @@ public void write(final String database, final String retentionPolicy, * the name of the database to search. * * @return true if the database exists or false if it doesn't exist - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a * SHOW DATABASES query and inspect the result. */ @Deprecated @@ -430,7 +430,7 @@ public void write(final String database, final String retentionPolicy, * @param shardDuration the shardDuration * @param replicationFactor the replicationFactor of the rp * @param isDefault if the rp is the default rp for the database or not - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized * CREATE RETENTION POLICY query. */ @Deprecated @@ -444,7 +444,7 @@ public void createRetentionPolicy(final String rpName, final String database, fi * @param duration the duration of the rp * @param replicationFactor the replicationFactor of the rp * @param isDefault if the rp is the default rp for the database or not - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized * CREATE RETENTION POLICY query. */ @Deprecated @@ -458,7 +458,7 @@ public void createRetentionPolicy(final String rpName, final String database, fi * @param duration the duration of the rp * @param shardDuration the shardDuration * @param replicationFactor the replicationFactor of the rp - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a parameterized * CREATE RETENTION POLICY query. */ @Deprecated @@ -469,7 +469,7 @@ public void createRetentionPolicy(final String rpName, final String database, fi * Drops a retentionPolicy in a database. * @param rpName the name of the retentionPolicy * @param database the name of the database - * @deprecated (since 2.9) Use org.influxdb.InfluxDB.query(Query) to execute a + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) to execute a * DROP RETENTION POLICY query. */ @Deprecated From 4c6757470804ee95e9dd3bfcabf9abba945780b5 Mon Sep 17 00:00:00 2001 From: Eric Goebelbecker Date: Mon, 8 Jan 2018 05:21:06 -0500 Subject: [PATCH 060/148] Add convenience method to Pong for checking ping status. (#403) * Add convenience method to Pong for checking ping status. * Fix checkstyle checks. * Use constant for "unknown." 
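Example usage of the new convenience method (an illustrative sketch only; the
connection URL and credentials below are placeholders, not part of this change):

```java
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Pong;

public class PingExample {
    public static void main(String[] args) {
        // Placeholder connection parameters, for illustration only.
        InfluxDB influxDB = InfluxDBFactory.connect("http://127.0.0.1:8086", "root", "root");
        Pong response = influxDB.ping();
        // isGood() replaces the manual comparison of getVersion() against "unknown".
        if (response.isGood()) {
            System.out.println("Connected to InfluxDB version " + response.getVersion());
        }
    }
}
```

This keeps the "unknown" version literal out of call sites and inside Pong itself.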
--- src/main/java/org/influxdb/dto/Pong.java | 8 ++++++++ src/test/java/org/influxdb/InfluxDBTest.java | 2 +- src/test/java/org/influxdb/TicketTest.java | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/influxdb/dto/Pong.java b/src/main/java/org/influxdb/dto/Pong.java index 0245a1fde..278633ce1 100644 --- a/src/main/java/org/influxdb/dto/Pong.java +++ b/src/main/java/org/influxdb/dto/Pong.java @@ -9,6 +9,7 @@ public class Pong { private String version; private long responseTime; + private static final String UNKNOWN_VERSION = "unknown"; /** * @return the status @@ -25,6 +26,13 @@ public void setVersion(final String version) { this.version = version; } + /** + * Good or bad connection status. + */ + public boolean isGood() { + return !UNKNOWN_VERSION.equalsIgnoreCase(version); + } + /** * @return the responseTime */ diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index e206ffaa0..3f7351ac1 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -53,7 +53,7 @@ public void setUp() throws InterruptedException, IOException { Pong response; try { response = this.influxDB.ping(); - if (!response.getVersion().equalsIgnoreCase("unknown")) { + if (response.isGood()) { influxDBstarted = true; } } catch (Exception e) { diff --git a/src/test/java/org/influxdb/TicketTest.java b/src/test/java/org/influxdb/TicketTest.java index 828b30a8e..29aecf09e 100644 --- a/src/test/java/org/influxdb/TicketTest.java +++ b/src/test/java/org/influxdb/TicketTest.java @@ -40,7 +40,7 @@ public void setUp() throws InterruptedException, IOException { Pong response; try { response = this.influxDB.ping(); - if (!response.getVersion().equalsIgnoreCase("unknown")) { + if (response.isGood()) { influxDBstarted = true; } } catch (Exception e) { From 2c63292b864315cfdfb950b38a9d50f415fe8468 Mon Sep 17 00:00:00 2001 From: rbkasat Date: Mon, 8 Jan 2018 02:28:54 -0800 Subject: [PATCH 061/148] added consistency configuration for batch processing (#385) * added consistency configuration for batch processing * added test --- src/main/java/org/influxdb/InfluxDB.java | 24 ++++++++ .../org/influxdb/impl/BatchProcessor.java | 45 ++++++++++---- .../java/org/influxdb/impl/InfluxDBImpl.java | 31 ++++++---- src/test/java/org/influxdb/InfluxDBTest.java | 58 +++++++++++-------- .../org/influxdb/impl/BatchProcessorTest.java | 34 +++++++++-- 5 files changed, 141 insertions(+), 51 deletions(-) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index c1359ba23..deb192373 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -134,6 +134,30 @@ public String value() { */ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, final ThreadFactory threadFactory); + /** + * Enable batching of single Point writes with consistency set for an entire batch + * flushDurations is reached first, a batch write is issued. + * Note that batch processing needs to be explicitly stopped before the application is shutdown. + * To do so call disableBatch(). Default consistency is ONE. + * + * @param actions + * the number of actions to collect + * @param flushDuration + * the time to wait at most. + * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. + * @param threadFactory + * a ThreadFactory instance to be used. 
+ * @param exceptionHandler + * a consumer function to handle asynchronous errors + * @param consistency + * a consistency setting for batch writes. + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + + InfluxDB enableBatch(int actions, int flushDuration, TimeUnit flushDurationTimeUnit, + ThreadFactory threadFactory, BiConsumer, Throwable> exceptionHandler, + ConsistencyLevel consistency); /** * Enable batching of single Point writes to speed up writes significant. If either actions or diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index 28f973dc9..35457a55c 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -1,5 +1,10 @@ package org.influxdb.impl; +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDB.ConsistencyLevel; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -16,10 +21,6 @@ import java.util.logging.Level; import java.util.logging.Logger; -import org.influxdb.InfluxDB; -import org.influxdb.dto.BatchPoints; -import org.influxdb.dto.Point; - /** * A BatchProcessor can be attached to a InfluxDB Instance to collect single point writes and * aggregates them to BatchPoints to get a better write performance. @@ -27,7 +28,7 @@ * @author stefan.majer [at] gmail.com * */ -public class BatchProcessor { +public final class BatchProcessor { private static final Logger LOG = Logger.getLogger(BatchProcessor.class.getName()); protected final BlockingQueue queue; @@ -37,6 +38,7 @@ public class BatchProcessor { final int actions; private final TimeUnit flushIntervalUnit; private final int flushInterval; + private final ConsistencyLevel consistencyLevel; /** * The Builder to create a BatchProcessor instance. @@ -48,6 +50,7 @@ public static final class Builder { private TimeUnit flushIntervalUnit; private int flushInterval; private BiConsumer, Throwable> exceptionHandler = (entries, throwable) -> { }; + private ConsistencyLevel consistencyLevel; /** * @param threadFactory @@ -107,6 +110,18 @@ public Builder exceptionHandler(final BiConsumer, Throwable> han this.exceptionHandler = handler; return this; } + /** + * Consistency level for batch write. + * + * @param consistencyLevel + * the consistencyLevel + * + * @return this Builder to use it fluent + */ + public Builder consistencyLevel(final ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + return this; + } /** * Create the BatchProcessor. 
@@ -120,8 +135,9 @@ public BatchProcessor build() { Objects.requireNonNull(this.flushIntervalUnit, "flushIntervalUnit"); Objects.requireNonNull(this.threadFactory, "threadFactory"); Objects.requireNonNull(this.exceptionHandler, "exceptionHandler"); - return new BatchProcessor(this.influxDB, this.threadFactory, this.actions, this.flushIntervalUnit, - this.flushInterval, exceptionHandler); + return new BatchProcessor(this.influxDB, this.threadFactory, + this.actions, this.flushIntervalUnit, + this.flushInterval, exceptionHandler, this.consistencyLevel); } } @@ -180,9 +196,10 @@ public static Builder builder(final InfluxDB influxDB) { return new Builder(influxDB); } - BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions, - final TimeUnit flushIntervalUnit, final int flushInterval, - final BiConsumer, Throwable> exceptionHandler) { + private BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions, + final TimeUnit flushIntervalUnit, final int flushInterval, + final BiConsumer, Throwable> exceptionHandler, + final ConsistencyLevel consistencyLevel) { super(); this.influxDB = influxDB; this.actions = actions; @@ -190,6 +207,7 @@ public static Builder builder(final InfluxDB influxDB) { this.flushInterval = flushInterval; this.scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory); this.exceptionHandler = exceptionHandler; + this.consistencyLevel = consistencyLevel; if (actions > 1 && actions < Integer.MAX_VALUE) { this.queue = new LinkedBlockingQueue<>(actions); } else { @@ -229,7 +247,7 @@ void write() { String batchKey = dbName + "_" + rp; if (!batchKeyToBatchPoints.containsKey(batchKey)) { BatchPoints batchPoints = BatchPoints.database(dbName) - .retentionPolicy(rp).build(); + .retentionPolicy(rp).consistency(getConsistencyLevel()).build(); batchKeyToBatchPoints.put(batchKey, batchPoints); } batchKeyToBatchPoints.get(batchKey).point(point); @@ -297,4 +315,9 @@ void flushAndShutdown() { void flush() { this.write(); } + + public ConsistencyLevel getConsistencyLevel() { + return consistencyLevel; + } + } diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 070a6dbe8..d85600efc 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -3,7 +3,15 @@ import com.squareup.moshi.JsonAdapter; import com.squareup.moshi.Moshi; - +import okhttp3.Headers; +import okhttp3.HttpUrl; +import okhttp3.MediaType; +import okhttp3.OkHttpClient; +import okhttp3.RequestBody; +import okhttp3.ResponseBody; +import okhttp3.logging.HttpLoggingInterceptor; +import okhttp3.logging.HttpLoggingInterceptor.Level; +import okio.BufferedSource; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBException; import org.influxdb.InfluxDBIOException; @@ -14,16 +22,6 @@ import org.influxdb.dto.QueryResult; import org.influxdb.impl.BatchProcessor.HttpBatchEntry; import org.influxdb.impl.BatchProcessor.UdpBatchEntry; - -import okhttp3.Headers; -import okhttp3.HttpUrl; -import okhttp3.MediaType; -import okhttp3.OkHttpClient; -import okhttp3.RequestBody; -import okhttp3.ResponseBody; -import okhttp3.logging.HttpLoggingInterceptor; -import okhttp3.logging.HttpLoggingInterceptor.Level; -import okio.BufferedSource; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; @@ -201,6 +199,16 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, return this; } + @Override 
+ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, + final ThreadFactory threadFactory, + final BiConsumer, Throwable> exceptionHandler, + final ConsistencyLevel consistency) { + enableBatch(actions, flushDuration, flushDurationTimeUnit, threadFactory, exceptionHandler) + .setConsistency(consistency); + return this; + } + @Override public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, final ThreadFactory threadFactory, @@ -214,6 +222,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti .exceptionHandler(exceptionHandler) .interval(flushDuration, flushDurationTimeUnit) .threadFactory(threadFactory) + .consistencyLevel(consistency) .build(); this.batchEnabled.set(true); return this; diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index 3f7351ac1..dda093351 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -1,17 +1,5 @@ package org.influxdb; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Set; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; - import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; @@ -19,13 +7,26 @@ import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; import org.influxdb.impl.InfluxDBImpl; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + /** * Test the InfluxDB API. * @@ -147,7 +148,7 @@ public void testDescribeDatabases() { Assertions.assertTrue(found, "It is expected that describeDataBases contents the newly create database."); this.influxDB.deleteDatabase(dbName); } - + /** * Test that Database exists works. */ @@ -188,7 +189,7 @@ public void testWrite() { Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); this.influxDB.deleteDatabase(dbName); } - + /** * Test the implementation of {@link InfluxDB#write(int, Point)}'s sync support. */ @@ -203,7 +204,7 @@ public void testSyncWritePointThroughUDP() throws InterruptedException { QueryResult result = this.influxDB.query(query); Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); } - + /** * Test the implementation of {@link InfluxDB#write(int, Point)}'s async support. 
*/ @@ -223,8 +224,8 @@ public void testAsyncWritePointThroughUDP() throws InterruptedException { this.influxDB.disableBatch(); } } - - + + /** * Test the implementation of {@link InfluxDB#write(int, Point)}'s async support. */ @@ -461,7 +462,7 @@ public void testCreateNumericNamedDatabase() { Assertions.assertTrue(result.contains(numericDbName)); this.influxDB.deleteDatabase(numericDbName); } - + /** * Test that creating database which name is empty will throw expected exception */ @@ -501,7 +502,7 @@ public void testIsBatchEnabled() { this.influxDB.disableBatch(); Assertions.assertFalse(this.influxDB.isBatchEnabled()); } - + /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. */ @@ -509,7 +510,7 @@ public void testIsBatchEnabled() { public void testBatchEnabledWithThreadFactory() { final String threadName = "async_influxdb_write"; this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, new ThreadFactory() { - + @Override public Thread newThread(Runnable r) { Thread thread = new Thread(r); @@ -524,7 +525,7 @@ public Thread newThread(Runnable r) { existThreadWithSettedName = true; break; } - + } Assertions.assertTrue(existThreadWithSettedName); this.influxDB.disableBatch(); @@ -536,7 +537,7 @@ public void testBatchEnabledWithThreadFactoryIsNull() { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, null); }); } - + /** * Test the implementation of {@link InfluxDBImpl#InfluxDBImpl(String, String, String, okhttp3.OkHttpClient.Builder)}. */ @@ -778,4 +779,15 @@ public void testCreateDropRetentionPolicies() { Assertions.assertTrue(retentionPolicies.size() == 1); } + /** + * Test the implementation of {@link InfluxDB#isBatchEnabled() with consistency}. + */ + @Test + public void testIsBatchEnabledWithConsistency() { + Assertions.assertFalse(this.influxDB.isBatchEnabled()); + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, Executors.defaultThreadFactory(), + (a, b) -> { + }, InfluxDB.ConsistencyLevel.ALL); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); + } } diff --git a/src/test/java/org/influxdb/impl/BatchProcessorTest.java b/src/test/java/org/influxdb/impl/BatchProcessorTest.java index c30c3b388..8a17245f0 100644 --- a/src/test/java/org/influxdb/impl/BatchProcessorTest.java +++ b/src/test/java/org/influxdb/impl/BatchProcessorTest.java @@ -21,6 +21,11 @@ import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; +import static org.junit.Assert.assertNull; +import static org.hamcrest.CoreMatchers.*; +import static org.junit.Assert.assertThat; + + @RunWith(JUnitPlatform.class) public class BatchProcessorTest { @@ -115,8 +120,8 @@ public void testFlushWritesBufferedPointsAndDoesNotShutdownScheduler() throws In public void testActionsIsZero() throws InterruptedException, IOException { InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); Assertions.assertThrows(IllegalArgumentException.class, () -> { - BatchProcessor.builder(mockInfluxDB).actions(0) - .interval(1, TimeUnit.NANOSECONDS).build(); + BatchProcessor.builder(mockInfluxDB).actions(0) + .interval(1, TimeUnit.NANOSECONDS).build(); }); } @@ -124,8 +129,8 @@ public void testActionsIsZero() throws InterruptedException, IOException { public void testIntervalIsZero() throws InterruptedException, IOException { InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); Assertions.assertThrows(IllegalArgumentException.class, () -> { - BatchProcessor.builder(mockInfluxDB).actions(1) - .interval(0, TimeUnit.NANOSECONDS).build(); + BatchProcessor.builder(mockInfluxDB).actions(1) + 
.interval(0, TimeUnit.NANOSECONDS).build(); }); } @@ -133,8 +138,25 @@ public void testIntervalIsZero() throws InterruptedException, IOException { public void testInfluxDBIsNull() throws InterruptedException, IOException { InfluxDB mockInfluxDB = null; Assertions.assertThrows(NullPointerException.class, () -> { - BatchProcessor.builder(mockInfluxDB).actions(1) - .interval(1, TimeUnit.NANOSECONDS).build(); + BatchProcessor.builder(mockInfluxDB).actions(1) + .interval(1, TimeUnit.NANOSECONDS).build(); }); } + + @Test + public void testConsistencyLevelNull() throws InterruptedException, IOException { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).build(); + assertNull(batchProcessor.getConsistencyLevel()); + } + + @Test + public void testConsistencyLevelUpdated() throws InterruptedException, IOException { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).consistencyLevel(InfluxDB.ConsistencyLevel.ANY).build(); + assertThat(batchProcessor.getConsistencyLevel(), is(equalTo(InfluxDB.ConsistencyLevel.ANY))); + } + } From c7d94556ed3db4d59175fe285ca0662cce4ba5d2 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 13 Dec 2017 13:40:20 +0100 Subject: [PATCH 062/148] separating InfluxDB initialization into TestUtils --- src/test/java/org/influxdb/InfluxDBTest.java | 20 +--------------- src/test/java/org/influxdb/TestUtils.java | 25 ++++++++++++++++++++ src/test/java/org/influxdb/TicketTest.java | 20 +--------------- 3 files changed, 27 insertions(+), 38 deletions(-) diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index dda093351..c9b1eee21 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -48,26 +48,8 @@ public class InfluxDBTest { */ @BeforeEach public void setUp() throws InterruptedException, IOException { - this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); - boolean influxDBstarted = false; - do { - Pong response; - try { - response = this.influxDB.ping(); - if (response.isGood()) { - influxDBstarted = true; - } - } catch (Exception e) { - // NOOP intentional - e.printStackTrace(); - } - Thread.sleep(100L); - } while (!influxDBstarted); - this.influxDB.setLogLevel(LogLevel.NONE); + this.influxDB = TestUtils.connectToInfluxDB(); this.influxDB.createDatabase(UDP_DATABASE); - System.out.println("################################################################################## "); - System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); - System.out.println("##################################################################################"); } /** diff --git a/src/test/java/org/influxdb/TestUtils.java b/src/test/java/org/influxdb/TestUtils.java index 7ad8dff24..865ecdcf2 100644 --- a/src/test/java/org/influxdb/TestUtils.java +++ b/src/test/java/org/influxdb/TestUtils.java @@ -1,5 +1,8 @@ package org.influxdb; +import org.influxdb.dto.Pong; + +import java.io.IOException; import java.util.Map; public class TestUtils { @@ -44,4 +47,26 @@ public static String defaultRetentionPolicy(String version) { } } + public static InfluxDB connectToInfluxDB() throws InterruptedException, IOException { + InfluxDB 
influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); + boolean influxDBstarted = false; + do { + Pong response; + try { + response = influxDB.ping(); + if (response.isGood()) { + influxDBstarted = true; + } + } catch (Exception e) { + // NOOP intentional + e.printStackTrace(); + } + Thread.sleep(100L); + } while (!influxDBstarted); + influxDB.setLogLevel(InfluxDB.LogLevel.NONE); + System.out.println("##################################################################################"); + System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); + System.out.println("##################################################################################"); + return influxDB; + } } diff --git a/src/test/java/org/influxdb/TicketTest.java b/src/test/java/org/influxdb/TicketTest.java index 29aecf09e..dc373fb6e 100644 --- a/src/test/java/org/influxdb/TicketTest.java +++ b/src/test/java/org/influxdb/TicketTest.java @@ -34,25 +34,7 @@ public class TicketTest { */ @BeforeEach public void setUp() throws InterruptedException, IOException { - this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); - boolean influxDBstarted = false; - do { - Pong response; - try { - response = this.influxDB.ping(); - if (response.isGood()) { - influxDBstarted = true; - } - } catch (Exception e) { - // NOOP intentional - e.printStackTrace(); - } - Thread.sleep(100L); - } while (!influxDBstarted); - this.influxDB.setLogLevel(LogLevel.NONE); - System.out.println("##################################################################################"); - System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); - System.out.println("##################################################################################"); + this.influxDB = TestUtils.connectToInfluxDB(); } /** From f61ae41e816a1ae5d1e07f95a317fa01753c5229 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 13 Dec 2017 16:19:58 +0100 Subject: [PATCH 063/148] fix #396 --- src/main/java/org/influxdb/BatchOptions.java | 60 ++++++++++++++++ src/main/java/org/influxdb/InfluxDB.java | 17 +++++ .../org/influxdb/impl/BatchOptionsImpl.java | 71 +++++++++++++++++++ .../java/org/influxdb/impl/InfluxDBImpl.java | 16 +++++ .../java/org/influxdb/BatchOptionsTest.java | 69 ++++++++++++++++++ 5 files changed, 233 insertions(+) create mode 100644 src/main/java/org/influxdb/BatchOptions.java create mode 100644 src/main/java/org/influxdb/impl/BatchOptionsImpl.java create mode 100644 src/test/java/org/influxdb/BatchOptionsTest.java diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java new file mode 100644 index 000000000..cf054229c --- /dev/null +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -0,0 +1,60 @@ +package org.influxdb; + +import org.influxdb.dto.Point; +import org.influxdb.impl.BatchOptionsImpl; + +import java.util.concurrent.ThreadFactory; +import java.util.function.BiConsumer; + +/** + * BatchOptions are used to configure batching of individual data point writes + * into InfluxDB. See {@link InfluxDB#enableBatch(BatchOptions)} + */ +public interface BatchOptions { + + BatchOptions DEFAULTS = BatchOptionsImpl.DEFAULTS; + + /** + * @param actions the number of actions to collect + * @return the BatchOptions instance to be able to use it in a fluent manner. 
+ */ + BatchOptions actions(final int actions); + + /** + * @param flushDuration the time to wait at most (milliseconds). + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + BatchOptions flushDuration(final int flushDuration); + + /** + * @param threadFactory a ThreadFactory instance to be used + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + BatchOptions threadFactory(final ThreadFactory threadFactory); + + /** + * @param exceptionHandler a consumer function to handle asynchronous errors + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler); + + /** + * @return actions the number of actions to collect + */ + int getActions(); + + /** + * @return flushDuration the time to wait at most (milliseconds). + */ + int getFlushDuration(); + + /** + * @return a ThreadFactory instance to be used + */ + ThreadFactory getThreadFactory(); + + /** + * @return a consumer function to handle asynchronous errors + */ + BiConsumer, Throwable> getExceptionHandler(); +} diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index deb192373..7bfd17d76 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -97,6 +97,23 @@ public String value() { */ public boolean isGzipEnabled(); + /** + * Enable batching of single Point writes to speed up writes significantly. This is the same as calling + * InfluxDB.enableBatch(BatchingOptions.DEFAULTS) + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB enableBatch(); + + /** + * Enable batching of single Point writes to speed up writes significantly. If either number of points written or + * flushDuration time limit is reached, a batch write is issued. + * Note that batch processing needs to be explicitly stopped before the application is shutdown. + * To do so call disableBatch(). + * + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB enableBatch(final BatchOptions batchOptions); + /** * Enable batching of single Point writes as {@link #enableBatch(int, int, TimeUnit, ThreadFactory)}} * using {@linkplain java.util.concurrent.Executors#defaultThreadFactory() default thread factory}. 
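A rough usage sketch of the options API introduced above; the URL, database name and thresholds are illustrative only, not recommendations:

```java
InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root");
influxDB.createDatabase("batch_example");
influxDB.setDatabase("batch_example");
influxDB.enableBatch(BatchOptions.DEFAULTS
    .actions(500)           // flush once 500 points have been collected ...
    .flushDuration(2000)    // ... or every 2 seconds, whichever comes first
    .exceptionHandler((points, throwable) -> System.err.println("batch write failed: " + throwable)));

influxDB.write(Point.measurement("cpu").addField("idle", 90.0).build());
influxDB.close();           // shut the batching thread pool down when done
```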
diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java new file mode 100644 index 000000000..a1ba79bdc --- /dev/null +++ b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java @@ -0,0 +1,71 @@ +package org.influxdb.impl; + +import org.influxdb.BatchOptions; +import org.influxdb.dto.Point; + +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.function.BiConsumer; + +public class BatchOptionsImpl implements BatchOptions, Cloneable { + + public static BatchOptions DEFAULTS = new BatchOptionsImpl(); + + // default values here are consistent with Telegraf + int actions = 1000; + int flushDuration = 10000; + ThreadFactory threadFactory = Executors.defaultThreadFactory(); + BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { + }; + + private BatchOptionsImpl() { + } + + public BatchOptions actions(final int actions) { + BatchOptionsImpl clone = getClone(); + clone.actions = actions; + return clone; + } + + public BatchOptions flushDuration(final int flushDuration) { + BatchOptionsImpl clone = getClone(); + clone.flushDuration = flushDuration; + return clone; + } + + public BatchOptions threadFactory(final ThreadFactory threadFactory) { + BatchOptionsImpl clone = getClone(); + clone.threadFactory = threadFactory; + return clone; + } + + public BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler) { + BatchOptionsImpl clone = getClone(); + clone.exceptionHandler = exceptionHandler; + return clone; + } + + private BatchOptionsImpl getClone() { + try { + return (BatchOptionsImpl) this.clone(); + } catch (CloneNotSupportedException e) { + throw new RuntimeException(e); + } + } + + public int getActions() { + return actions; + } + + public int getFlushDuration() { + return flushDuration; + } + + public ThreadFactory getThreadFactory() { + return threadFactory; + } + + public BiConsumer, Throwable> getExceptionHandler() { + return exceptionHandler; + } +} diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index d85600efc..de7cf7b2e 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -12,6 +12,7 @@ import okhttp3.logging.HttpLoggingInterceptor; import okhttp3.logging.HttpLoggingInterceptor.Level; import okio.BufferedSource; +import org.influxdb.BatchOptions; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBException; import org.influxdb.InfluxDBIOException; @@ -185,6 +186,21 @@ public boolean isGzipEnabled() { return this.gzipRequestInterceptor.isEnabled(); } + @Override + public InfluxDB enableBatch() { + enableBatch(BatchOptions.DEFAULTS); + return this; + } + + @Override + public InfluxDB enableBatch(BatchOptions batchOptions) { + enableBatch(batchOptions.getActions(), + batchOptions.getFlushDuration(), + TimeUnit.MILLISECONDS,batchOptions.getThreadFactory(), + batchOptions.getExceptionHandler() ); + return this; + } + @Override public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit) { diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java new file mode 100644 index 000000000..126f17f84 --- /dev/null +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -0,0 +1,69 @@ +package org.influxdb; + +import org.influxdb.dto.Point; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; 
+import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import java.io.IOException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +@RunWith(JUnitPlatform.class) +public class BatchOptionsTest { + + private InfluxDB influxDB; + + @BeforeEach + public void setUp() throws InterruptedException, IOException { + this.influxDB = TestUtils.connectToInfluxDB(); + } + + /** + * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. + */ + @Test + public void testBatchEnabledWithDefaultSettings() { + try { + this.influxDB.enableBatch(); + + } + finally { + this.influxDB.disableBatch(); + } + } + + /** + * Test the implementation of {@link BatchOptions#actions(int)} }. + */ + @Test + public void testActionsSetting() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.actions(3); + Assertions.assertEquals(3, options.getActions()); + this.influxDB.enableBatch(options); + this.influxDB.createDatabase(dbName); + this.influxDB.setDatabase(dbName); + for (int j = 0; j < 5; j++) { + Point point = Point.measurement("cpu") + .time(j,TimeUnit.MILLISECONDS) + .addField("idle", (double) j) + .addField("user", 2.0 * j) + .addField("system", 3.0 * j).build(); + this.influxDB.write(point); + } + Thread.sleep(500); + QueryResult result=influxDB.query(new Query("select * from cpu", dbName)); + Assertions.assertEquals(3, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + this.influxDB.disableBatch(); + this.influxDB.deleteDatabase(dbName); + } + } +} From a0b227b7de62fa19abddc7657e7a9d2609b0c936 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 13 Dec 2017 16:22:53 +0100 Subject: [PATCH 064/148] fix #397 fixing checkstyle fixing checkstyle --- src/main/java/org/influxdb/BatchOptions.java | 15 ++++++ .../org/influxdb/impl/BatchOptionsImpl.java | 24 +++++++-- .../org/influxdb/impl/BatchProcessor.java | 50 +++++++++++++++---- .../java/org/influxdb/impl/InfluxDBImpl.java | 16 ++++-- .../java/org/influxdb/BatchOptionsTest.java | 1 + 5 files changed, 87 insertions(+), 19 deletions(-) diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index cf054229c..df5e39a9e 100644 --- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -26,6 +26,16 @@ public interface BatchOptions { */ BatchOptions flushDuration(final int flushDuration); + /** + * Jitters the batch flush interval by a random amount. This is primarily to avoid + * large write spikes for users running a large number of client instances. + * ie, a jitter of 5s and flush duration 10s means flushes will happen every 10-15s. + * + * @param jitterDuration (milliseconds) + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + BatchOptions jitterDuration(final int jitterDuration); + /** * @param threadFactory a ThreadFactory instance to be used * @return the BatchOptions instance to be able to use it in a fluent manner. 
@@ -48,6 +58,11 @@ public interface BatchOptions { */ int getFlushDuration(); + /** + * @return batch flush interval jitter value (milliseconds) + */ + int getJitterDuration(); + /** * @return a ThreadFactory instance to be used */ diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java index a1ba79bdc..380b2befd 100644 --- a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java +++ b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java @@ -7,13 +7,19 @@ import java.util.concurrent.ThreadFactory; import java.util.function.BiConsumer; -public class BatchOptionsImpl implements BatchOptions, Cloneable { +public final class BatchOptionsImpl implements BatchOptions, Cloneable { - public static BatchOptions DEFAULTS = new BatchOptionsImpl(); + public static final BatchOptions DEFAULTS = new BatchOptionsImpl(); // default values here are consistent with Telegraf - int actions = 1000; - int flushDuration = 10000; + public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; + public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; + public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; + + int actions = DEFAULT_BATCH_ACTIONS_LIMIT; + int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; + int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; + ThreadFactory threadFactory = Executors.defaultThreadFactory(); BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { }; @@ -33,6 +39,12 @@ public BatchOptions flushDuration(final int flushDuration) { return clone; } + public BatchOptions jitterDuration(final int jitterDuration) { + BatchOptionsImpl clone = getClone(); + clone.jitterDuration = jitterDuration; + return clone; + } + public BatchOptions threadFactory(final ThreadFactory threadFactory) { BatchOptionsImpl clone = getClone(); clone.threadFactory = threadFactory; @@ -61,6 +73,10 @@ public int getFlushDuration() { return flushDuration; } + public int getJitterDuration() { + return jitterDuration; + } + public ThreadFactory getThreadFactory() { return threadFactory; } diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index 35457a55c..65536210e 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -39,6 +39,7 @@ public final class BatchProcessor { private final TimeUnit flushIntervalUnit; private final int flushInterval; private final ConsistencyLevel consistencyLevel; + private final int jitterInterval; /** * The Builder to create a BatchProcessor instance. @@ -49,6 +50,7 @@ public static final class Builder { private int actions; private TimeUnit flushIntervalUnit; private int flushInterval; + private int jitterInterval; private BiConsumer, Throwable> exceptionHandler = (entries, throwable) -> { }; private ConsistencyLevel consistencyLevel; @@ -98,6 +100,25 @@ public Builder interval(final int interval, final TimeUnit unit) { return this; } + /** + * The interval at which at least should issued a write. 
+ * + * @param flushInterval + * the flush interval + * @param jitterInterval + * the flush jitter interval + * @param unit + * the TimeUnit of the interval + * + * @return this Builder to use it fluent + */ + public Builder interval(final int flushInterval, final int jitterInterval, final TimeUnit unit) { + this.flushInterval = flushInterval; + this.jitterInterval = jitterInterval; + this.flushIntervalUnit = unit; + return this; + } + /** * A callback to be used when an error occurs during a batchwrite. * @@ -135,9 +156,8 @@ public BatchProcessor build() { Objects.requireNonNull(this.flushIntervalUnit, "flushIntervalUnit"); Objects.requireNonNull(this.threadFactory, "threadFactory"); Objects.requireNonNull(this.exceptionHandler, "exceptionHandler"); - return new BatchProcessor(this.influxDB, this.threadFactory, - this.actions, this.flushIntervalUnit, - this.flushInterval, exceptionHandler, this.consistencyLevel); + return new BatchProcessor(this.influxDB, this.threadFactory, this.actions, this.flushIntervalUnit, + this.flushInterval, this.jitterInterval, exceptionHandler, this.consistencyLevel); } } @@ -196,15 +216,16 @@ public static Builder builder(final InfluxDB influxDB) { return new Builder(influxDB); } - private BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions, - final TimeUnit flushIntervalUnit, final int flushInterval, - final BiConsumer, Throwable> exceptionHandler, - final ConsistencyLevel consistencyLevel) { + BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions, + final TimeUnit flushIntervalUnit, final int flushInterval, final int jitterInterval, + final BiConsumer, Throwable> exceptionHandler, + final ConsistencyLevel consistencyLevel) { super(); this.influxDB = influxDB; this.actions = actions; this.flushIntervalUnit = flushIntervalUnit; this.flushInterval = flushInterval; + this.jitterInterval = jitterInterval; this.scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory); this.exceptionHandler = exceptionHandler; this.consistencyLevel = consistencyLevel; @@ -213,14 +234,21 @@ private BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFa } else { this.queue = new LinkedBlockingQueue<>(); } - // Flush at specified Rate - this.scheduler.scheduleAtFixedRate(new Runnable() { + + Runnable flushRunnable = new Runnable() { @Override public void run() { + // write doesn't throw any exceptions write(); + int jitterInterval = (int) (Math.random() * BatchProcessor.this.jitterInterval); + BatchProcessor.this.scheduler.schedule(this, + BatchProcessor.this.flushInterval + jitterInterval, BatchProcessor.this.flushIntervalUnit); } - }, this.flushInterval, this.flushInterval, this.flushIntervalUnit); - + }; + // Flush at specified Rate + this.scheduler.schedule(flushRunnable, + this.flushInterval + (int) (Math.random() * BatchProcessor.this.jitterInterval), + this.flushIntervalUnit); } void write() { diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index de7cf7b2e..0c12bcb9b 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -193,11 +193,12 @@ public InfluxDB enableBatch() { } @Override - public InfluxDB enableBatch(BatchOptions batchOptions) { + public InfluxDB enableBatch(final BatchOptions batchOptions) { enableBatch(batchOptions.getActions(), batchOptions.getFlushDuration(), - 
TimeUnit.MILLISECONDS,batchOptions.getThreadFactory(), - batchOptions.getExceptionHandler() ); + batchOptions.getJitterDuration(), + TimeUnit.MILLISECONDS, batchOptions.getThreadFactory(), + batchOptions.getExceptionHandler()); return this; } @@ -229,6 +230,13 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, final ThreadFactory threadFactory, final BiConsumer, Throwable> exceptionHandler) { + enableBatch(actions, flushDuration, 0, flushDurationTimeUnit, threadFactory, exceptionHandler); + return this; + } + + private InfluxDB enableBatch(final int actions, final int flushDuration, final int jitterDuration, + final TimeUnit durationTimeUnit, final ThreadFactory threadFactory, + final BiConsumer, Throwable> exceptionHandler) { if (this.batchEnabled.get()) { throw new IllegalStateException("BatchProcessing is already enabled."); } @@ -236,7 +244,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti .builder(this) .actions(actions) .exceptionHandler(exceptionHandler) - .interval(flushDuration, flushDurationTimeUnit) + .interval(flushDuration, jitterDuration, durationTimeUnit) .threadFactory(threadFactory) .consistencyLevel(consistency) .build(); diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 126f17f84..f58a685af 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -66,4 +66,5 @@ public void testActionsSetting() throws InterruptedException { this.influxDB.deleteDatabase(dbName); } } + } From 8c2f2e6fc75bc93908c87b2722d3dc45b726e6ca Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 9 Jan 2018 17:05:13 +0100 Subject: [PATCH 065/148] implementing cluster consistency setting into batch options --- src/main/java/org/influxdb/BatchOptions.java | 17 ++++++++++++++++- .../org/influxdb/impl/BatchOptionsImpl.java | 12 ++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index df5e39a9e..26e654f24 100644 --- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -49,8 +49,16 @@ public interface BatchOptions { BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler); /** - * @return actions the number of actions to collect + * @param consistency cluster consistency setting (how many nodes have to store data points + * to treat a write as a success) + * @return the BatchOptions instance to be able to use it in a fluent manner. 
*/ + BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency); + + + /** + * @return actions the number of actions to collect + */ int getActions(); /** @@ -72,4 +80,11 @@ public interface BatchOptions { * @return a consumer function to handle asynchronous errors */ BiConsumer, Throwable> getExceptionHandler(); + + /** + * @return cluster consistency setting (how many nodes have to store data points + * to treat a write as a success) + */ + InfluxDB.ConsistencyLevel getConsistency(); + } diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java index 380b2befd..2fd8bba8d 100644 --- a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java +++ b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java @@ -1,6 +1,7 @@ package org.influxdb.impl; import org.influxdb.BatchOptions; +import org.influxdb.InfluxDB; import org.influxdb.dto.Point; import java.util.concurrent.Executors; @@ -23,6 +24,7 @@ public final class BatchOptionsImpl implements BatchOptions, Cloneable { ThreadFactory threadFactory = Executors.defaultThreadFactory(); BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { }; + InfluxDB.ConsistencyLevel consistency = InfluxDB.ConsistencyLevel.ONE; private BatchOptionsImpl() { } @@ -57,6 +59,12 @@ public BatchOptions exceptionHandler(final BiConsumer, Throwable return clone; } + public BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency) { + BatchOptionsImpl clone = getClone(); + clone.consistency = consistency; + return clone; + } + private BatchOptionsImpl getClone() { try { return (BatchOptionsImpl) this.clone(); @@ -77,6 +85,10 @@ public int getJitterDuration() { return jitterDuration; } + public InfluxDB.ConsistencyLevel getConsistency() { + return consistency; + } + public ThreadFactory getThreadFactory() { return threadFactory; } From e3f3db52a74801fcd30430a0886e348a663da604 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 13 Dec 2017 13:40:20 +0100 Subject: [PATCH 066/148] separating InfluxDB initialization into TestUtils --- src/test/java/org/influxdb/TestUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/TestUtils.java b/src/test/java/org/influxdb/TestUtils.java index 865ecdcf2..e85f6feaf 100644 --- a/src/test/java/org/influxdb/TestUtils.java +++ b/src/test/java/org/influxdb/TestUtils.java @@ -65,7 +65,7 @@ public static InfluxDB connectToInfluxDB() throws InterruptedException, IOExcept } while (!influxDBstarted); influxDB.setLogLevel(InfluxDB.LogLevel.NONE); System.out.println("##################################################################################"); - System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); + System.out.println("# Connected to InfluxDB Version: " + influxDB.version() + " #"); System.out.println("##################################################################################"); return influxDB; } From 92a58877cb186265af740458e7eefe4abe231104 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 13 Dec 2017 16:19:58 +0100 Subject: [PATCH 067/148] fix #396 --- src/main/java/org/influxdb/BatchOptions.java | 6 +++--- src/main/java/org/influxdb/impl/InfluxDBImpl.java | 1 + src/test/java/org/influxdb/BatchOptionsTest.java | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index 26e654f24..8732a245a 100644 --- 
a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -56,9 +56,9 @@ public interface BatchOptions { BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency); - /** - * @return actions the number of actions to collect - */ + /** + * @return actions the number of actions to collect + */ int getActions(); /** diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 0c12bcb9b..8fe0d4255 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -12,6 +12,7 @@ import okhttp3.logging.HttpLoggingInterceptor; import okhttp3.logging.HttpLoggingInterceptor.Level; import okio.BufferedSource; + import org.influxdb.BatchOptions; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBException; diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index f58a685af..126f17f84 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -66,5 +66,4 @@ public void testActionsSetting() throws InterruptedException { this.influxDB.deleteDatabase(dbName); } } - } From d5155e0f2b8749d0bd7a1591c33c9023f8c75956 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 13 Dec 2017 16:22:53 +0100 Subject: [PATCH 068/148] fix #397 fixing checkstyle fixing checkstyle --- src/test/java/org/influxdb/BatchOptionsTest.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 126f17f84..f58a685af 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -66,4 +66,5 @@ public void testActionsSetting() throws InterruptedException { this.influxDB.deleteDatabase(dbName); } } + } From 7bdd5e5b459bcaa52aa1218dd7bc3b3e06ef988e Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 3 Jan 2018 20:37:39 +0100 Subject: [PATCH 069/148] Adding analysis of error returned by InfluxDB for further use --- pom.xml | 5 ++ .../java/org/influxdb/InfluxDBException.java | 80 +++++++++++++++++++ .../java/org/influxdb/impl/InfluxDBImpl.java | 10 +++ 3 files changed, 95 insertions(+) diff --git a/pom.xml b/pom.xml index bd92d18d7..6510af401 100644 --- a/pom.xml +++ b/pom.xml @@ -261,5 +261,10 @@ logging-interceptor 3.9.1 + + org.json + json + 20171018 + diff --git a/src/main/java/org/influxdb/InfluxDBException.java b/src/main/java/org/influxdb/InfluxDBException.java index 69f28e0b3..dd7939d0d 100644 --- a/src/main/java/org/influxdb/InfluxDBException.java +++ b/src/main/java/org/influxdb/InfluxDBException.java @@ -18,4 +18,84 @@ public InfluxDBException(final String message, final Throwable cause) { public InfluxDBException(final Throwable cause) { super(cause); } + + /** + * @return true if the operation may succeed if repeated, false otherwise. 
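For illustration, a caller might branch on this flag roughly as follows (a sketch; the `influxDB` and `batchPoints` variables are assumed to already exist):

```java
try {
    influxDB.write(batchPoints);
} catch (InfluxDBException e) {
    if (e.isRetryWorth()) {
        // e.g. "cache-max-memory-size exceeded": the server is temporarily under pressure,
        // so re-submitting the same points later may succeed
        System.err.println("temporary failure, retry later: " + e.getMessage());
    } else {
        // e.g. "database not found" or "unable to parse": repeating the write cannot help
        System.err.println("dropping points: " + e.getMessage());
    }
}
```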
+ */ + public boolean isRetryWorth() { + return true; + } + + static final String FIELD_TYPE_CONFLICT_ERROR="field type conflict"; + static final String POINTS_BEYOND_RETENTION_POLICY_ERROR="points beyond retention policy"; + static final String UNABLE_TO_PARSE_ERROR="unable to parse"; + static final String HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR="hinted handoff queue not empty"; + static final String DATABASE_NOT_FOUND_ERROR="database not found"; + static final String CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR="cache-max-memory-size exceeded"; + + public static class DatabaseNotFoundError extends InfluxDBException { + private DatabaseNotFoundError(String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static class HintedHandOffQueueNotEmptyException extends InfluxDBException { + private HintedHandOffQueueNotEmptyException(String message) { + super(message); + } + public boolean isRetryWorth() { + return false; + } + } + + public static class UnableToParseException extends InfluxDBException { + private UnableToParseException(String message) { + super(message); + } + public boolean isRetryWorth() { + return false; + } + } + + public static class FieldTypeConflictException extends InfluxDBException { + private FieldTypeConflictException(String message) { + super(message); + } + public boolean isRetryWorth() { + return false; + } + } + + public static class PointsBeyondRetentionPolicyException extends InfluxDBException { + private PointsBeyondRetentionPolicyException(String message) { + super(message); + } + public boolean isRetryWorth() { + return false; + } + } + + public static class CacheMaxMemorySizeExceededException extends InfluxDBException { + private CacheMaxMemorySizeExceededException(String message) { + super(message); + } + public boolean isRetryWorth() { + return true; + } + } + + public static InfluxDBException buildExceptionForErrorState(String error) { + if(error.contains(DATABASE_NOT_FOUND_ERROR)) return new DatabaseNotFoundError(error); + if(error.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) return new PointsBeyondRetentionPolicyException(error); + if(error.contains(FIELD_TYPE_CONFLICT_ERROR)) return new FieldTypeConflictException(error); + if(error.contains(UNABLE_TO_PARSE_ERROR)) return new UnableToParseException(error); + if(error.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) return new HintedHandOffQueueNotEmptyException(error); + if(error.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) return new CacheMaxMemorySizeExceededException(error); + throw new InfluxDBException(error); + } + } diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 8fe0d4255..843ffb01e 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -24,6 +24,8 @@ import org.influxdb.dto.QueryResult; import org.influxdb.impl.BatchProcessor.HttpBatchEntry; import org.influxdb.impl.BatchProcessor.UdpBatchEntry; +import org.json.JSONException; +import org.json.JSONObject; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; @@ -569,6 +571,14 @@ private T execute(final Call call) { return response.body(); } try (ResponseBody errorBody = response.errorBody()) { + try { + JSONObject body=new JSONObject(errorBody.string()); + Object error=body.getString("error"); + if(error!=null && error instanceof String) { + throw InfluxDBException.buildExceptionForErrorState((String) error); + } + } + 
catch(JSONException e) {} throw new InfluxDBException(errorBody.string()); } } catch (IOException e) { From 3fe6a1a8acc28c5b4187b057fd2d9f408d80f371 Mon Sep 17 00:00:00 2001 From: dubsky Date: Mon, 8 Jan 2018 18:06:04 +0100 Subject: [PATCH 070/148] implementing retry buffer for failed writes due to occupancy of the influxdb server --- checkstyle.xml | 5 +- src/main/java/org/influxdb/BatchOptions.java | 17 +++ .../java/org/influxdb/InfluxDBException.java | 79 ++++++++---- .../java/org/influxdb/dto/BatchPoints.java | 27 ++++ .../org/influxdb/impl/BatchOptionsImpl.java | 19 ++- .../org/influxdb/impl/BatchProcessor.java | 35 +++++- .../java/org/influxdb/impl/BatchWriter.java | 15 +++ .../java/org/influxdb/impl/InfluxDBImpl.java | 26 ++-- .../org/influxdb/impl/OneShotBatchWriter.java | 21 ++++ .../impl/RetryCapableBatchWriter.java | 119 ++++++++++++++++++ 10 files changed, 320 insertions(+), 43 deletions(-) create mode 100644 src/main/java/org/influxdb/impl/BatchWriter.java create mode 100644 src/main/java/org/influxdb/impl/OneShotBatchWriter.java create mode 100644 src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java diff --git a/checkstyle.xml b/checkstyle.xml index d27f8b51d..d739eb82f 100644 --- a/checkstyle.xml +++ b/checkstyle.xml @@ -28,8 +28,9 @@ - - + diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index 8732a245a..1d7e9c727 100644 --- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -36,6 +36,18 @@ public interface BatchOptions { */ BatchOptions jitterDuration(final int jitterDuration); + /** + * The client maintains a buffer for failed writes so that the writes will be retried later on. This may + * help to overcome temporary network problems or InfluxDB load spikes. + * When the buffer is full and new points are written, oldest entries in the buffer are lost. + * + * To disable this feature set buffer limit to a value smaller than {@link BatchOptions#getActions} + * + * @param bufferLimit maximum number of points stored in the retry buffer + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + BatchOptions bufferLimit(final int bufferLimit); + /** * @param threadFactory a ThreadFactory instance to be used * @return the BatchOptions instance to be able to use it in a fluent manner. 
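Putting the batching options added so far together, a combined configuration could look roughly like this (all numbers are illustrative; per the javadoc above, the retry buffer is only active when the buffer limit exceeds the actions limit):

```java
influxDB.enableBatch(BatchOptions.DEFAULTS
    .actions(1000)          // flush after 1000 collected points ...
    .flushDuration(1000)    // ... or after one second, whichever comes first
    .jitterDuration(500)    // randomize each flush by up to 500 ms to avoid synchronized write spikes
    .bufferLimit(10000));   // keep up to 10000 points for retry after a recoverable write failure
```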
@@ -71,6 +83,11 @@ public interface BatchOptions { */ int getJitterDuration(); + /** + * @return Maximum number of points stored in the retry buffer, see {@link BatchOptions#bufferLimit(int)} + */ + int getBufferLimit(); + /** * @return a ThreadFactory instance to be used */ diff --git a/src/main/java/org/influxdb/InfluxDBException.java b/src/main/java/org/influxdb/InfluxDBException.java index dd7939d0d..5444bb354 100644 --- a/src/main/java/org/influxdb/InfluxDBException.java +++ b/src/main/java/org/influxdb/InfluxDBException.java @@ -26,15 +26,15 @@ public boolean isRetryWorth() { return true; } - static final String FIELD_TYPE_CONFLICT_ERROR="field type conflict"; - static final String POINTS_BEYOND_RETENTION_POLICY_ERROR="points beyond retention policy"; - static final String UNABLE_TO_PARSE_ERROR="unable to parse"; - static final String HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR="hinted handoff queue not empty"; - static final String DATABASE_NOT_FOUND_ERROR="database not found"; - static final String CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR="cache-max-memory-size exceeded"; - - public static class DatabaseNotFoundError extends InfluxDBException { - private DatabaseNotFoundError(String message) { + static final String FIELD_TYPE_CONFLICT_ERROR = "field type conflict"; + static final String POINTS_BEYOND_RETENTION_POLICY_ERROR = "points beyond retention policy"; + static final String UNABLE_TO_PARSE_ERROR = "unable to parse"; + static final String HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR = "hinted handoff queue not empty"; + static final String DATABASE_NOT_FOUND_ERROR = "database not found"; + static final String CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR = "cache-max-memory-size exceeded"; + + public static final class DatabaseNotFoundError extends InfluxDBException { + private DatabaseNotFoundError(final String message) { super(message); } @@ -43,58 +43,85 @@ public boolean isRetryWorth() { } } - public static class HintedHandOffQueueNotEmptyException extends InfluxDBException { - private HintedHandOffQueueNotEmptyException(String message) { + public static final class HintedHandOffQueueNotEmptyException extends InfluxDBException { + private HintedHandOffQueueNotEmptyException(final String message) { super(message); } + public boolean isRetryWorth() { return false; } } - public static class UnableToParseException extends InfluxDBException { - private UnableToParseException(String message) { + public static final class UnableToParseException extends InfluxDBException { + private UnableToParseException(final String message) { super(message); } + public boolean isRetryWorth() { return false; } } - public static class FieldTypeConflictException extends InfluxDBException { - private FieldTypeConflictException(String message) { + public static final class FieldTypeConflictException extends InfluxDBException { + private FieldTypeConflictException(final String message) { super(message); } + public boolean isRetryWorth() { return false; } } - public static class PointsBeyondRetentionPolicyException extends InfluxDBException { - private PointsBeyondRetentionPolicyException(String message) { + public static final class PointsBeyondRetentionPolicyException extends InfluxDBException { + private PointsBeyondRetentionPolicyException(final String message) { super(message); } + public boolean isRetryWorth() { return false; } } - public static class CacheMaxMemorySizeExceededException extends InfluxDBException { - private CacheMaxMemorySizeExceededException(String message) { + public static final class 
CacheMaxMemorySizeExceededException extends InfluxDBException { + private CacheMaxMemorySizeExceededException(final String message) { super(message); } + public boolean isRetryWorth() { return true; } } - public static InfluxDBException buildExceptionForErrorState(String error) { - if(error.contains(DATABASE_NOT_FOUND_ERROR)) return new DatabaseNotFoundError(error); - if(error.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) return new PointsBeyondRetentionPolicyException(error); - if(error.contains(FIELD_TYPE_CONFLICT_ERROR)) return new FieldTypeConflictException(error); - if(error.contains(UNABLE_TO_PARSE_ERROR)) return new UnableToParseException(error); - if(error.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) return new HintedHandOffQueueNotEmptyException(error); - if(error.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) return new CacheMaxMemorySizeExceededException(error); + public static final class RetryBufferOverrunException extends InfluxDBException { + public RetryBufferOverrunException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static InfluxDBException buildExceptionForErrorState(final String error) { + if (error.contains(DATABASE_NOT_FOUND_ERROR)) { + return new DatabaseNotFoundError(error); + } + if (error.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) { + return new PointsBeyondRetentionPolicyException(error); + } + if (error.contains(FIELD_TYPE_CONFLICT_ERROR)) { + return new FieldTypeConflictException(error); + } + if (error.contains(UNABLE_TO_PARSE_ERROR)) { + return new UnableToParseException(error); + } + if (error.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) { + return new HintedHandOffQueueNotEmptyException(error); + } + if (error.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) { + return new CacheMaxMemorySizeExceededException(error); + } throw new InfluxDBException(error); } diff --git a/src/main/java/org/influxdb/dto/BatchPoints.java b/src/main/java/org/influxdb/dto/BatchPoints.java index c67ddcf28..9d29d6cd3 100644 --- a/src/main/java/org/influxdb/dto/BatchPoints.java +++ b/src/main/java/org/influxdb/dto/BatchPoints.java @@ -280,4 +280,31 @@ public String lineProtocol() { } return sb.toString(); } + + /** + * Test whether is possible to merge two BatchPoints objects. + * + * @param that batch point to merge in + * @return true if the batch points can be sent in a single HTTP request write + */ + public boolean isMergeAbleWith(final BatchPoints that) { + return Objects.equals(database, that.database) + && Objects.equals(retentionPolicy, that.retentionPolicy) + && Objects.equals(tags, that.tags) + && consistency == that.consistency; + } + + /** + * Merge two BatchPoints objects. + * + * @param that batch point to merge in + * @return true if the batch points have been merged into this BatchPoints instance. Return false otherwise. 
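A minimal sketch of merging two batches, assuming the existing BatchPoints builder API; both batches target the same database with identical tags, retention policy and consistency, so they are mergeable:

```java
BatchPoints first = BatchPoints.database("mydb")
        .point(Point.measurement("cpu").addField("idle", 90.0).build())
        .build();
BatchPoints second = BatchPoints.database("mydb")
        .point(Point.measurement("cpu").addField("idle", 85.0).build())
        .build();

if (first.isMergeAbleWith(second)) {
    first.mergeIn(second);   // "first" now carries both points and can be sent in one request
}
```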
+ */ + public boolean mergeIn(final BatchPoints that) { + boolean mergeAble = isMergeAbleWith(that); + if (mergeAble) { + this.points.addAll(that.points); + } + return mergeAble; + } } diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java index 2fd8bba8d..5edbc995f 100644 --- a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java +++ b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java @@ -16,10 +16,12 @@ public final class BatchOptionsImpl implements BatchOptions, Cloneable { public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; + public static final int DEFAULT_BUFFER_LIMIT = 10000; - int actions = DEFAULT_BATCH_ACTIONS_LIMIT; - int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; - int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; + private int actions = DEFAULT_BATCH_ACTIONS_LIMIT; + private int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; + private int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; + private int bufferLimit = DEFAULT_BUFFER_LIMIT; ThreadFactory threadFactory = Executors.defaultThreadFactory(); BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { @@ -47,6 +49,12 @@ public BatchOptions jitterDuration(final int jitterDuration) { return clone; } + public BatchOptions bufferLimit(final int bufferLimit) { + BatchOptionsImpl clone = getClone(); + clone.bufferLimit = bufferLimit; + return clone; + } + public BatchOptions threadFactory(final ThreadFactory threadFactory) { BatchOptionsImpl clone = getClone(); clone.threadFactory = threadFactory; @@ -96,4 +104,9 @@ public ThreadFactory getThreadFactory() { public BiConsumer, Throwable> getExceptionHandler() { return exceptionHandler; } + + @Override + public int getBufferLimit() { + return bufferLimit; + } } diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index 65536210e..f2f3ad013 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -21,6 +21,11 @@ import java.util.logging.Level; import java.util.logging.Logger; +import org.influxdb.InfluxDB; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; + + /** * A BatchProcessor can be attached to a InfluxDB Instance to collect single point writes and * aggregates them to BatchPoints to get a better write performance. @@ -40,6 +45,7 @@ public final class BatchProcessor { private final int flushInterval; private final ConsistencyLevel consistencyLevel; private final int jitterInterval; + private final BatchWriter batchWriter; /** * The Builder to create a BatchProcessor instance. @@ -51,6 +57,10 @@ public static final class Builder { private TimeUnit flushIntervalUnit; private int flushInterval; private int jitterInterval; + // this is a default value if the InfluxDb.enableBatch(BatchOptions) IS NOT used + // the reason is backward compatibility + private int bufferLimit = 0; + private BiConsumer, Throwable> exceptionHandler = (entries, throwable) -> { }; private ConsistencyLevel consistencyLevel; @@ -119,6 +129,18 @@ public Builder interval(final int flushInterval, final int jitterInterval, final return this; } + /** + * A buffer for failed writes so that the writes will be retried later on. When the buffer is full and + * new points are written, oldest entries in the buffer are lost. 
+ * + * @param bufferLimit maximum number of points stored in the buffer + * @return this Builder to use it fluent + */ + public Builder bufferLimit(final int bufferLimit) { + this.bufferLimit = bufferLimit; + return this; + } + /** * A callback to be used when an error occurs during a batchwrite. * @@ -156,7 +178,13 @@ public BatchProcessor build() { Objects.requireNonNull(this.flushIntervalUnit, "flushIntervalUnit"); Objects.requireNonNull(this.threadFactory, "threadFactory"); Objects.requireNonNull(this.exceptionHandler, "exceptionHandler"); - return new BatchProcessor(this.influxDB, this.threadFactory, this.actions, this.flushIntervalUnit, + BatchWriter batchWriter; + if (this.bufferLimit > this.actions) { + batchWriter = new RetryCapableBatchWriter(this.influxDB, this.exceptionHandler, this.bufferLimit, this.actions); + } else { + batchWriter = new OneShotBatchWriter(this.influxDB); + } + return new BatchProcessor(this.influxDB, batchWriter, this.threadFactory, this.actions, this.flushIntervalUnit, this.flushInterval, this.jitterInterval, exceptionHandler, this.consistencyLevel); } } @@ -216,12 +244,13 @@ public static Builder builder(final InfluxDB influxDB) { return new Builder(influxDB); } - BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions, + BatchProcessor(final InfluxDBImpl influxDB, final BatchWriter batchWriter, final ThreadFactory threadFactory, final int actions, final TimeUnit flushIntervalUnit, final int flushInterval, final int jitterInterval, final BiConsumer, Throwable> exceptionHandler, final ConsistencyLevel consistencyLevel) { super(); this.influxDB = influxDB; + this.batchWriter = batchWriter; this.actions = actions; this.flushIntervalUnit = flushIntervalUnit; this.flushInterval = flushInterval; @@ -291,7 +320,7 @@ void write() { } for (BatchPoints batchPoints : batchKeyToBatchPoints.values()) { - BatchProcessor.this.influxDB.write(batchPoints); + BatchProcessor.this.batchWriter.write(batchPoints); } for (Entry> entry : udpPortToBatchPoints.entrySet()) { for (String lineprotocolStr : entry.getValue()) { diff --git a/src/main/java/org/influxdb/impl/BatchWriter.java b/src/main/java/org/influxdb/impl/BatchWriter.java new file mode 100644 index 000000000..e2a169143 --- /dev/null +++ b/src/main/java/org/influxdb/impl/BatchWriter.java @@ -0,0 +1,15 @@ +package org.influxdb.impl; + +import org.influxdb.dto.BatchPoints; + +/** + * Write individual batches to InfluxDB. + */ +interface BatchWriter { + /** + * Write the given batch into InfluxDB. 
+ * @param batchPoints to write + */ + void write(BatchPoints batchPoints); +} + diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 843ffb01e..720f443d5 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -197,11 +197,19 @@ public InfluxDB enableBatch() { @Override public InfluxDB enableBatch(final BatchOptions batchOptions) { - enableBatch(batchOptions.getActions(), - batchOptions.getFlushDuration(), - batchOptions.getJitterDuration(), - TimeUnit.MILLISECONDS, batchOptions.getThreadFactory(), - batchOptions.getExceptionHandler()); + + if (this.batchEnabled.get()) { + throw new IllegalStateException("BatchProcessing is already enabled."); + } + this.batchProcessor = BatchProcessor + .builder(this) + .actions(batchOptions.getActions()) + .exceptionHandler(batchOptions.getExceptionHandler()) + .interval(batchOptions.getFlushDuration(), batchOptions.getJitterDuration(), TimeUnit.MILLISECONDS) + .threadFactory(batchOptions.getThreadFactory()) + .bufferLimit(batchOptions.getBufferLimit()) + .build(); + this.batchEnabled.set(true); return this; } @@ -572,13 +580,13 @@ private T execute(final Call call) { } try (ResponseBody errorBody = response.errorBody()) { try { - JSONObject body=new JSONObject(errorBody.string()); - Object error=body.getString("error"); - if(error!=null && error instanceof String) { + JSONObject body = new JSONObject(errorBody.string()); + Object error = body.getString("error"); + if (error != null && error instanceof String) { throw InfluxDBException.buildExceptionForErrorState((String) error); } + } catch (JSONException e) { } - catch(JSONException e) {} throw new InfluxDBException(errorBody.string()); } } catch (IOException e) { diff --git a/src/main/java/org/influxdb/impl/OneShotBatchWriter.java b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java new file mode 100644 index 000000000..c6594f6f5 --- /dev/null +++ b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java @@ -0,0 +1,21 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.dto.BatchPoints; + +/** + * Batch writer that tries to write BatchPoints exactly once. + */ +class OneShotBatchWriter implements BatchWriter { + + private InfluxDB influxDB; + + OneShotBatchWriter(final InfluxDB influxDB) { + this.influxDB = influxDB; + } + + @Override + public void write(final BatchPoints batchPoints) { + influxDB.write(batchPoints); + } +} diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java new file mode 100644 index 000000000..bf3723374 --- /dev/null +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -0,0 +1,119 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBException; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; + +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; +import java.util.function.BiConsumer; + +/** + * Batch writer that tries to retry a write if it failed previously and + * the reason of the failure is not permanent. 
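Sketched below is an exception handler a client might register once the retry-capable writer is in place; points reach the handler when they are given up on, either because the error is not worth retrying or because the retry buffer overran (the handler body is illustrative):

```java
BiConsumer<Iterable<Point>, Throwable> handler = (points, throwable) -> {
    if (throwable instanceof InfluxDBException.RetryBufferOverrunException) {
        System.err.println("retry buffer full, oldest points were dropped");
    } else {
        System.err.println("write failed permanently: " + throwable);
    }
};
influxDB.enableBatch(BatchOptions.DEFAULTS.bufferLimit(10000).exceptionHandler(handler));
```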
+ */ +class RetryCapableBatchWriter implements BatchWriter { + + private InfluxDB influxDB; + private BiConsumer, Throwable> exceptionHandler; + private LinkedList batchQueue; + private int requestActionsLimit; + private int retryBufferCapacity; + private int usedRetryBufferCapacity; + + RetryCapableBatchWriter(final InfluxDB influxDB, final BiConsumer, Throwable> exceptionHandler, + final int retryBufferCapacity, final int requestActionsLimit) { + this.influxDB = influxDB; + this.exceptionHandler = exceptionHandler; + batchQueue = new LinkedList<>(); + this.retryBufferCapacity = retryBufferCapacity; + this.requestActionsLimit = requestActionsLimit; + } + + private enum WriteResultOutcome { WRITTEN, FAILED_RETRY_POSSIBLE, FAILED_RETRY_IMPOSSIBLE } + + private static class WriteResult { + + static final WriteResult WRITTEN = new WriteResult(WriteResultOutcome.WRITTEN); + + WriteResultOutcome outcome; + Throwable throwable; + + public WriteResult(final WriteResultOutcome outcome) { + this.outcome = outcome; + } + + public WriteResult(final WriteResultOutcome outcome, final Throwable throwable) { + this.outcome = outcome; + this.throwable = throwable; + } + + public WriteResult(final InfluxDBException e) { + this.throwable = e; + if (e.isRetryWorth()) { + this.outcome = WriteResultOutcome.FAILED_RETRY_POSSIBLE; + } else { + this.outcome = WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE; + } + } + } + + @Override + public void write(final BatchPoints batchPoints) { + // empty the cached data first + ListIterator iterator = batchQueue.listIterator(); + while (iterator.hasNext()) { + BatchPoints entry = iterator.next(); + WriteResult result = tryToWrite(entry); + if (result.outcome == WriteResultOutcome.WRITTEN + || result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { + iterator.remove(); + usedRetryBufferCapacity -= entry.getPoints().size(); + exceptionHandler.accept(entry.getPoints(), result.throwable); + } + } + // write the last given batch last so that duplicate data points get overwritten correctly + WriteResult result = tryToWrite(batchPoints); + if (result.outcome == WriteResultOutcome.FAILED_RETRY_POSSIBLE) { + addToBatchQueue(batchPoints); + } + } + + private WriteResult tryToWrite(final BatchPoints batchPoints) { + try { + influxDB.write(batchPoints); + return WriteResult.WRITTEN; + } catch (InfluxDBException e) { + return new WriteResult(e); + } catch (Exception e) { + return new WriteResult(WriteResultOutcome.FAILED_RETRY_POSSIBLE, e); + } + } + + private void evictTooOldFailedWrites() { + while (usedRetryBufferCapacity > retryBufferCapacity && batchQueue.size() > 0) { + List points = batchQueue.removeFirst().getPoints(); + usedRetryBufferCapacity -= points.size(); + exceptionHandler.accept(points, + new InfluxDBException.RetryBufferOverrunException( + "Retry buffer overrun, current capacity: " + retryBufferCapacity)); + } + } + + private void addToBatchQueue(final BatchPoints batchPoints) { + if (batchQueue.size() > 0) { + BatchPoints last = batchQueue.getLast(); + if (last.getPoints().size() + batchPoints.getPoints().size() <= requestActionsLimit) { + boolean hasBeenMergedIn = last.mergeIn(batchPoints); + if (hasBeenMergedIn) { + return; + } + } + } + batchQueue.add(batchPoints); + usedRetryBufferCapacity += batchPoints.getPoints().size(); + evictTooOldFailedWrites(); + } +} From ce90c44e04d9b7706e19d83c68c85d6a41c15ab9 Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 9 Jan 2018 17:31:21 +0100 Subject: [PATCH 071/148] implementing cluster consistency setting into batch 
options --- src/main/java/org/influxdb/impl/InfluxDBImpl.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 720f443d5..701f2f5c2 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -208,6 +208,7 @@ public InfluxDB enableBatch(final BatchOptions batchOptions) { .interval(batchOptions.getFlushDuration(), batchOptions.getJitterDuration(), TimeUnit.MILLISECONDS) .threadFactory(batchOptions.getThreadFactory()) .bufferLimit(batchOptions.getBufferLimit()) + .consistencyLevel(batchOptions.getConsistency()) .build(); this.batchEnabled.set(true); return this; From 3218ac0a88c9295d6fc72b24c474777531903720 Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 9 Jan 2018 17:35:44 +0100 Subject: [PATCH 072/148] fixing checkstyle after rebase --- checkstyle.xml | 4 +--- src/main/java/org/influxdb/impl/BatchProcessor.java | 9 ++------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/checkstyle.xml b/checkstyle.xml index d739eb82f..fe2de79f3 100644 --- a/checkstyle.xml +++ b/checkstyle.xml @@ -28,9 +28,7 @@ - + diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index f2f3ad013..e1d6a5073 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -21,11 +21,6 @@ import java.util.logging.Level; import java.util.logging.Logger; -import org.influxdb.InfluxDB; -import org.influxdb.dto.BatchPoints; -import org.influxdb.dto.Point; - - /** * A BatchProcessor can be attached to a InfluxDB Instance to collect single point writes and * aggregates them to BatchPoints to get a better write performance. 
@@ -244,8 +239,8 @@ public static Builder builder(final InfluxDB influxDB) { return new Builder(influxDB); } - BatchProcessor(final InfluxDBImpl influxDB, final BatchWriter batchWriter, final ThreadFactory threadFactory, final int actions, - final TimeUnit flushIntervalUnit, final int flushInterval, final int jitterInterval, + BatchProcessor(final InfluxDBImpl influxDB, final BatchWriter batchWriter, final ThreadFactory threadFactory, + final int actions, final TimeUnit flushIntervalUnit, final int flushInterval, final int jitterInterval, final BiConsumer, Throwable> exceptionHandler, final ConsistencyLevel consistencyLevel) { super(); From d0411b7220c7ce59efb3615d451308d3f4c8171c Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 9 Jan 2018 17:41:10 +0100 Subject: [PATCH 073/148] fixing after a wrong merge after rebase --- src/test/java/org/influxdb/TestUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/TestUtils.java b/src/test/java/org/influxdb/TestUtils.java index 865ecdcf2..e85f6feaf 100644 --- a/src/test/java/org/influxdb/TestUtils.java +++ b/src/test/java/org/influxdb/TestUtils.java @@ -65,7 +65,7 @@ public static InfluxDB connectToInfluxDB() throws InterruptedException, IOExcept } while (!influxDBstarted); influxDB.setLogLevel(InfluxDB.LogLevel.NONE); System.out.println("##################################################################################"); - System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); + System.out.println("# Connected to InfluxDB Version: " + influxDB.version() + " #"); System.out.println("##################################################################################"); return influxDB; } From c640568ec34f729d5baf02ec8b70a2717fb5e402 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 10 Jan 2018 19:30:46 +0100 Subject: [PATCH 074/148] various maven plugin and dependency updates --- pom.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pom.xml b/pom.xml index bd92d18d7..a791f661e 100644 --- a/pom.xml +++ b/pom.xml @@ -84,7 +84,7 @@ org.apache.maven.plugins maven-site-plugin - 3.6 + 3.7 org.apache.maven.plugins @@ -109,7 +109,7 @@ org.apache.maven.plugins maven-resources-plugin - 3.0.1 + 3.0.2 @@ -143,7 +143,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.0.0-M1 + 3.0.0 attach-javadocs @@ -172,7 +172,7 @@ org.jacoco jacoco-maven-plugin - 0.7.9 + 0.8.0 @@ -191,7 +191,7 @@ org.apache.maven.plugins maven-checkstyle-plugin - 2.17 + 3.0.0 true checkstyle.xml @@ -230,13 +230,13 @@ org.assertj assertj-core - 3.8.0 + 3.9.0 test org.mockito mockito-core - 2.12.0 + 2.13.0 test From a10840b59cd56caedb63eae843e6e38125db300c Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Wed, 10 Jan 2018 19:36:02 +0100 Subject: [PATCH 075/148] fix one small javadoc nit --- src/main/java/org/influxdb/dto/Pong.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/java/org/influxdb/dto/Pong.java b/src/main/java/org/influxdb/dto/Pong.java index 278633ce1..4aa041e41 100644 --- a/src/main/java/org/influxdb/dto/Pong.java +++ b/src/main/java/org/influxdb/dto/Pong.java @@ -28,6 +28,8 @@ public void setVersion(final String version) { /** * Good or bad connection status. + * + * @return true if the version of influxdb is not unknown. 
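A sketch of a readiness check built on this flag, along the lines of TestUtils.connectToInfluxDB earlier in this series (the `influxDB` instance is assumed to be connected already):

```java
Pong pong = influxDB.ping();
if (pong.isGood()) {
    System.out.println("Connected to InfluxDB " + pong.getVersion());
} else {
    throw new IllegalStateException("InfluxDB did not report a version; is the server up?");
}
```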
*/ public boolean isGood() { return !UNKNOWN_VERSION.equalsIgnoreCase(version); From 00367da5534f4b1f68e42190c2fea7fd4e0c8230 Mon Sep 17 00:00:00 2001 From: dubsky Date: Fri, 12 Jan 2018 14:29:57 +0100 Subject: [PATCH 076/148] adding test for retry writer --- src/main/java/org/influxdb/BatchOptions.java | 2 +- src/main/java/org/influxdb/impl/BatchOptionsImpl.java | 2 +- .../java/org/influxdb/impl/RetryCapableBatchWriter.java | 9 ++++++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index 1d7e9c727..7c611ea82 100644 --- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -65,7 +65,7 @@ public interface BatchOptions { * to treat a write as a success) * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency); + BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency); /** diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java index 5edbc995f..21cc2e757 100644 --- a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java +++ b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java @@ -67,7 +67,7 @@ public BatchOptions exceptionHandler(final BiConsumer, Throwable return clone; } - public BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency) { + public BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency) { BatchOptionsImpl clone = getClone(); clone.consistency = consistency; return clone; diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java index bf3723374..49be0ecfd 100644 --- a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -71,7 +71,14 @@ public void write(final BatchPoints batchPoints) { || result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { iterator.remove(); usedRetryBufferCapacity -= entry.getPoints().size(); - exceptionHandler.accept(entry.getPoints(), result.throwable); + // we are throwing out data, notify the client + if (result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) + exceptionHandler.accept(entry.getPoints(), result.throwable); + } else { + // we cannot send more data otherwise we would write them in different + // order than in which were submitted + addToBatchQueue(batchPoints); + return; } } // write the last given batch last so that duplicate data points get overwritten correctly From 59a787b8c45fb913060cd67d754b79524e3969d8 Mon Sep 17 00:00:00 2001 From: dubsky Date: Fri, 12 Jan 2018 14:54:38 +0100 Subject: [PATCH 077/148] adding test for retry writer --- .../java/org/influxdb/BatchOptionsTest.java | 29 ++++++- src/test/java/org/influxdb/TestUtils.java | 17 +++- .../impl/RetryCapableBatchWriterTest.java | 86 +++++++++++++++++++ 3 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index f58a685af..40a93398f 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -10,8 +10,10 @@ import org.junit.runner.RunWith; import java.io.IOException; +import 
java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; @RunWith(JUnitPlatform.class) public class BatchOptionsTest { @@ -37,6 +39,31 @@ public void testBatchEnabledWithDefaultSettings() { } } + @Test + public void testParametersSet() { + BatchOptions options = BatchOptions.DEFAULTS.actions(3); + Assertions.assertEquals(3, options.getActions()); + options=options.consistency(InfluxDB.ConsistencyLevel.ANY); + Assertions.assertEquals(InfluxDB.ConsistencyLevel.ANY, options.getConsistency()); + options=options.flushDuration(1001); + Assertions.assertEquals(1001, options.getFlushDuration()); + options=options.bufferLimit(7070); + Assertions.assertEquals(7070, options.getBufferLimit()); + options=options.jitterDuration(104); + Assertions.assertEquals(104, options.getJitterDuration()); + BiConsumer, Throwable> handler=new BiConsumer, Throwable>() { + @Override + public void accept(Iterable points, Throwable throwable) { + + } + }; + options=options.exceptionHandler(handler); + Assertions.assertEquals(handler, options.getExceptionHandler()); + ThreadFactory tf=Executors.defaultThreadFactory(); + options=options.threadFactory(tf); + Assertions.assertEquals(tf, options.getThreadFactory()); + } + /** * Test the implementation of {@link BatchOptions#actions(int)} }. */ @@ -45,7 +72,7 @@ public void testActionsSetting() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { BatchOptions options = BatchOptions.DEFAULTS.actions(3); - Assertions.assertEquals(3, options.getActions()); + this.influxDB.enableBatch(options); this.influxDB.createDatabase(dbName); this.influxDB.setDatabase(dbName); diff --git a/src/test/java/org/influxdb/TestUtils.java b/src/test/java/org/influxdb/TestUtils.java index e85f6feaf..ee225a05b 100644 --- a/src/test/java/org/influxdb/TestUtils.java +++ b/src/test/java/org/influxdb/TestUtils.java @@ -1,5 +1,6 @@ package org.influxdb; +import okhttp3.OkHttpClient; import org.influxdb.dto.Pong; import java.io.IOException; @@ -47,8 +48,20 @@ public static String defaultRetentionPolicy(String version) { } } - public static InfluxDB connectToInfluxDB() throws InterruptedException, IOException { - InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); + public static InfluxDB connectToInfluxDB() throws InterruptedException, IOException { + return connectToInfluxDB(null); + } + + public static InfluxDB connectToInfluxDB( final OkHttpClient.Builder client) throws InterruptedException, IOException { + OkHttpClient.Builder clientToUse; + if (client == null) { + clientToUse = new OkHttpClient.Builder(); + } else { + clientToUse = client; + } + InfluxDB influxDB = InfluxDBFactory.connect( + "http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), + "admin", "admin", clientToUse); boolean influxDBstarted = false; do { Pong response; diff --git a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java new file mode 100644 index 000000000..9ebf638a3 --- /dev/null +++ b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java @@ -0,0 +1,86 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBException; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; +import org.junit.Assert; +import org.junit.jupiter.api.Test; +import 
org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +import java.util.List; +import java.util.function.BiConsumer; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.*; + +@RunWith(JUnitPlatform.class) +public class RetryCapableBatchWriterTest { + + BatchPoints getBP(int count) { + BatchPoints.Builder b = BatchPoints.database("d1"); + for (int i = 0; i < count; i++) { + b.point(Point.measurement("x1").addField("x", 1).build()).build(); + } + return b.build(); + } + + @Test + public void test() { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BiConsumer errorHandler = mock(BiConsumer.class); + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 150, 100); + BatchPoints bp0 = getBP(5); + BatchPoints bp1 = getBP(90); + BatchPoints bp2 = getBP(90); + BatchPoints bp3 = getBP(8); + BatchPoints bp4 = getBP(100); + + Exception nonRecoverable = InfluxDBException.buildExceptionForErrorState("database not found: cvfdgf"); + Exception recoverable = InfluxDBException.buildExceptionForErrorState("cache-max-memory-size exceeded 104/1400"); + Mockito.doThrow(nonRecoverable).when(mockInfluxDB).write(bp0); + Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp1); + Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp2); + Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp3); + // first one will fail with non-recoverable error + rw.write(bp0); + // second one will fail with recoverable error + rw.write(bp1); + // will fail with recoverable error again, will remove data due to buffer limit + rw.write(bp2); + // will write fail with recoverable error + rw.write(bp3); + + ArgumentCaptor captor = ArgumentCaptor.forClass(BatchPoints.class); + verify(mockInfluxDB, times(4)).write(captor.capture()); + final List capturedArgument1 = captor.getAllValues(); + for (BatchPoints b : capturedArgument1) { + System.out.println("batchSize written " + b.getPoints().size()); + } + + Assert.assertEquals(capturedArgument1.get(0).getPoints().size(), 5); + Assert.assertEquals(capturedArgument1.get(1).getPoints().size(), 90); + Assert.assertEquals(capturedArgument1.get(2).getPoints().size(), 90); + Assert.assertEquals(capturedArgument1.get(3).getPoints().size(), 98); + + verify(errorHandler, times(1)).accept(any(),any()); + + // will write data that previously were not sent, will send additional data + Mockito.reset(mockInfluxDB); + rw.write(bp4); + + ArgumentCaptor captor2 = ArgumentCaptor.forClass(BatchPoints.class); + verify(mockInfluxDB, times(2)).write(captor2.capture()); + final List capturedArgument2 = captor2.getAllValues(); + for (BatchPoints b : capturedArgument2) { + System.out.println("batchSize written " + b.getPoints().size()); + } + Assert.assertEquals(capturedArgument2.get(0).getPoints().size(), 98); + Assert.assertEquals(capturedArgument2.get(1).getPoints().size(), 100); + + } +} From aa74fc4d68965dc4f23595f0d10b442fcdb3a1a2 Mon Sep 17 00:00:00 2001 From: dubsky Date: Fri, 12 Jan 2018 17:00:23 +0100 Subject: [PATCH 078/148] implementing code review --- src/main/java/org/influxdb/BatchOptions.java | 105 ++++++++++++++---- .../org/influxdb/impl/BatchOptionsImpl.java | 99 ----------------- 2 files changed, 85 insertions(+), 119 deletions(-) delete mode 100644 src/main/java/org/influxdb/impl/BatchOptionsImpl.java diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index 26e654f24..e9e8c87fc 100644 
--- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -1,8 +1,8 @@ package org.influxdb; import org.influxdb.dto.Point; -import org.influxdb.impl.BatchOptionsImpl; +import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.function.BiConsumer; @@ -10,21 +10,51 @@ * BatchOptions are used to configure batching of individual data point writes * into InfluxDB. See {@link InfluxDB#enableBatch(BatchOptions)} */ -public interface BatchOptions { +public final class BatchOptions implements Cloneable { - BatchOptions DEFAULTS = BatchOptionsImpl.DEFAULTS; + /** + * Default batch options. This class is immutable, each configuration + * is built by taking the DEFAULTS and setting specific configuration + * properties. + */ + public static final BatchOptions DEFAULTS = new BatchOptions(); + + // default values here are consistent with Telegraf + public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; + public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; + public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; + + private int actions = DEFAULT_BATCH_ACTIONS_LIMIT; + private int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; + private int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; + + private ThreadFactory threadFactory = Executors.defaultThreadFactory(); + BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { + }; + private InfluxDB.ConsistencyLevel consistency = InfluxDB.ConsistencyLevel.ONE; + + private BatchOptions() { + } /** * @param actions the number of actions to collect * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions actions(final int actions); + public BatchOptions actions(final int actions) { + BatchOptions clone = getClone(); + clone.actions = actions; + return clone; + } /** * @param flushDuration the time to wait at most (milliseconds). * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions flushDuration(final int flushDuration); + public BatchOptions flushDuration(final int flushDuration) { + BatchOptions clone = getClone(); + clone.flushDuration = flushDuration; + return clone; + } /** * Jitters the batch flush interval by a random amount. This is primarily to avoid @@ -34,57 +64,92 @@ public interface BatchOptions { * @param jitterDuration (milliseconds) * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions jitterDuration(final int jitterDuration); + public BatchOptions jitterDuration(final int jitterDuration) { + BatchOptions clone = getClone(); + clone.jitterDuration = jitterDuration; + return clone; + } /** * @param threadFactory a ThreadFactory instance to be used * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions threadFactory(final ThreadFactory threadFactory); + public BatchOptions threadFactory(final ThreadFactory threadFactory) { + BatchOptions clone = getClone(); + clone.threadFactory = threadFactory; + return clone; + } /** * @param exceptionHandler a consumer function to handle asynchronous errors * @return the BatchOptions instance to be able to use it in a fluent manner. 
*/ - BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler); + public BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler) { + BatchOptions clone = getClone(); + clone.exceptionHandler = exceptionHandler; + return clone; + } /** * @param consistency cluster consistency setting (how many nodes have to store data points - * to treat a write as a success) + * to treat a write as a success) * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency); - + public BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency) { + BatchOptions clone = getClone(); + clone.consistency = consistency; + return clone; + } - /** - * @return actions the number of actions to collect - */ - int getActions(); + /** + * @return actions the number of actions to collect + */ + public int getActions() { + return actions; + } /** * @return flushDuration the time to wait at most (milliseconds). */ - int getFlushDuration(); + public int getFlushDuration() { + return flushDuration; + } /** * @return batch flush interval jitter value (milliseconds) */ - int getJitterDuration(); + public int getJitterDuration() { + return jitterDuration; + } /** * @return a ThreadFactory instance to be used */ - ThreadFactory getThreadFactory(); + public ThreadFactory getThreadFactory() { + return threadFactory; + } /** * @return a consumer function to handle asynchronous errors */ - BiConsumer, Throwable> getExceptionHandler(); + public BiConsumer, Throwable> getExceptionHandler() { + return exceptionHandler; + } /** * @return cluster consistency setting (how many nodes have to store data points * to treat a write as a success) */ - InfluxDB.ConsistencyLevel getConsistency(); + public InfluxDB.ConsistencyLevel getConsistency() { + return consistency; + } + + private BatchOptions getClone() { + try { + return (BatchOptions) this.clone(); + } catch (CloneNotSupportedException e) { + throw new RuntimeException(e); + } + } } diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java deleted file mode 100644 index 2fd8bba8d..000000000 --- a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java +++ /dev/null @@ -1,99 +0,0 @@ -package org.influxdb.impl; - -import org.influxdb.BatchOptions; -import org.influxdb.InfluxDB; -import org.influxdb.dto.Point; - -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.function.BiConsumer; - -public final class BatchOptionsImpl implements BatchOptions, Cloneable { - - public static final BatchOptions DEFAULTS = new BatchOptionsImpl(); - - // default values here are consistent with Telegraf - public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; - public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; - public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; - - int actions = DEFAULT_BATCH_ACTIONS_LIMIT; - int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; - int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; - - ThreadFactory threadFactory = Executors.defaultThreadFactory(); - BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { - }; - InfluxDB.ConsistencyLevel consistency = InfluxDB.ConsistencyLevel.ONE; - - private BatchOptionsImpl() { - } - - public BatchOptions actions(final int actions) { - BatchOptionsImpl clone = getClone(); - clone.actions = actions; - return clone; - } - - public BatchOptions 
flushDuration(final int flushDuration) { - BatchOptionsImpl clone = getClone(); - clone.flushDuration = flushDuration; - return clone; - } - - public BatchOptions jitterDuration(final int jitterDuration) { - BatchOptionsImpl clone = getClone(); - clone.jitterDuration = jitterDuration; - return clone; - } - - public BatchOptions threadFactory(final ThreadFactory threadFactory) { - BatchOptionsImpl clone = getClone(); - clone.threadFactory = threadFactory; - return clone; - } - - public BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler) { - BatchOptionsImpl clone = getClone(); - clone.exceptionHandler = exceptionHandler; - return clone; - } - - public BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency) { - BatchOptionsImpl clone = getClone(); - clone.consistency = consistency; - return clone; - } - - private BatchOptionsImpl getClone() { - try { - return (BatchOptionsImpl) this.clone(); - } catch (CloneNotSupportedException e) { - throw new RuntimeException(e); - } - } - - public int getActions() { - return actions; - } - - public int getFlushDuration() { - return flushDuration; - } - - public int getJitterDuration() { - return jitterDuration; - } - - public InfluxDB.ConsistencyLevel getConsistency() { - return consistency; - } - - public ThreadFactory getThreadFactory() { - return threadFactory; - } - - public BiConsumer, Throwable> getExceptionHandler() { - return exceptionHandler; - } -} From a3eae959c6837a21ff4306cf40a37f39f5aab6f6 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Wed, 17 Jan 2018 16:25:06 +0700 Subject: [PATCH 079/148] add test for flushDuration, jitterDuration, bufferLimit --- .../java/org/influxdb/BatchOptionsTest.java | 154 +++++++++++++++++- 1 file changed, 151 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 40a93398f..2ed53b036 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -6,6 +6,7 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; @@ -28,7 +29,7 @@ public void setUp() throws InterruptedException, IOException { /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. */ - @Test + //@Test public void testBatchEnabledWithDefaultSettings() { try { this.influxDB.enableBatch(); @@ -39,7 +40,7 @@ public void testBatchEnabledWithDefaultSettings() { } } - @Test + //@Test public void testParametersSet() { BatchOptions options = BatchOptions.DEFAULTS.actions(3); Assertions.assertEquals(3, options.getActions()); @@ -67,7 +68,7 @@ public void accept(Iterable points, Throwable throwable) { /** * Test the implementation of {@link BatchOptions#actions(int)} }. */ - @Test + //@Test public void testActionsSetting() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { @@ -94,4 +95,151 @@ public void testActionsSetting() throws InterruptedException { } } + /** + * Test the implementation of {@link BatchOptions#flushDuration(int)} }. 
+ * @throws InterruptedException + */ + //@Test + public void testFlushDuration() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(10000); + + this.influxDB.createDatabase(dbName); + this.influxDB.setDatabase(dbName); + this.influxDB.enableBatch(options); + for (int j = 0; j < 20; j++) { + Point point = Point.measurement("weather") + .time(j,TimeUnit.HOURS) + .addField("temperature", (double) j) + .addField("humidity", (double) (j) * 1.1) + .addField("uv_index", "moderate").build(); + this.influxDB.write(point); + } + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + Thread.sleep(12000); + result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + this.influxDB.disableBatch(); + this.influxDB.deleteDatabase(dbName); + } + } + + /** + * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. + * @throws InterruptedException + */ + //@Test + public void testJitterDuration() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(1000).jitterDuration(125); + + this.influxDB.createDatabase(dbName); + this.influxDB.setDatabase(dbName); + this.influxDB.enableBatch(options); + for (int j = 0; j < 20; j++) { + Point point = Point.measurement("weather") + .time(j,TimeUnit.HOURS) + .addField("temperature", (double) j) + .addField("humidity", (double) (j) * 1.1) + .addField("uv_index", "moderate").build(); + this.influxDB.write(point); + } + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + Thread.sleep(1125); + result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + this.influxDB.disableBatch(); + this.influxDB.deleteDatabase(dbName); + } + + + } + + /** + * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. + */ + //@Test + public void testNegativeJitterDuration() { + + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchOptions options = BatchOptions.DEFAULTS.jitterDuration(-10); + influxDB.enableBatch(options); + + influxDB.disableBatch(); + options = BatchOptions.DEFAULTS.jitterDuration(0); + influxDB.enableBatch(); + influxDB.disableBatch(); + }); + } + + /** + * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. 
+ */ + @Test + public void testBufferLimit() throws InterruptedException { + + int[][] bufferLimit2Actions = {{10, 4}, {3, 4}}; + + for (int[] bufferLimit2Action : bufferLimit2Actions) { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(bufferLimit2Action[0]).actions(bufferLimit2Action[1]); + + this.influxDB.createDatabase(dbName); + this.influxDB.setDatabase(dbName); + this.influxDB.enableBatch(options); + for (int j = 0; j < 10; j++) { + Point point = Point.measurement("weather") + .time(j,TimeUnit.HOURS) + .addField("temperature", (double) j) + .addField("humidity", (double) (j) * 1.1) + .addField("uv_index", "moderate").build(); + this.influxDB.write(point); + } + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(8, result.getResults().get(0).getSeries().get(0).getValues().size()); + Thread.sleep(1000); + result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(10, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + this.influxDB.disableBatch(); + this.influxDB.deleteDatabase(dbName); + } + } + + } + + /** + * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. + */ + //@Test + public void testNegativeBufferLimit() { + + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(-10); + influxDB.enableBatch(options); + + influxDB.disableBatch(); + options = BatchOptions.DEFAULTS.bufferLimit(0); + influxDB.enableBatch(); + influxDB.disableBatch(); + }); + } } From 9e6809e9a6f24e8df26bde32eb47e2d92e15afb0 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Thu, 18 Jan 2018 10:46:38 +0700 Subject: [PATCH 080/148] fixing checkstyle --- src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java index 49be0ecfd..6871dfc54 100644 --- a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -72,8 +72,9 @@ public void write(final BatchPoints batchPoints) { iterator.remove(); usedRetryBufferCapacity -= entry.getPoints().size(); // we are throwing out data, notify the client - if (result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) + if (result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { exceptionHandler.accept(entry.getPoints(), result.throwable); + } } else { // we cannot send more data otherwise we would write them in different // order than in which were submitted From 7e6741bf6a71af3f8c9c61ba4e3826e83a165c3b Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Fri, 19 Jan 2018 13:56:50 +0700 Subject: [PATCH 081/148] add test for threadFactory, exceptionHandler, consistency --- .../java/org/influxdb/BatchOptionsTest.java | 286 +++++++++++++----- 1 file changed, 218 insertions(+), 68 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 2ed53b036..d452459aa 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -1,21 +1,27 @@ package org.influxdb; +import org.influxdb.InfluxDB.ConsistencyLevel; +import org.influxdb.InfluxDBException.DatabaseNotFoundError; +import 
org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.function.Executable; import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; +import static org.mockito.Mockito.*; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; + @RunWith(JUnitPlatform.class) public class BatchOptionsTest { @@ -29,7 +35,7 @@ public void setUp() throws InterruptedException, IOException { /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. */ - //@Test + @Test public void testBatchEnabledWithDefaultSettings() { try { this.influxDB.enableBatch(); @@ -40,7 +46,7 @@ public void testBatchEnabledWithDefaultSettings() { } } - //@Test + @Test public void testParametersSet() { BatchOptions options = BatchOptions.DEFAULTS.actions(3); Assertions.assertEquals(3, options.getActions()); @@ -68,7 +74,7 @@ public void accept(Iterable points, Throwable throwable) { /** * Test the implementation of {@link BatchOptions#actions(int)} }. */ - //@Test + @Test public void testActionsSetting() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { @@ -99,29 +105,21 @@ public void testActionsSetting() throws InterruptedException { * Test the implementation of {@link BatchOptions#flushDuration(int)} }. * @throws InterruptedException */ - //@Test + @Test public void testFlushDuration() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { BatchOptions options = BatchOptions.DEFAULTS.flushDuration(10000); - - this.influxDB.createDatabase(dbName); - this.influxDB.setDatabase(dbName); - this.influxDB.enableBatch(options); - for (int j = 0; j < 20; j++) { - Point point = Point.measurement("weather") - .time(j,TimeUnit.HOURS) - .addField("temperature", (double) j) - .addField("humidity", (double) (j) * 1.1) - .addField("uv_index", "moderate").build(); - this.influxDB.write(point); - } + influxDB.createDatabase(dbName); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertNull(result.getResults().get(0).getSeries()); Assertions.assertNull(result.getResults().get(0).getError()); - Thread.sleep(12000); + Thread.sleep(10000); result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } @@ -135,24 +133,17 @@ public void testFlushDuration() throws InterruptedException { * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. 
* @throws InterruptedException */ - //@Test + @Test public void testJitterDuration() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); try { BatchOptions options = BatchOptions.DEFAULTS.flushDuration(1000).jitterDuration(125); - - this.influxDB.createDatabase(dbName); - this.influxDB.setDatabase(dbName); - this.influxDB.enableBatch(options); - for (int j = 0; j < 20; j++) { - Point point = Point.measurement("weather") - .time(j,TimeUnit.HOURS) - .addField("temperature", (double) j) - .addField("humidity", (double) (j) * 1.1) - .addField("uv_index", "moderate").build(); - this.influxDB.write(point); - } + influxDB.createDatabase(dbName); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertNull(result.getResults().get(0).getSeries()); @@ -163,8 +154,8 @@ public void testJitterDuration() throws InterruptedException { Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } finally { - this.influxDB.disableBatch(); - this.influxDB.deleteDatabase(dbName); + influxDB.disableBatch(); + influxDB.deleteDatabase(dbName); } @@ -173,7 +164,7 @@ public void testJitterDuration() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. */ - //@Test + @Test public void testNegativeJitterDuration() { Assertions.assertThrows(IllegalArgumentException.class, () -> { @@ -187,49 +178,52 @@ public void testNegativeJitterDuration() { }); } + + private void doTestBufferLimit(int bufferLimit, int actions) throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(bufferLimit).actions(actions); + + influxDB.createDatabase(dbName); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Thread.sleep(1000); + result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + influxDB.disableBatch(); + influxDB.deleteDatabase(dbName); + } + } + + /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. 
*/ @Test - public void testBufferLimit() throws InterruptedException { - - int[][] bufferLimit2Actions = {{10, 4}, {3, 4}}; - - for (int[] bufferLimit2Action : bufferLimit2Actions) { - String dbName = "write_unittest_" + System.currentTimeMillis(); - try { - BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(bufferLimit2Action[0]).actions(bufferLimit2Action[1]); - - this.influxDB.createDatabase(dbName); - this.influxDB.setDatabase(dbName); - this.influxDB.enableBatch(options); - for (int j = 0; j < 10; j++) { - Point point = Point.measurement("weather") - .time(j,TimeUnit.HOURS) - .addField("temperature", (double) j) - .addField("humidity", (double) (j) * 1.1) - .addField("uv_index", "moderate").build(); - this.influxDB.write(point); - } - - QueryResult result = influxDB.query(new Query("select * from weather", dbName)); - Assertions.assertEquals(8, result.getResults().get(0).getSeries().get(0).getValues().size()); - Thread.sleep(1000); - result = influxDB.query(new Query("select * from weather", dbName)); - Assertions.assertEquals(10, result.getResults().get(0).getSeries().get(0).getValues().size()); - } - finally { - this.influxDB.disableBatch(); - this.influxDB.deleteDatabase(dbName); - } - } + public void testBufferLimit1() throws InterruptedException { + + doTestBufferLimit(3, 4); + + } + + /** + * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. + */ + @Test + public void testBufferLimit2() throws InterruptedException { + + doTestBufferLimit(10, 4); } - /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - //@Test + @Test public void testNegativeBufferLimit() { Assertions.assertThrows(IllegalArgumentException.class, () -> { @@ -242,4 +236,160 @@ public void testNegativeBufferLimit() { influxDB.disableBatch(); }); } + + /** + * Test the implementation of {@link BatchOptions#threadFactory(ThreadFactory)} }. + * @throws InterruptedException + */ + @Test + public void testThreadFactory() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.threadFactory((r) -> { + return new Thread(r); + }); + + influxDB.createDatabase(dbName); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); + + Thread.sleep(3000); + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } finally { + this.influxDB.disableBatch(); + this.influxDB.deleteDatabase(dbName); + } + + } + + /** + * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. 
+ * @throws InterruptedException + */ + @Test + public void testHandlerOnRetryImpossible() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + InfluxDB spy = spy(influxDB); + doThrow(DatabaseNotFoundError.class).when(spy).write(any(BatchPoints.class)); + + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler); + + spy.createDatabase(dbName); + spy.setDatabase(dbName); + spy.enableBatch(options); + + writeSomePoints(spy, 1); + + Thread.sleep(1000); + verify(mockHandler, times(1)).accept(any(), any()); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + } finally { + spy.disableBatch(); + spy.deleteDatabase(dbName); + } + + } + + /** + * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. + * @throws InterruptedException + */ + @Test + public void testHandlerOnRetryPossible() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + InfluxDB spy = spy(influxDB); + final Map map = new HashMap<>(1); + map.put("firstCall", true); + doAnswer((invocation) -> { + if (map.get("firstCall")) { + map.put("firstCall", false); + throw new InfluxDBException("error"); + } else { + return invocation.callRealMethod(); + } + + }).when(spy).write(any(BatchPoints.class)); + + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler); + + spy.createDatabase(dbName); + spy.setDatabase(dbName); + spy.enableBatch(options); + + writeSomePoints(spy, 1); + + Thread.sleep(5000); + verify(mockHandler, never()).accept(any(), any()); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNotNull(result.getResults().get(0).getSeries()); + Assertions.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); + + } finally { + spy.disableBatch(); + spy.deleteDatabase(dbName); + } + + } + + /** + * Test the implementation of {@link BatchOptions#consistency(InfluxDB.ConsistencyLevel)} }. 
+ * @throws InterruptedException + */ + @Test + public void testConsistency() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + influxDB.createDatabase(dbName); + influxDB.setDatabase(dbName); + try { + int n = 5; + for (ConsistencyLevel consistencyLevel : ConsistencyLevel.values()) { + BatchOptions options = BatchOptions.DEFAULTS.consistency(consistencyLevel); + + influxDB.enableBatch(options); + writeSomePoints(influxDB, n); + + Thread.sleep(2000); + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(n, result.getResults().get(0).getSeries().get(0).getValues().size()); + + n += 5; + this.influxDB.disableBatch(); + } + + } finally { + this.influxDB.deleteDatabase(dbName); + } + } + + private void writeSomePoints(InfluxDB influxDB, int firstIndex, int lastIndex) { + for (int i = firstIndex; i <= lastIndex; i++) { + Point point = Point.measurement("weather") + .time(i,TimeUnit.HOURS) + .addField("temperature", (double) i) + .addField("humidity", (double) (i) * 1.1) + .addField("uv_index", "moderate").build(); + influxDB.write(point); + } + } + + private void write20Points(InfluxDB influxDB) { + writeSomePoints(influxDB, 0, 19); + } + + private void writeSomePoints(InfluxDB influxDB, int n) { + writeSomePoints(influxDB, 0, n - 1); + } } From 7b92104c7950e94ba12e40761e0161a1bee84d6a Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Fri, 19 Jan 2018 14:15:12 +0700 Subject: [PATCH 082/148] fix test for flushDuration --- .../java/org/influxdb/BatchOptionsTest.java | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index d452459aa..ab63141e1 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -35,7 +35,7 @@ public void setUp() throws InterruptedException, IOException { /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. */ - @Test + //@Test public void testBatchEnabledWithDefaultSettings() { try { this.influxDB.enableBatch(); @@ -46,7 +46,7 @@ public void testBatchEnabledWithDefaultSettings() { } } - @Test + //@Test public void testParametersSet() { BatchOptions options = BatchOptions.DEFAULTS.actions(3); Assertions.assertEquals(3, options.getActions()); @@ -74,7 +74,7 @@ public void accept(Iterable points, Throwable throwable) { /** * Test the implementation of {@link BatchOptions#actions(int)} }. 
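The two handler tests above pin down when the exception handler fires: never when a retry eventually succeeds, and once per batch that is dropped for good (non-recoverable error, or eviction from the retry buffer). A typical client therefore registers a handler that records the lost points; in this sketch the endpoint, database name and logging are illustrative only:

```java
import java.util.function.BiConsumer;
import java.util.logging.Logger;

import org.influxdb.BatchOptions;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;

public class DroppedPointsHandlerExample {
  private static final Logger LOG = Logger.getLogger(DroppedPointsHandlerExample.class.getName());

  public static void main(String[] args) {
    InfluxDB influxDB = InfluxDBFactory.connect("http://127.0.0.1:8086", "root", "root");
    influxDB.setDatabase("example_db"); // placeholder database

    // invoked only for points that could not be written at all
    BiConsumer<Iterable<Point>, Throwable> onDropped = (points, error) -> {
      for (Point point : points) {
        LOG.warning("dropped point " + point + " caused by " + error);
      }
    };

    influxDB.enableBatch(BatchOptions.DEFAULTS
        .bufferLimit(10000)           // retry buffer for recoverable failures
        .exceptionHandler(onDropped));
  }
}
```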
*/ - @Test + //@Test public void testActionsSetting() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { @@ -109,7 +109,7 @@ public void testActionsSetting() throws InterruptedException { public void testFlushDuration() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { - BatchOptions options = BatchOptions.DEFAULTS.flushDuration(10000); + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(500); influxDB.createDatabase(dbName); influxDB.setDatabase(dbName); influxDB.enableBatch(options); @@ -119,7 +119,7 @@ public void testFlushDuration() throws InterruptedException { Assertions.assertNull(result.getResults().get(0).getSeries()); Assertions.assertNull(result.getResults().get(0).getError()); - Thread.sleep(10000); + Thread.sleep(1000); result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } @@ -133,7 +133,7 @@ public void testFlushDuration() throws InterruptedException { * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. * @throws InterruptedException */ - @Test + //@Test public void testJitterDuration() throws InterruptedException { @@ -164,7 +164,7 @@ public void testJitterDuration() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. */ - @Test + //@Test public void testNegativeJitterDuration() { Assertions.assertThrows(IllegalArgumentException.class, () -> { @@ -204,7 +204,7 @@ private void doTestBufferLimit(int bufferLimit, int actions) throws InterruptedE /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - @Test + //@Test public void testBufferLimit1() throws InterruptedException { doTestBufferLimit(3, 4); @@ -214,7 +214,7 @@ public void testBufferLimit1() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - @Test + //@Test public void testBufferLimit2() throws InterruptedException { doTestBufferLimit(10, 4); @@ -223,7 +223,7 @@ public void testBufferLimit2() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - @Test + //@Test public void testNegativeBufferLimit() { Assertions.assertThrows(IllegalArgumentException.class, () -> { @@ -241,7 +241,7 @@ public void testNegativeBufferLimit() { * Test the implementation of {@link BatchOptions#threadFactory(ThreadFactory)} }. * @throws InterruptedException */ - @Test + //@Test public void testThreadFactory() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); @@ -269,7 +269,7 @@ public void testThreadFactory() throws InterruptedException { * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. * @throws InterruptedException */ - @Test + //@Test public void testHandlerOnRetryImpossible() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); @@ -303,7 +303,7 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. 
* @throws InterruptedException */ - @Test + //@Test public void testHandlerOnRetryPossible() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); @@ -348,7 +348,7 @@ public void testHandlerOnRetryPossible() throws InterruptedException { * Test the implementation of {@link BatchOptions#consistency(InfluxDB.ConsistencyLevel)} }. * @throws InterruptedException */ - @Test + //@Test public void testConsistency() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); influxDB.createDatabase(dbName); From 4c668651e89a06b81127a8b243930fc0044906b0 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Fri, 19 Jan 2018 14:20:54 +0700 Subject: [PATCH 083/148] uncomment Test annotation --- .../java/org/influxdb/BatchOptionsTest.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index ab63141e1..9b6ca94e4 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -35,7 +35,7 @@ public void setUp() throws InterruptedException, IOException { /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. */ - //@Test + @Test public void testBatchEnabledWithDefaultSettings() { try { this.influxDB.enableBatch(); @@ -46,7 +46,7 @@ public void testBatchEnabledWithDefaultSettings() { } } - //@Test + @Test public void testParametersSet() { BatchOptions options = BatchOptions.DEFAULTS.actions(3); Assertions.assertEquals(3, options.getActions()); @@ -74,7 +74,7 @@ public void accept(Iterable points, Throwable throwable) { /** * Test the implementation of {@link BatchOptions#actions(int)} }. */ - //@Test + @Test public void testActionsSetting() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { @@ -133,7 +133,7 @@ public void testFlushDuration() throws InterruptedException { * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. * @throws InterruptedException */ - //@Test + @Test public void testJitterDuration() throws InterruptedException { @@ -164,7 +164,7 @@ public void testJitterDuration() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. */ - //@Test + @Test public void testNegativeJitterDuration() { Assertions.assertThrows(IllegalArgumentException.class, () -> { @@ -204,7 +204,7 @@ private void doTestBufferLimit(int bufferLimit, int actions) throws InterruptedE /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - //@Test + @Test public void testBufferLimit1() throws InterruptedException { doTestBufferLimit(3, 4); @@ -214,7 +214,7 @@ public void testBufferLimit1() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - //@Test + @Test public void testBufferLimit2() throws InterruptedException { doTestBufferLimit(10, 4); @@ -223,7 +223,7 @@ public void testBufferLimit2() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. */ - //@Test + @Test public void testNegativeBufferLimit() { Assertions.assertThrows(IllegalArgumentException.class, () -> { @@ -241,7 +241,7 @@ public void testNegativeBufferLimit() { * Test the implementation of {@link BatchOptions#threadFactory(ThreadFactory)} }. 
* @throws InterruptedException */ - //@Test + @Test public void testThreadFactory() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); @@ -269,7 +269,7 @@ public void testThreadFactory() throws InterruptedException { * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. * @throws InterruptedException */ - //@Test + @Test public void testHandlerOnRetryImpossible() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); @@ -303,7 +303,7 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. * @throws InterruptedException */ - //@Test + @Test public void testHandlerOnRetryPossible() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); @@ -348,7 +348,7 @@ public void testHandlerOnRetryPossible() throws InterruptedException { * Test the implementation of {@link BatchOptions#consistency(InfluxDB.ConsistencyLevel)} }. * @throws InterruptedException */ - //@Test + @Test public void testConsistency() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); influxDB.createDatabase(dbName); From b7a2306c3a569c6ea84416d3a4c5d2ab1b0bb117 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Fri, 19 Jan 2018 23:12:50 +0700 Subject: [PATCH 084/148] improve test --- .../java/org/influxdb/BatchOptionsTest.java | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 9b6ca94e4..86f720bce 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -11,11 +11,12 @@ import org.junit.jupiter.api.Test; import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + import static org.mockito.Mockito.*; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; @@ -308,16 +309,17 @@ public void testHandlerOnRetryPossible() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); InfluxDB spy = spy(influxDB); - final Map map = new HashMap<>(1); - map.put("firstCall", true); - doAnswer((invocation) -> { - if (map.get("firstCall")) { - map.put("firstCall", false); - throw new InfluxDBException("error"); - } else { - return invocation.callRealMethod(); + doAnswer(new Answer() { + boolean firstCall = true; + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + if (firstCall) { + firstCall = false; + throw new InfluxDBException("error"); + } else { + return invocation.callRealMethod(); + } } - }).when(spy).write(any(BatchPoints.class)); try { @@ -333,6 +335,8 @@ public void testHandlerOnRetryPossible() throws InterruptedException { Thread.sleep(5000); verify(mockHandler, never()).accept(any(), any()); + verify(spy, times(2)).write(any(BatchPoints.class)); + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertNotNull(result.getResults().get(0).getSeries()); Assertions.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); From ded8b48a164e7e4a5c496d73766e0543fef34f96 
Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Fri, 19 Jan 2018 23:34:36 +0700 Subject: [PATCH 085/148] fix failures of testNegativeJitterDuration, testNegativeBufferLimit, testHandlerOnRetryImpossible --- src/main/java/org/influxdb/impl/BatchProcessor.java | 2 ++ src/main/java/org/influxdb/impl/Preconditions.java | 11 +++++++++++ .../org/influxdb/impl/RetryCapableBatchWriter.java | 2 ++ 3 files changed, 15 insertions(+) diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index e1d6a5073..d72449287 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -170,6 +170,8 @@ public BatchProcessor build() { Objects.requireNonNull(this.influxDB, "influxDB"); Preconditions.checkPositiveNumber(this.actions, "actions"); Preconditions.checkPositiveNumber(this.flushInterval, "flushInterval"); + Preconditions.checkNotNegativeNumber(jitterInterval, "jitterInterval"); + Preconditions.checkNotNegativeNumber(bufferLimit, "bufferLimit"); Objects.requireNonNull(this.flushIntervalUnit, "flushIntervalUnit"); Objects.requireNonNull(this.threadFactory, "threadFactory"); Objects.requireNonNull(this.exceptionHandler, "exceptionHandler"); diff --git a/src/main/java/org/influxdb/impl/Preconditions.java b/src/main/java/org/influxdb/impl/Preconditions.java index 4a3297db6..e636ce1ce 100644 --- a/src/main/java/org/influxdb/impl/Preconditions.java +++ b/src/main/java/org/influxdb/impl/Preconditions.java @@ -36,6 +36,17 @@ public static void checkPositiveNumber(final Number number, final String name) t } } + /** + * Enforces that the number is not negative. + * @param number the number to test + * @param name variable name for reporting + * @throws IllegalArgumentException if the number is negative + */ + public static void checkNotNegativeNumber(final Number number, final String name) throws IllegalArgumentException { + if (number == null || number.doubleValue() < 0) { + throw new IllegalArgumentException("Expecting a positive or zero number for " + name); + } + } /** * Enforces that the duration is a valid influxDB duration.
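A short sketch of what the new checks mean for callers: a negative jitterDuration or bufferLimit is rejected the moment batching is enabled, while zero remains valid (the influxDB instance and the values are assumed/illustrative):

```java
// assumes an already connected InfluxDB instance, e.g. from InfluxDBFactory.connect(...)
try {
  influxDB.enableBatch(BatchOptions.DEFAULTS.jitterDuration(-10));
} catch (IllegalArgumentException e) {
  // "Expecting a positive or zero number for jitterInterval"
  System.err.println(e.getMessage());
}

// zero passes checkNotNegativeNumber, so this is accepted
influxDB.enableBatch(BatchOptions.DEFAULTS.jitterDuration(0));
```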
* @param duration the duration to test diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java index 6871dfc54..001a02ef1 100644 --- a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -86,6 +86,8 @@ public void write(final BatchPoints batchPoints) { WriteResult result = tryToWrite(batchPoints); if (result.outcome == WriteResultOutcome.FAILED_RETRY_POSSIBLE) { addToBatchQueue(batchPoints); + } else { + exceptionHandler.accept(batchPoints.getPoints(), result.throwable); } } From 15ae56c0ac3b27d1ae19b44a2a6d53d41a63d5ce Mon Sep 17 00:00:00 2001 From: dubsky Date: Mon, 22 Jan 2018 21:30:37 +0100 Subject: [PATCH 086/148] fixing automatic BatchWriter retry --- .../org/influxdb/impl/BatchProcessor.java | 7 +-- .../java/org/influxdb/impl/BatchWriter.java | 6 ++- .../org/influxdb/impl/OneShotBatchWriter.java | 8 +++- .../impl/RetryCapableBatchWriter.java | 46 +++++++++++++------ .../impl/RetryCapableBatchWriterTest.java | 15 +++--- 5 files changed, 54 insertions(+), 28 deletions(-) diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index d72449287..3f2d7b4b1 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -6,6 +6,7 @@ import org.influxdb.dto.Point; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -281,6 +282,7 @@ void write() { List currentBatch = null; try { if (this.queue.isEmpty()) { + BatchProcessor.this.batchWriter.write(Collections.emptyList()); return; } //for batch on HTTP. @@ -316,9 +318,8 @@ void write() { } } - for (BatchPoints batchPoints : batchKeyToBatchPoints.values()) { - BatchProcessor.this.batchWriter.write(batchPoints); - } + BatchProcessor.this.batchWriter.write(batchKeyToBatchPoints.values()); + for (Entry> entry : udpPortToBatchPoints.entrySet()) { for (String lineprotocolStr : entry.getValue()) { BatchProcessor.this.influxDB.write(entry.getKey(), lineprotocolStr); diff --git a/src/main/java/org/influxdb/impl/BatchWriter.java b/src/main/java/org/influxdb/impl/BatchWriter.java index e2a169143..4763010f9 100644 --- a/src/main/java/org/influxdb/impl/BatchWriter.java +++ b/src/main/java/org/influxdb/impl/BatchWriter.java @@ -2,14 +2,16 @@ import org.influxdb.dto.BatchPoints; +import java.util.Collection; + /** * Write individual batches to InfluxDB. */ interface BatchWriter { /** * Write the given batch into InfluxDB. - * @param batchPoints to write + * @param batchPointsCollection to write */ - void write(BatchPoints batchPoints); + void write(Collection batchPointsCollection); } diff --git a/src/main/java/org/influxdb/impl/OneShotBatchWriter.java b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java index c6594f6f5..e981fe627 100644 --- a/src/main/java/org/influxdb/impl/OneShotBatchWriter.java +++ b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java @@ -3,6 +3,8 @@ import org.influxdb.InfluxDB; import org.influxdb.dto.BatchPoints; +import java.util.Collection; + /** * Batch writer that tries to write BatchPoints exactly once. 
*/ @@ -15,7 +17,9 @@ class OneShotBatchWriter implements BatchWriter { } @Override - public void write(final BatchPoints batchPoints) { - influxDB.write(batchPoints); + public void write(final Collection batchPointsCollection) { + for (BatchPoints batchPoints : batchPointsCollection) { + influxDB.write(batchPoints); + } } } diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java index 001a02ef1..db4e66d4c 100644 --- a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -5,6 +5,8 @@ import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; +import java.util.Collection; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; @@ -34,23 +36,23 @@ class RetryCapableBatchWriter implements BatchWriter { private enum WriteResultOutcome { WRITTEN, FAILED_RETRY_POSSIBLE, FAILED_RETRY_IMPOSSIBLE } - private static class WriteResult { + private static final class WriteResult { static final WriteResult WRITTEN = new WriteResult(WriteResultOutcome.WRITTEN); WriteResultOutcome outcome; Throwable throwable; - public WriteResult(final WriteResultOutcome outcome) { + private WriteResult(final WriteResultOutcome outcome) { this.outcome = outcome; } - public WriteResult(final WriteResultOutcome outcome, final Throwable throwable) { + private WriteResult(final WriteResultOutcome outcome, final Throwable throwable) { this.outcome = outcome; this.throwable = throwable; } - public WriteResult(final InfluxDBException e) { + private WriteResult(final InfluxDBException e) { this.throwable = e; if (e.isRetryWorth()) { this.outcome = WriteResultOutcome.FAILED_RETRY_POSSIBLE; @@ -61,15 +63,15 @@ public WriteResult(final InfluxDBException e) { } @Override - public void write(final BatchPoints batchPoints) { + public void write(final Collection collection) { // empty the cached data first - ListIterator iterator = batchQueue.listIterator(); - while (iterator.hasNext()) { - BatchPoints entry = iterator.next(); + ListIterator batchQueueIterator = batchQueue.listIterator(); + while (batchQueueIterator.hasNext()) { + BatchPoints entry = batchQueueIterator.next(); WriteResult result = tryToWrite(entry); if (result.outcome == WriteResultOutcome.WRITTEN || result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { - iterator.remove(); + batchQueueIterator.remove(); usedRetryBufferCapacity -= entry.getPoints().size(); // we are throwing out data, notify the client if (result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { @@ -78,16 +80,30 @@ public void write(final BatchPoints batchPoints) { } else { // we cannot send more data otherwise we would write them in different // order than in which were submitted - addToBatchQueue(batchPoints); + for (BatchPoints batchPoints : collection) { + addToBatchQueue(batchPoints); + } return; } } // write the last given batch last so that duplicate data points get overwritten correctly - WriteResult result = tryToWrite(batchPoints); - if (result.outcome == WriteResultOutcome.FAILED_RETRY_POSSIBLE) { - addToBatchQueue(batchPoints); - } else { - exceptionHandler.accept(batchPoints.getPoints(), result.throwable); + Iterator collectionIterator = collection.iterator(); + while (collectionIterator.hasNext()) { + BatchPoints batchPoints = collectionIterator.next(); + WriteResult result = tryToWrite(batchPoints); + switch (result.outcome) { + case 
FAILED_RETRY_POSSIBLE: + addToBatchQueue(batchPoints); + while (collectionIterator.hasNext()) { + addToBatchQueue(collectionIterator.next()); + } + break; + case FAILED_RETRY_IMPOSSIBLE: + exceptionHandler.accept(batchPoints.getPoints(), result.throwable); + break; + default: + + } } } diff --git a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java index 9ebf638a3..4e635556e 100644 --- a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java +++ b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java @@ -11,6 +11,8 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.function.BiConsumer; @@ -47,13 +49,13 @@ public void test() { Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp2); Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp3); // first one will fail with non-recoverable error - rw.write(bp0); + rw.write(Collections.singletonList(bp0)); // second one will fail with recoverable error - rw.write(bp1); + rw.write(Collections.singletonList(bp1)); // will fail with recoverable error again, will remove data due to buffer limit - rw.write(bp2); + rw.write(Collections.singletonList(bp2)); // will write fail with recoverable error - rw.write(bp3); + rw.write(Collections.singletonList(bp3)); ArgumentCaptor captor = ArgumentCaptor.forClass(BatchPoints.class); verify(mockInfluxDB, times(4)).write(captor.capture()); @@ -67,11 +69,12 @@ public void test() { Assert.assertEquals(capturedArgument1.get(2).getPoints().size(), 90); Assert.assertEquals(capturedArgument1.get(3).getPoints().size(), 98); - verify(errorHandler, times(1)).accept(any(),any()); + // error handler called twice; once for first unrecoverable write, se + verify(errorHandler, times(2)).accept(any(),any()); // will write data that previously were not sent, will send additional data Mockito.reset(mockInfluxDB); - rw.write(bp4); + rw.write(Collections.singletonList(bp4)); ArgumentCaptor captor2 = ArgumentCaptor.forClass(BatchPoints.class); verify(mockInfluxDB, times(2)).write(captor2.capture()); From 2cd63ade9b5be0a5b89be3c3b50f4a4cbab1ffc9 Mon Sep 17 00:00:00 2001 From: dubsky Date: Fri, 12 Jan 2018 17:00:23 +0100 Subject: [PATCH 087/148] merging changes from batch-iterval-jittering branch --- src/main/java/org/influxdb/BatchOptions.java | 111 ++++++++++++++--- .../org/influxdb/impl/BatchOptionsImpl.java | 112 ------------------ 2 files changed, 92 insertions(+), 131 deletions(-) delete mode 100644 src/main/java/org/influxdb/impl/BatchOptionsImpl.java diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index 7c611ea82..90c1add32 100644 --- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -1,8 +1,8 @@ package org.influxdb; import org.influxdb.dto.Point; -import org.influxdb.impl.BatchOptionsImpl; +import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.function.BiConsumer; @@ -10,21 +10,53 @@ * BatchOptions are used to configure batching of individual data point writes * into InfluxDB. See {@link InfluxDB#enableBatch(BatchOptions)} */ -public interface BatchOptions { +public final class BatchOptions implements Cloneable { - BatchOptions DEFAULTS = BatchOptionsImpl.DEFAULTS; + /** + * Default batch options. 
This class is immutable, each configuration + * is built by taking the DEFAULTS and setting specific configuration + * properties. + */ + public static final BatchOptions DEFAULTS = new BatchOptions(); + + // default values here are consistent with Telegraf + public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; + public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; + public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; + public static final int DEFAULT_BUFFER_LIMIT = 10000; + + private int actions = DEFAULT_BATCH_ACTIONS_LIMIT; + private int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; + private int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; + private int bufferLimit = DEFAULT_BUFFER_LIMIT; + + private ThreadFactory threadFactory = Executors.defaultThreadFactory(); + BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { + }; + private InfluxDB.ConsistencyLevel consistency = InfluxDB.ConsistencyLevel.ONE; + + private BatchOptions() { + } /** * @param actions the number of actions to collect * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions actions(final int actions); + public BatchOptions actions(final int actions) { + BatchOptions clone = getClone(); + clone.actions = actions; + return clone; + } /** * @param flushDuration the time to wait at most (milliseconds). * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions flushDuration(final int flushDuration); + public BatchOptions flushDuration(final int flushDuration) { + BatchOptions clone = getClone(); + clone.flushDuration = flushDuration; + return clone; + } /** * Jitters the batch flush interval by a random amount. This is primarily to avoid @@ -34,7 +66,11 @@ public interface BatchOptions { * @param jitterDuration (milliseconds) * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions jitterDuration(final int jitterDuration); + public BatchOptions jitterDuration(final int jitterDuration) { + BatchOptions clone = getClone(); + clone.jitterDuration = jitterDuration; + return clone; + } /** * The client maintains a buffer for failed writes so that the writes will be retried later on. This may @@ -46,62 +82,99 @@ public interface BatchOptions { * @param bufferLimit maximum number of points stored in the retry buffer * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions bufferLimit(final int bufferLimit); + public BatchOptions bufferLimit(final int bufferLimit) { + BatchOptions clone = getClone(); + clone.bufferLimit = bufferLimit; + return clone; + } /** * @param threadFactory a ThreadFactory instance to be used * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions threadFactory(final ThreadFactory threadFactory); + public BatchOptions threadFactory(final ThreadFactory threadFactory) { + BatchOptions clone = getClone(); + clone.threadFactory = threadFactory; + return clone; + } /** * @param exceptionHandler a consumer function to handle asynchronous errors * @return the BatchOptions instance to be able to use it in a fluent manner. 
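With BatchOptions turned into a concrete class whose setters return clones, configuration becomes fluent and side-effect free: DEFAULTS itself is never mutated. A small sketch of the resulting usage; the values are illustrative, not the defaults:

```java
import org.influxdb.BatchOptions;

public class BatchOptionsSketch {
  public static void main(String[] args) {
    // Each fluent call returns a clone, so DEFAULTS stays untouched.
    BatchOptions options = BatchOptions.DEFAULTS
        .actions(2000)        // flush after 2000 buffered points...
        .flushDuration(100)   // ...or at least every 100 ms
        .jitterDuration(50)   // plus up to 50 ms of random jitter
        .bufferLimit(20000)   // retry buffer capacity in points
        .exceptionHandler((points, throwable) -> { /* report dropped points */ });

    System.out.println(BatchOptions.DEFAULTS.getActions()); // still 1000
    System.out.println(options.getActions());               // 2000
  }
}
```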
*/ - BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler); + public BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler) { + BatchOptions clone = getClone(); + clone.exceptionHandler = exceptionHandler; + return clone; + } /** * @param consistency cluster consistency setting (how many nodes have to store data points - * to treat a write as a success) + * to treat a write as a success) * @return the BatchOptions instance to be able to use it in a fluent manner. */ - BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency); - + public BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency) { + BatchOptions clone = getClone(); + clone.consistency = consistency; + return clone; + } /** * @return actions the number of actions to collect */ - int getActions(); + public int getActions() { + return actions; + } /** * @return flushDuration the time to wait at most (milliseconds). */ - int getFlushDuration(); + public int getFlushDuration() { + return flushDuration; + } /** * @return batch flush interval jitter value (milliseconds) */ - int getJitterDuration(); + public int getJitterDuration() { + return jitterDuration; + } /** * @return Maximum number of points stored in the retry buffer, see {@link BatchOptions#bufferLimit(int)} */ - int getBufferLimit(); + public int getBufferLimit() { + return bufferLimit; + } /** * @return a ThreadFactory instance to be used */ - ThreadFactory getThreadFactory(); + public ThreadFactory getThreadFactory() { + return threadFactory; + } /** * @return a consumer function to handle asynchronous errors */ - BiConsumer, Throwable> getExceptionHandler(); + public BiConsumer, Throwable> getExceptionHandler() { + return exceptionHandler; + } /** * @return cluster consistency setting (how many nodes have to store data points * to treat a write as a success) */ - InfluxDB.ConsistencyLevel getConsistency(); + public InfluxDB.ConsistencyLevel getConsistency() { + return consistency; + } + + private BatchOptions getClone() { + try { + return (BatchOptions) this.clone(); + } catch (CloneNotSupportedException e) { + throw new RuntimeException(e); + } + } } diff --git a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java b/src/main/java/org/influxdb/impl/BatchOptionsImpl.java deleted file mode 100644 index 21cc2e757..000000000 --- a/src/main/java/org/influxdb/impl/BatchOptionsImpl.java +++ /dev/null @@ -1,112 +0,0 @@ -package org.influxdb.impl; - -import org.influxdb.BatchOptions; -import org.influxdb.InfluxDB; -import org.influxdb.dto.Point; - -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.function.BiConsumer; - -public final class BatchOptionsImpl implements BatchOptions, Cloneable { - - public static final BatchOptions DEFAULTS = new BatchOptionsImpl(); - - // default values here are consistent with Telegraf - public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; - public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; - public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; - public static final int DEFAULT_BUFFER_LIMIT = 10000; - - private int actions = DEFAULT_BATCH_ACTIONS_LIMIT; - private int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; - private int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; - private int bufferLimit = DEFAULT_BUFFER_LIMIT; - - ThreadFactory threadFactory = Executors.defaultThreadFactory(); - BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { - }; - InfluxDB.ConsistencyLevel 
consistency = InfluxDB.ConsistencyLevel.ONE; - - private BatchOptionsImpl() { - } - - public BatchOptions actions(final int actions) { - BatchOptionsImpl clone = getClone(); - clone.actions = actions; - return clone; - } - - public BatchOptions flushDuration(final int flushDuration) { - BatchOptionsImpl clone = getClone(); - clone.flushDuration = flushDuration; - return clone; - } - - public BatchOptions jitterDuration(final int jitterDuration) { - BatchOptionsImpl clone = getClone(); - clone.jitterDuration = jitterDuration; - return clone; - } - - public BatchOptions bufferLimit(final int bufferLimit) { - BatchOptionsImpl clone = getClone(); - clone.bufferLimit = bufferLimit; - return clone; - } - - public BatchOptions threadFactory(final ThreadFactory threadFactory) { - BatchOptionsImpl clone = getClone(); - clone.threadFactory = threadFactory; - return clone; - } - - public BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler) { - BatchOptionsImpl clone = getClone(); - clone.exceptionHandler = exceptionHandler; - return clone; - } - - public BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency) { - BatchOptionsImpl clone = getClone(); - clone.consistency = consistency; - return clone; - } - - private BatchOptionsImpl getClone() { - try { - return (BatchOptionsImpl) this.clone(); - } catch (CloneNotSupportedException e) { - throw new RuntimeException(e); - } - } - - public int getActions() { - return actions; - } - - public int getFlushDuration() { - return flushDuration; - } - - public int getJitterDuration() { - return jitterDuration; - } - - public InfluxDB.ConsistencyLevel getConsistency() { - return consistency; - } - - public ThreadFactory getThreadFactory() { - return threadFactory; - } - - public BiConsumer, Throwable> getExceptionHandler() { - return exceptionHandler; - } - - @Override - public int getBufferLimit() { - return bufferLimit; - } -} From 4d25472c250a75912e0bcf8408227db0723223f1 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Tue, 23 Jan 2018 14:48:02 +0700 Subject: [PATCH 088/148] fix testHandlerOnRetryImpossible - increase waiting time --- src/test/java/org/influxdb/BatchOptionsTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 86f720bce..27aad3cf6 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -287,7 +287,7 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { writeSomePoints(spy, 1); - Thread.sleep(1000); + Thread.sleep(3000); verify(mockHandler, times(1)).accept(any(), any()); QueryResult result = influxDB.query(new Query("select * from weather", dbName)); From 03ebb44b5a68ddd1444c62556cb7cd1c767d734c Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 23 Jan 2018 10:12:53 +0100 Subject: [PATCH 089/148] code review comments inside --- .../java/org/influxdb/BatchOptionsTest.java | 31 ++++++++++++++----- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 27aad3cf6..84c33e66a 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -74,6 +74,8 @@ public void accept(Iterable points, Throwable throwable) { /** * Test the implementation of {@link BatchOptions#actions(int)} }. 
+ * TODO: try to make this run faster, set lower flush duration (eg. 100) + * TODO: you have to test that the points are not in the DB before flush duration as well */ @Test public void testActionsSetting() throws InterruptedException { @@ -104,7 +106,8 @@ public void testActionsSetting() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#flushDuration(int)} }. - * @throws InterruptedException + * @throws InterruptedException + * TODO: Remove this completely it doesn't test anything new compared to testActionSetting */ @Test public void testFlushDuration() throws InterruptedException { @@ -132,7 +135,9 @@ public void testFlushDuration() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. - * @throws InterruptedException + * @throws InterruptedException + * TODO: Make this run faster, set flush duration to 100. Set jitter interval to 500. + * TODO: Test that after 100ms the points are not in the DB yet. */ @Test public void testJitterDuration() throws InterruptedException { @@ -171,7 +176,8 @@ public void testNegativeJitterDuration() { Assertions.assertThrows(IllegalArgumentException.class, () -> { BatchOptions options = BatchOptions.DEFAULTS.jitterDuration(-10); influxDB.enableBatch(options); - + // TODO: the lines below has no function, remove it + // TODO: you should use dedicated influxDB object so that other tests are not influenced influxDB.disableBatch(); options = BatchOptions.DEFAULTS.jitterDuration(0); influxDB.enableBatch(); @@ -204,6 +210,8 @@ private void doTestBufferLimit(int bufferLimit, int actions) throws InterruptedE /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. + * TODO: Name the test functions as it is clear what is it testing + * TODO: Remove this test until the test scenario is clear */ @Test public void testBufferLimit1() throws InterruptedException { @@ -214,6 +222,8 @@ public void testBufferLimit1() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. + * TODO: Name the test functions as it is clear what is it testing + * TODO: Remove this test until the test scenario is clear */ @Test public void testBufferLimit2() throws InterruptedException { @@ -230,7 +240,8 @@ public void testNegativeBufferLimit() { Assertions.assertThrows(IllegalArgumentException.class, () -> { BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(-10); influxDB.enableBatch(options); - + // TODO: the lines below has no function, remove it + // TODO: you should use dedicated influxDB object so that other tests are not influenced influxDB.disableBatch(); options = BatchOptions.DEFAULTS.bufferLimit(0); influxDB.enableBatch(); @@ -240,7 +251,9 @@ public void testNegativeBufferLimit() { /** * Test the implementation of {@link BatchOptions#threadFactory(ThreadFactory)} }. - * @throws InterruptedException + * @throws InterruptedException + * TODO: there is no assertion testing that the thread factory is used + * TODO: make this test run faster eg. 100ms */ @Test public void testThreadFactory() throws InterruptedException { @@ -268,7 +281,8 @@ public void testThreadFactory() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. - * @throws InterruptedException + * @throws InterruptedException + * TODO: make this test run faster eg. 
100ms */ @Test public void testHandlerOnRetryImpossible() throws InterruptedException { @@ -302,7 +316,8 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. - * @throws InterruptedException + * @throws InterruptedException + * TODO: make this test run faster eg. 100ms */ @Test public void testHandlerOnRetryPossible() throws InterruptedException { @@ -351,6 +366,8 @@ public Object answer(InvocationOnMock invocation) throws Throwable { /** * Test the implementation of {@link BatchOptions#consistency(InfluxDB.ConsistencyLevel)} }. * @throws InterruptedException + * TODO: there is no assertion testing that the consistency value set is propagated to InfluxDB + * TODO: make this test run faster eg. 100ms */ @Test public void testConsistency() throws InterruptedException { From 2e144604f992f40571bf7a8a239c09c46bb1468b Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 24 Jan 2018 13:03:06 +0100 Subject: [PATCH 090/148] Documentation for enableBatch(BatchOptions). Changed the documentation flow so that batching is the default/recommended setup. --- README.md | 139 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 83 insertions(+), 56 deletions(-) diff --git a/README.md b/README.md index df5069b83..40688258f 100644 --- a/README.md +++ b/README.md @@ -12,48 +12,68 @@ To connect to InfluxDB 0.8.x you need to use influxdb-java version 1.6. This implementation is meant as a Java rewrite of the influxdb-go package. All low level REST Api calls are available. -## Usages +## Usage -### Basic Usages: +### Basic Usage: + +This is a recommended approach to write data points into InfluxDB. The influxdb-java +client is storing your writes into an internal buffer and flushes them asynchronously +to InfluxDB at a fixed flush interval to achieve good performance on both client and +server side. This requires influxdb-java v2.7 or newer. 
+ +If you want to write data points immediately into InfluxDB and synchronously process +resulting errors see [this section.](#synchronous-writes) ```java InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); String dbName = "aTimeSeries"; influxDB.createDatabase(dbName); +influxDB.setDatabase(dbName); String rpName = "aRetentionPolicy"; influxDB.createRetentionPolicy(rpName, dbName, "30d", "30m", 2, true); +influxDB.setRetentionPolicy(rpName); + +influxDB.enableBatch(BatchOptions.DEFAULTS); + +influxDB.write(Point.measurement("cpu") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .addField("idle", 90L) + .addField("user", 9L) + .addField("system", 1L) + .build()); + +influxDB.write(Point.measurement("disk") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .addField("used", 80L) + .addField("free", 1L) + .build()); -BatchPoints batchPoints = BatchPoints - .database(dbName) - .tag("async", "true") - .retentionPolicy(rpName) - .consistency(ConsistencyLevel.ALL) - .build(); -Point point1 = Point.measurement("cpu") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("idle", 90L) - .addField("user", 9L) - .addField("system", 1L) - .build(); -Point point2 = Point.measurement("disk") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("used", 80L) - .addField("free", 1L) - .build(); -batchPoints.point(point1); -batchPoints.point(point2); -influxDB.write(batchPoints); Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query); influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); +influxDB.close(); ``` + + +Any errors that happen during the batch flush won't leak into the caller of the `write` method. By default, any kind of errors will be just logged with "SEVERE" level. +If you need to be notified and do some custom logic when such asynchronous errors happen, you can add an error handler with a `BiConsumer, Throwable>` using the overloaded `enableBatch` method: + +```java +influxDB.enableBatch(BatchOptions.DEFAULTS.exceptionHandler( + (failedPoints, throwable) -> { /* custom error handling here */ }) +); +``` + Note: +* Batching functionality creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. To do so simply call: ```influxDB.close()``` +* `InfluxDB.enableBatch(BatchOptions)` is available since version 2.9. Prior versions use `InfluxDB.enableBatch(actions, flushInterval, timeUnit)` or similar based on the configuration parameters you want to set. 
* APIs to create and drop retention policies are supported only in versions > 2.7 * If you are using influxdb < 2.8, you should use retention policy: 'autogen' * If you are using influxdb < 1.0.0, you should use 'default' instead of 'autogen' -If your application produces only single Points, you can enable the batching functionality of influxdb-java: + +If your points are written into different databases and retention policies, the more complex InfluxDB.write() methods can be used: ```java InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); @@ -63,7 +83,7 @@ String rpName = "aRetentionPolicy"; influxDB.createRetentionPolicy(rpName, dbName, "30d", "30m", 2, true); // Flush every 2000 Points, at least every 100ms -influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); +influxDB.enableBatch(BatchOptions.DEFAULTS.actions(2000).flushDuration(100)); Point point1 = Point.measurement("cpu") .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) @@ -83,53 +103,47 @@ Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query); influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); +influxDB.close(); ``` -Note that the batching functionality creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. To do so simply call: ```influxDB.close()``` -If all of your points are written to the same database and retention policy, the simpler write() methods can be used. -This requires influxdb-java v2.7 or newer. +#### Synchronous writes + +If you want to write the data points immediately to InfluxDB (and handle the errors as well) without any delays see the following example: ```java InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); String dbName = "aTimeSeries"; influxDB.createDatabase(dbName); -influxDB.setDatabase(dbName); String rpName = "aRetentionPolicy"; influxDB.createRetentionPolicy(rpName, dbName, "30d", "30m", 2, true); -influxDB.setRetentionPolicy(rpName); - -// Flush every 2000 Points, at least every 100ms -influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); - -influxDB.write(Point.measurement("cpu") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("idle", 90L) - .addField("user", 9L) - .addField("system", 1L) - .build()); - -influxDB.write(Point.measurement("disk") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("used", 80L) - .addField("free", 1L) - .build()); +BatchPoints batchPoints = BatchPoints + .database(dbName) + .tag("async", "true") + .retentionPolicy(rpName) + .consistency(ConsistencyLevel.ALL) + .build(); +Point point1 = Point.measurement("cpu") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .addField("idle", 90L) + .addField("user", 9L) + .addField("system", 1L) + .build(); +Point point2 = Point.measurement("disk") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .addField("used", 80L) + .addField("free", 1L) + .build(); +batchPoints.point(point1); +batchPoints.point(point2); +influxDB.write(batchPoints); Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query); influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); ``` -Also note that any errors that happen during the batch flush won't leak into the caller of the `write` method. By default, any kind of errors will be just logged with "SEVERE" level. 
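For readers on a pre-2.9 client, the notes above point at the older enableBatch overloads; a rough equivalence with the BatchOptions form, assuming the usual java.util.concurrent imports and an already-connected influxDB instance, looks like this:

```java
// Pre-2.9 overload: flush every 2000 points, at least every 100 ms
influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS, Executors.defaultThreadFactory(),
    (failedPoints, throwable) -> { /* custom error handling here */ });

// 2.9+ equivalent using BatchOptions
influxDB.enableBatch(BatchOptions.DEFAULTS
    .actions(2000)
    .flushDuration(100)
    .threadFactory(Executors.defaultThreadFactory())
    .exceptionHandler((failedPoints, throwable) -> { /* custom error handling here */ }));
```

The equivalence is only rough: with the defaults shown, bufferLimit (10000) exceeds actions, so the BatchOptions path uses the retry-capable writer introduced in the earlier patches, whereas the legacy overloads write each batch one-shot.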
- -If you need to be notified and do some custom logic when such asynchronous errors happen, you can add an error handler with a `BiConsumer, Throwable>` using the overloaded `enableBatch` method: - -```java -// Flush every 2000 Points, at least every 100ms -influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS, Executors.defaultThreadFactory(), (failedPoints, throwable) -> { /* custom error handling here */ }); -``` - -### Advanced Usages: +### Advanced Usage: #### Gzip's support (version 2.5+ required): @@ -157,7 +171,6 @@ Query query = new Query("SELECT idle FROM cpu", dbName); influxDB.query(query, 20, queryResult -> System.out.println(queryResult)); ``` - #### QueryResult mapper to POJO (version 2.7+ required): An alternative way to handle the QueryResult object is now available. @@ -240,6 +253,20 @@ this.influxDB.query(new Query("SELECT idle FROM cpu", dbName), queryResult -> { }); ``` +#### Batch flush interval jittering (version 2.9+ required) + +When using large number of influxdb-java clients against a single server it may happen that all the clients +will submit their buffered points at the same time and possibly overloading the server. This is usually happening +when all the clients are started at once - for instance as members of cloud hosted large cluster networks. +If all the clients have the same flushDuration set this situation will repeat periodically. + +To solve this situation the influxdb-java offers an option to offset the flushDuration by a random interval so that +the clients will flush their buffers in different intervals: + +```java +influxDB.enableBatch(BatchOptions.DEFAULTS.jitterDuration(500); +``` + ### Other Usages: For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java") From 5b6d2465d3a947e0f7cbc87ea5b07d14893685f7 Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 30 Jan 2018 16:45:21 +0100 Subject: [PATCH 091/148] renaming BatchOptions.setConsistency to BatchOptions.consistency --- src/main/java/org/influxdb/BatchOptions.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java index e9e8c87fc..316dccd00 100644 --- a/src/main/java/org/influxdb/BatchOptions.java +++ b/src/main/java/org/influxdb/BatchOptions.java @@ -95,7 +95,7 @@ public BatchOptions exceptionHandler(final BiConsumer, Throwable * to treat a write as a success) * @return the BatchOptions instance to be able to use it in a fluent manner. 
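The jitter snippet added to the README section above is missing its closing parenthesis; a compilable form, assuming an already-connected influxDB client, is:

```java
// Flush every second, offset by a random 0-500 ms jitter per flush
influxDB.enableBatch(BatchOptions.DEFAULTS.flushDuration(1000).jitterDuration(500));
```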
*/ - public BatchOptions setConsistency(final InfluxDB.ConsistencyLevel consistency) { + public BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency) { BatchOptions clone = getClone(); clone.consistency = consistency; return clone; From 028fafff0fd331deef7a6144df0b7a5d9ee05311 Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 30 Jan 2018 16:46:23 +0100 Subject: [PATCH 092/148] Adding test for parameters setting --- .../java/org/influxdb/BatchOptionsTest.java | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index f58a685af..7abd67532 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -10,8 +10,10 @@ import org.junit.runner.RunWith; import java.io.IOException; +import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; @RunWith(JUnitPlatform.class) public class BatchOptionsTest { @@ -23,6 +25,29 @@ public void setUp() throws InterruptedException, IOException { this.influxDB = TestUtils.connectToInfluxDB(); } + @Test + public void testParametersSet() { + BatchOptions options = BatchOptions.DEFAULTS.actions(3); + Assertions.assertEquals(3, options.getActions()); + options=options.consistency(InfluxDB.ConsistencyLevel.ANY); + Assertions.assertEquals(InfluxDB.ConsistencyLevel.ANY, options.getConsistency()); + options=options.flushDuration(1001); + Assertions.assertEquals(1001, options.getFlushDuration()); + options=options.jitterDuration(104); + Assertions.assertEquals(104, options.getJitterDuration()); + BiConsumer, Throwable> handler=new BiConsumer, Throwable>() { + @Override + public void accept(Iterable points, Throwable throwable) { + + } + }; + options=options.exceptionHandler(handler); + Assertions.assertEquals(handler, options.getExceptionHandler()); + ThreadFactory tf= Executors.defaultThreadFactory(); + options=options.threadFactory(tf); + Assertions.assertEquals(tf, options.getThreadFactory()); + } + /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. 
*/ From 7478124167b5676104aa647e3781f82e35711dc4 Mon Sep 17 00:00:00 2001 From: dubsky Date: Tue, 30 Jan 2018 17:42:27 +0100 Subject: [PATCH 093/148] prevent lost writes when client closes before flush interval --- src/main/java/org/influxdb/impl/BatchProcessor.java | 1 + src/main/java/org/influxdb/impl/BatchWriter.java | 5 +++++ .../java/org/influxdb/impl/OneShotBatchWriter.java | 5 +++++ .../org/influxdb/impl/RetryCapableBatchWriter.java | 11 +++++++++++ 4 files changed, 22 insertions(+) diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index 3f2d7b4b1..b0f153c98 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -362,6 +362,7 @@ public void run() { void flushAndShutdown() { this.write(); this.scheduler.shutdown(); + this.batchWriter.close(); } /** diff --git a/src/main/java/org/influxdb/impl/BatchWriter.java b/src/main/java/org/influxdb/impl/BatchWriter.java index 4763010f9..2a71ebddd 100644 --- a/src/main/java/org/influxdb/impl/BatchWriter.java +++ b/src/main/java/org/influxdb/impl/BatchWriter.java @@ -13,5 +13,10 @@ interface BatchWriter { * @param batchPointsCollection to write */ void write(Collection batchPointsCollection); + + /** + * FLush all cached writes into InfluxDB. The application is about to exit. + */ + void close(); } diff --git a/src/main/java/org/influxdb/impl/OneShotBatchWriter.java b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java index e981fe627..96754f144 100644 --- a/src/main/java/org/influxdb/impl/OneShotBatchWriter.java +++ b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java @@ -22,4 +22,9 @@ public void write(final Collection batchPointsCollection) { influxDB.write(batchPoints); } } + + @Override + public void close() { + + } } diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java index db4e66d4c..e5ec0fe42 100644 --- a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -107,6 +107,17 @@ public void write(final Collection collection) { } } + @Override + public void close() { + // try to write everything queued / buffered + for (BatchPoints points : batchQueue) { + WriteResult result = tryToWrite(points); + if (result.outcome != WriteResultOutcome.WRITTEN) { + exceptionHandler.accept(points.getPoints(), result.throwable); + } + } + } + private WriteResult tryToWrite(final BatchPoints batchPoints) { try { influxDB.write(batchPoints); From 042b3370de691e3575c8dea81b962df667fa023e Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 31 Jan 2018 12:44:17 +0100 Subject: [PATCH 094/148] removing dependency on org.json, using moshi, fixing synchronization issue with RetryCapableBatchWriter, added exception for authorization problems --- pom.xml | 5 -- .../java/org/influxdb/InfluxDBException.java | 73 +++++++++++++++---- .../java/org/influxdb/impl/InfluxDBImpl.java | 15 +--- .../impl/RetryCapableBatchWriter.java | 8 +- .../impl/RetryCapableBatchWriterTest.java | 4 +- 5 files changed, 70 insertions(+), 35 deletions(-) diff --git a/pom.xml b/pom.xml index 6510af401..bd92d18d7 100644 --- a/pom.xml +++ b/pom.xml @@ -261,10 +261,5 @@ logging-interceptor 3.9.1 - - org.json - json - 20171018 - diff --git a/src/main/java/org/influxdb/InfluxDBException.java b/src/main/java/org/influxdb/InfluxDBException.java index 5444bb354..2db463d04 100644 --- 
a/src/main/java/org/influxdb/InfluxDBException.java +++ b/src/main/java/org/influxdb/InfluxDBException.java @@ -1,5 +1,8 @@ package org.influxdb; +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.Moshi; + /** * A wrapper for various exceptions caused while interacting with InfluxDB. * @@ -26,12 +29,22 @@ public boolean isRetryWorth() { return true; } + /* See https://github.com/influxdata/influxdb/blob/master/tsdb/shard.go */ static final String FIELD_TYPE_CONFLICT_ERROR = "field type conflict"; + /* See https://github.com/influxdata/influxdb/blob/master/coordinator/points_writer.go */ static final String POINTS_BEYOND_RETENTION_POLICY_ERROR = "points beyond retention policy"; + /* See https://github.com/influxdata/influxdb/blob/master/models/points.go */ static final String UNABLE_TO_PARSE_ERROR = "unable to parse"; + /* See https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb/influxdb.go */ static final String HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR = "hinted handoff queue not empty"; - static final String DATABASE_NOT_FOUND_ERROR = "database not found"; + /* See https://github.com/influxdata/influxdb/blob/master/tsdb/engine/tsm1/cache.go */ static final String CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR = "cache-max-memory-size exceeded"; + /* For all messages below see https://github.com/influxdata/influxdb/blob/master/services/httpd/handler.go */ + static final String DATABASE_NOT_FOUND_ERROR = "database not found"; + static final String USER_REQUIRED_ERROR = "user is required to write to database"; + static final String USER_NOT_AUTHORIZED_ERROR = "user is not authorized to write to database"; + static final String AUTHORIZATION_FAILED_ERROR = "authorization failed"; + static final String USERNAME_REQUIRED_ERROR = "username required"; public static final class DatabaseNotFoundError extends InfluxDBException { private DatabaseNotFoundError(final String message) { @@ -103,26 +116,56 @@ public boolean isRetryWorth() { } } - public static InfluxDBException buildExceptionForErrorState(final String error) { - if (error.contains(DATABASE_NOT_FOUND_ERROR)) { - return new DatabaseNotFoundError(error); + public static final class AuthorizationFailedException extends InfluxDBException { + public AuthorizationFailedException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + private static InfluxDBException buildExceptionFromErrorMessage(final String errorMessage) { + if (errorMessage.contains(DATABASE_NOT_FOUND_ERROR)) { + return new DatabaseNotFoundError(errorMessage); + } + if (errorMessage.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) { + return new PointsBeyondRetentionPolicyException(errorMessage); } - if (error.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) { - return new PointsBeyondRetentionPolicyException(error); + if (errorMessage.contains(FIELD_TYPE_CONFLICT_ERROR)) { + return new FieldTypeConflictException(errorMessage); } - if (error.contains(FIELD_TYPE_CONFLICT_ERROR)) { - return new FieldTypeConflictException(error); + if (errorMessage.contains(UNABLE_TO_PARSE_ERROR)) { + return new UnableToParseException(errorMessage); } - if (error.contains(UNABLE_TO_PARSE_ERROR)) { - return new UnableToParseException(error); + if (errorMessage.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) { + return new HintedHandOffQueueNotEmptyException(errorMessage); } - if (error.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) { - return new HintedHandOffQueueNotEmptyException(error); + if 
(errorMessage.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) { + return new CacheMaxMemorySizeExceededException(errorMessage); } - if (error.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) { - return new CacheMaxMemorySizeExceededException(error); + if (errorMessage.contains(USER_REQUIRED_ERROR) + || errorMessage.contains(USER_NOT_AUTHORIZED_ERROR) + || errorMessage.contains(AUTHORIZATION_FAILED_ERROR) + || errorMessage.contains(USERNAME_REQUIRED_ERROR)) { + return new AuthorizationFailedException(errorMessage); } - throw new InfluxDBException(error); + return new InfluxDBException(errorMessage); } + private static class ErrorMessage { + public String error; + } + + public static InfluxDBException buildExceptionForErrorState(final String errorBody) { + try { + Moshi moshi = new Moshi.Builder().build(); + JsonAdapter adapter = moshi.adapter(ErrorMessage.class).lenient(); + ErrorMessage errorMessage = adapter.fromJson(errorBody); + return InfluxDBException.buildExceptionFromErrorMessage(errorMessage.error); + } catch (Exception e) { + return new InfluxDBException(errorBody); + } + } } diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 701f2f5c2..ee221ab4d 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -24,8 +24,6 @@ import org.influxdb.dto.QueryResult; import org.influxdb.impl.BatchProcessor.HttpBatchEntry; import org.influxdb.impl.BatchProcessor.UdpBatchEntry; -import org.json.JSONException; -import org.json.JSONObject; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; @@ -572,6 +570,9 @@ private Call callQuery(final Query query) { return call; } + static class ErrorMessage { + public String error; + } private T execute(final Call call) { try { @@ -580,15 +581,7 @@ private T execute(final Call call) { return response.body(); } try (ResponseBody errorBody = response.errorBody()) { - try { - JSONObject body = new JSONObject(errorBody.string()); - Object error = body.getString("error"); - if (error != null && error instanceof String) { - throw InfluxDBException.buildExceptionForErrorState((String) error); - } - } catch (JSONException e) { - } - throw new InfluxDBException(errorBody.string()); + throw InfluxDBException.buildExceptionForErrorState(errorBody.string()); } } catch (IOException e) { throw new InfluxDBIOException(e); diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java index e5ec0fe42..141b215de 100644 --- a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -62,8 +62,10 @@ private WriteResult(final InfluxDBException e) { } } + /* This method is synchronized to avoid parallel execution when the user invokes flush/close + * of the client in the middle of scheduled write execution (buffer flush / action limit overrun) */ @Override - public void write(final Collection collection) { + public synchronized void write(final Collection collection) { // empty the cached data first ListIterator batchQueueIterator = batchQueue.listIterator(); while (batchQueueIterator.hasNext()) { @@ -107,8 +109,10 @@ public void write(final Collection collection) { } } + /* This method is synchronized to avoid parallel execution when the BatchProcessor scheduler + * has been shutdown but there are jobs still being executed (using RetryCapableBatchWriter.write).*/ @Override - 
public void close() { + public synchronized void close() { // try to write everything queued / buffered for (BatchPoints points : batchQueue) { WriteResult result = tryToWrite(points); diff --git a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java index 4e635556e..0777fc89e 100644 --- a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java +++ b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java @@ -42,8 +42,8 @@ public void test() { BatchPoints bp3 = getBP(8); BatchPoints bp4 = getBP(100); - Exception nonRecoverable = InfluxDBException.buildExceptionForErrorState("database not found: cvfdgf"); - Exception recoverable = InfluxDBException.buildExceptionForErrorState("cache-max-memory-size exceeded 104/1400"); + Exception nonRecoverable = InfluxDBException.buildExceptionForErrorState("{ \"error\": \"database not found: cvfdgf\" }"); + Exception recoverable = InfluxDBException.buildExceptionForErrorState("{ \"error\": \"cache-max-memory-size exceeded 104/1400\" }"); Mockito.doThrow(nonRecoverable).when(mockInfluxDB).write(bp0); Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp1); Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp2); From 93fd32fea6a1639c718311d53f8d6801af096d06 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Thu, 1 Feb 2018 13:11:05 +0700 Subject: [PATCH 095/148] + fix BatchOptionsTest + add mote tests for RetryCapableBatchWriterTest --- .../java/org/influxdb/BatchOptionsTest.java | 261 ++++++++++++------ src/test/java/org/influxdb/TestAnswer.java | 27 ++ .../impl/RetryCapableBatchWriterTest.java | 90 +++++- 3 files changed, 299 insertions(+), 79 deletions(-) create mode 100644 src/test/java/org/influxdb/TestAnswer.java diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 84c33e66a..611bfa2fb 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -17,6 +17,7 @@ import static org.mockito.Mockito.*; import java.io.IOException; +import java.text.MessageFormat; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; @@ -74,14 +75,12 @@ public void accept(Iterable points, Throwable throwable) { /** * Test the implementation of {@link BatchOptions#actions(int)} }. - * TODO: try to make this run faster, set lower flush duration (eg. 
100) - * TODO: you have to test that the points are not in the DB before flush duration as well */ @Test public void testActionsSetting() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { - BatchOptions options = BatchOptions.DEFAULTS.actions(3); + BatchOptions options = BatchOptions.DEFAULTS.actions(3).flushDuration(100); this.influxDB.enableBatch(options); this.influxDB.createDatabase(dbName); @@ -94,9 +93,20 @@ public void testActionsSetting() throws InterruptedException { .addField("system", 3.0 * j).build(); this.influxDB.write(point); } - Thread.sleep(500); - QueryResult result=influxDB.query(new Query("select * from cpu", dbName)); - Assertions.assertEquals(3, result.getResults().get(0).getSeries().get(0).getValues().size()); + + //wait for at least one flush period + Thread.sleep(200); + //test at least 3 points was written + QueryResult result = influxDB.query(new Query("select * from cpu", dbName)); + int size = result.getResults().get(0).getSeries().get(0).getValues().size(); + Assertions.assertTrue(size >= 3, "there must be be at least 3 points written"); + + //wait for at least one flush period + Thread.sleep(200); + + //test all 5 points was written + result = influxDB.query(new Query("select * from cpu", dbName)); + Assertions.assertEquals(5, result.getResults().get(0).getSeries().get(0).getValues().size()); } finally { this.influxDB.disableBatch(); @@ -107,24 +117,27 @@ public void testActionsSetting() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#flushDuration(int)} }. * @throws InterruptedException - * TODO: Remove this completely it doesn't test anything new compared to testActionSetting */ @Test public void testFlushDuration() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { - BatchOptions options = BatchOptions.DEFAULTS.flushDuration(500); + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(200); influxDB.createDatabase(dbName); influxDB.setDatabase(dbName); influxDB.enableBatch(options); write20Points(influxDB); + //check no points writen to DB before the flush duration QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertNull(result.getResults().get(0).getSeries()); Assertions.assertNull(result.getResults().get(0).getError()); - Thread.sleep(1000); + //wait for at least one flush + Thread.sleep(500); result = influxDB.query(new Query("select * from weather", dbName)); + + //check points written already to DB Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } finally { @@ -136,26 +149,26 @@ public void testFlushDuration() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. * @throws InterruptedException - * TODO: Make this run faster, set flush duration to 100. Set jitter interval to 500. - * TODO: Test that after 100ms the points are not in the DB yet. 
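Several of the review TODOs above boil down to replacing fixed Thread.sleep calls with bounded polling, so the tests run faster while still tolerating jitter (a flush may lag by up to flushDuration plus jitterDuration). A hedged sketch of such a helper; the class and method names are hypothetical and not part of the library:

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class AwaitUtil {

  private AwaitUtil() {
  }

  /**
   * Polls the condition every pollMillis until it holds or timeoutMillis elapses.
   * With flushDuration=100 and jitterDuration=500 a caller would pass a timeout
   * of at least ~600 ms, because a flush may be delayed by the full jitter.
   */
  static boolean await(final BooleanSupplier condition, final long timeoutMillis, final long pollMillis)
      throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
    while (System.nanoTime() < deadline) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(pollMillis);
    }
    return condition.getAsBoolean();
  }
}
```

A test would then poll, for example, until a query returns the expected row count, with a timeout slightly above flushDuration plus jitterDuration, instead of always sleeping for the worst case.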
*/ @Test public void testJitterDuration() throws InterruptedException { - String dbName = "write_unittest_" + System.currentTimeMillis(); try { - BatchOptions options = BatchOptions.DEFAULTS.flushDuration(1000).jitterDuration(125); + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(100).jitterDuration(500); influxDB.createDatabase(dbName); influxDB.setDatabase(dbName); influxDB.enableBatch(options); write20Points(influxDB); + Thread.sleep(100); + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertNull(result.getResults().get(0).getSeries()); Assertions.assertNull(result.getResults().get(0).getError()); - Thread.sleep(1125); + //wait for at least one flush + Thread.sleep(500); result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } @@ -176,60 +189,123 @@ public void testNegativeJitterDuration() { Assertions.assertThrows(IllegalArgumentException.class, () -> { BatchOptions options = BatchOptions.DEFAULTS.jitterDuration(-10); influxDB.enableBatch(options); - // TODO: the lines below has no function, remove it - // TODO: you should use dedicated influxDB object so that other tests are not influenced - influxDB.disableBatch(); - options = BatchOptions.DEFAULTS.jitterDuration(0); - influxDB.enableBatch(); influxDB.disableBatch(); }); } - - private void doTestBufferLimit(int bufferLimit, int actions) throws InterruptedException { + /** + * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. + * use a bufferLimit that less than actions, then OneShotBatchWrite is used + */ + @Test + public void testBufferLimitLessThanActions() throws InterruptedException { + + TestAnswer answer = new TestAnswer() { + + InfluxDBException influxDBException = InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)); + @Override + protected void check(InvocationOnMock invocation) { + if ((Boolean) params.get("throwException")) { + throw influxDBException; + } + } + }; + + InfluxDB spy = spy(influxDB); + //the spied influxDB.write(BatchPoints) will always throw InfluxDBException + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + String dbName = "write_unittest_" + System.currentTimeMillis(); try { - BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(bufferLimit).actions(actions); + answer.params.put("throwException", true); + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(3).actions(4).flushDuration(100).exceptionHandler(mockHandler); - influxDB.createDatabase(dbName); - influxDB.setDatabase(dbName); - influxDB.enableBatch(options); - write20Points(influxDB); + spy.createDatabase(dbName); + spy.setDatabase(dbName); + spy.enableBatch(options); + write20Points(spy); - QueryResult result = influxDB.query(new Query("select * from weather", dbName)); - Thread.sleep(1000); - result = influxDB.query(new Query("select * from weather", dbName)); + Thread.sleep(300); + verify(mockHandler, atLeastOnce()).accept(any(), any()); + + QueryResult result = spy.query(new Query("select * from weather", dbName)); + //assert 0 point written because of InfluxDBException and OneShotBatchWriter did not retry + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + answer.params.put("throwException", false); + write20Points(spy); + 
Thread.sleep(300); + result = spy.query(new Query("select * from weather", dbName)); + //assert all 20 points written to DB due to no exception Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } finally { - influxDB.disableBatch(); - influxDB.deleteDatabase(dbName); + spy.disableBatch(); + spy.deleteDatabase(dbName); } - } - - - /** - * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. - * TODO: Name the test functions as it is clear what is it testing - * TODO: Remove this test until the test scenario is clear - */ - @Test - public void testBufferLimit1() throws InterruptedException { - - doTestBufferLimit(3, 4); } /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. - * TODO: Name the test functions as it is clear what is it testing - * TODO: Remove this test until the test scenario is clear + * use a bufferLimit that greater than actions, then RetryCapableBatchWriter is used */ @Test - public void testBufferLimit2() throws InterruptedException { - - doTestBufferLimit(10, 4); + public void testBufferLimitGreaterThanActions() throws InterruptedException { + TestAnswer answer = new TestAnswer() { + + int nthCall = 0; + InfluxDBException cacheMaxMemorySizeExceededException = InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)); + @Override + protected void check(InvocationOnMock invocation) { + + switch (nthCall++) { + case 0: + throw InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.DATABASE_NOT_FOUND_ERROR)); + case 1: + throw InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)); + default: + break; + } + } + }; + + InfluxDB spy = spy(influxDB); + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(10).actions(8).flushDuration(100).exceptionHandler(mockHandler); + + spy.createDatabase(dbName); + spy.setDatabase(dbName); + spy.enableBatch(options); + writeSomePoints(spy, "measurement1", 0, 5); + + Thread.sleep(300); + verify(mockHandler, atLeastOnce()).accept(any(), any()); + + QueryResult result = spy.query(new Query("select * from measurement1", dbName)); + //assert 0 point written because of non-retry capable DATABASE_NOT_FOUND_ERROR and RetryCapableBatchWriter did not retry + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + writeSomePoints(spy, "measurement2", 0, 5); + + Thread.sleep(300); + + result = spy.query(new Query("select * from measurement2", dbName)); + //assert all 6 point written because of retry capable CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR and RetryCapableBatchWriter did retry + Assertions.assertEquals(6, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + spy.disableBatch(); + spy.deleteDatabase(dbName); + } + } /** * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. 
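The buffer-limit tests above feed the writer synthetic InfluxDB error bodies built by createErrorBody and rely on how InfluxDBException classifies them. A small sketch of that classification, assuming the post-patch influxdb-java (with its moshi dependency) on the classpath; the database name is a placeholder:

```java
import org.influxdb.InfluxDBException;

public class ErrorClassificationSketch {
  public static void main(String[] args) {
    // Same JSON shape the tests build via createErrorBody(...)
    InfluxDBException recoverable = InfluxDBException.buildExceptionForErrorState(
        "{ \"error\": \"cache-max-memory-size exceeded 104/1400\" }");
    InfluxDBException nonRecoverable = InfluxDBException.buildExceptionForErrorState(
        "{ \"error\": \"database not found: mydb\" }");

    System.out.println(recoverable.isRetryWorth());    // true  -> the batch is kept and retried
    System.out.println(nonRecoverable.isRetryWorth()); // false -> the batch is dropped, handler notified
  }
}
```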
@@ -240,11 +316,6 @@ public void testNegativeBufferLimit() { Assertions.assertThrows(IllegalArgumentException.class, () -> { BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(-10); influxDB.enableBatch(options); - // TODO: the lines below has no function, remove it - // TODO: you should use dedicated influxDB object so that other tests are not influenced - influxDB.disableBatch(); - options = BatchOptions.DEFAULTS.bufferLimit(0); - influxDB.enableBatch(); influxDB.disableBatch(); }); } @@ -252,24 +323,31 @@ public void testNegativeBufferLimit() { /** * Test the implementation of {@link BatchOptions#threadFactory(ThreadFactory)} }. * @throws InterruptedException - * TODO: there is no assertion testing that the thread factory is used - * TODO: make this test run faster eg. 100ms */ @Test public void testThreadFactory() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); try { - BatchOptions options = BatchOptions.DEFAULTS.threadFactory((r) -> { - return new Thread(r); + ThreadFactory spy = spy(new ThreadFactory() { + + ThreadFactory threadFactory = Executors.defaultThreadFactory(); + @Override + public Thread newThread(Runnable r) { + return threadFactory.newThread(r); + } }); + BatchOptions options = BatchOptions.DEFAULTS.threadFactory(spy).flushDuration(100); influxDB.createDatabase(dbName); influxDB.setDatabase(dbName); influxDB.enableBatch(options); write20Points(influxDB); - Thread.sleep(3000); + Thread.sleep(500); + //Test the thread factory is used somewhere + verify(spy, atLeastOnce()).newThread(any()); + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } finally { @@ -282,7 +360,6 @@ public void testThreadFactory() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. * @throws InterruptedException - * TODO: make this test run faster eg. 100ms */ @Test public void testHandlerOnRetryImpossible() throws InterruptedException { @@ -293,7 +370,7 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { try { BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); - BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler).flushDuration(100); spy.createDatabase(dbName); spy.setDatabase(dbName); @@ -301,7 +378,7 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { writeSomePoints(spy, 1); - Thread.sleep(3000); + Thread.sleep(200); verify(mockHandler, times(1)).accept(any(), any()); QueryResult result = influxDB.query(new Query("select * from weather", dbName)); @@ -317,7 +394,6 @@ public void testHandlerOnRetryImpossible() throws InterruptedException { /** * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. * @throws InterruptedException - * TODO: make this test run faster eg. 
100ms */ @Test public void testHandlerOnRetryPossible() throws InterruptedException { @@ -339,7 +415,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { try { BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); - BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler).flushDuration(100); spy.createDatabase(dbName); spy.setDatabase(dbName); @@ -347,7 +423,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { writeSomePoints(spy, 1); - Thread.sleep(5000); + Thread.sleep(500); verify(mockHandler, never()).accept(any(), any()); verify(spy, times(2)).write(any(BatchPoints.class)); @@ -366,32 +442,57 @@ public Object answer(InvocationOnMock invocation) throws Throwable { /** * Test the implementation of {@link BatchOptions#consistency(InfluxDB.ConsistencyLevel)} }. * @throws InterruptedException - * TODO: there is no assertion testing that the consistency value set is propagated to InfluxDB - * TODO: make this test run faster eg. 100ms */ @Test public void testConsistency() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); - influxDB.createDatabase(dbName); - influxDB.setDatabase(dbName); + + InfluxDB spy = spy(influxDB); + spy.createDatabase(dbName); + spy.setDatabase(dbName); try { - int n = 5; - for (ConsistencyLevel consistencyLevel : ConsistencyLevel.values()) { - BatchOptions options = BatchOptions.DEFAULTS.consistency(consistencyLevel); + TestAnswer answer = new TestAnswer() { + @Override + protected void check(InvocationOnMock invocation) { + BatchPoints batchPoints = (BatchPoints) invocation.getArgument(0); + Assertions.assertEquals(params.get("consistencyLevel"), batchPoints.getConsistency()); + + } + }; + doAnswer(answer).when(spy).write(any(BatchPoints.class)); - influxDB.enableBatch(options); - writeSomePoints(influxDB, n); + int n = 0; + for (final ConsistencyLevel consistencyLevel : ConsistencyLevel.values()) { + answer.params.put("consistencyLevel", consistencyLevel); + BatchOptions options = BatchOptions.DEFAULTS.consistency(consistencyLevel).flushDuration(100); + spy.enableBatch(options); + Assertions.assertEquals(options.getConsistency(), consistencyLevel); + + writeSomePoints(spy, n, n + 4); + n += 5; + Thread.sleep(300); - Thread.sleep(2000); - QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + verify(spy, atLeastOnce()).write(any(BatchPoints.class)); + QueryResult result = spy.query(new Query("select * from weather", dbName)); Assertions.assertEquals(n, result.getResults().get(0).getSeries().get(0).getValues().size()); - n += 5; - this.influxDB.disableBatch(); + + spy.disableBatch(); } } finally { - this.influxDB.deleteDatabase(dbName); + spy.deleteDatabase(dbName); + } + } + + private void writeSomePoints(InfluxDB influxDB, String measurement, int firstIndex, int lastIndex) { + for (int i = firstIndex; i <= lastIndex; i++) { + Point point = Point.measurement(measurement) + .time(i,TimeUnit.HOURS) + .addField("field1", (double) i) + .addField("field2", (double) (i) * 1.1) + .addField("field3", "moderate").build(); + influxDB.write(point); } } @@ -413,4 +514,8 @@ private void write20Points(InfluxDB influxDB) { private void writeSomePoints(InfluxDB influxDB, int n) { writeSomePoints(influxDB, 0, n - 1); } + + private static String createErrorBody(String errorMessage) { + return MessageFormat.format("'{' \"error\": \"{0}\" '}'", errorMessage); + } } diff 
--git a/src/test/java/org/influxdb/TestAnswer.java b/src/test/java/org/influxdb/TestAnswer.java new file mode 100644 index 000000000..8b0a2cd41 --- /dev/null +++ b/src/test/java/org/influxdb/TestAnswer.java @@ -0,0 +1,27 @@ +package org.influxdb; + +import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.Map; + +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public abstract class TestAnswer implements Answer { + + Map params = new HashMap<>(); + + protected abstract void check(InvocationOnMock invocation); + + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + check(invocation); + //call only non-abstract real method + if (Modifier.isAbstract(invocation.getMethod().getModifiers())) { + return null; + } else { + return invocation.callRealMethod(); + } + } + +} \ No newline at end of file diff --git a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java index 0777fc89e..e4b9ec020 100644 --- a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java +++ b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java @@ -2,16 +2,20 @@ import org.influxdb.InfluxDB; import org.influxdb.InfluxDBException; +import org.influxdb.TestAnswer; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; import org.junit.Assert; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; -import java.util.Collection; +import java.text.MessageFormat; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.function.BiConsumer; @@ -86,4 +90,88 @@ public void test() { Assert.assertEquals(capturedArgument2.get(1).getPoints().size(), 100); } + + @Test + public void testAllNonRecoverableExceptions() { + + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BiConsumer errorHandler = mock(BiConsumer.class); + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 150, 100); + + InfluxDBException nonRecoverable1 = InfluxDBException.buildExceptionForErrorState(createErrorBody("database not found: cvfdgf")); + InfluxDBException nonRecoverable2 = InfluxDBException.buildExceptionForErrorState(createErrorBody("points beyond retention policy 'abc'")); + InfluxDBException nonRecoverable3 = InfluxDBException.buildExceptionForErrorState(createErrorBody("unable to parse 'abc'")); + InfluxDBException nonRecoverable4 = InfluxDBException.buildExceptionForErrorState(createErrorBody("hinted handoff queue not empty service='abc'")); + InfluxDBException nonRecoverable5 = InfluxDBException.buildExceptionForErrorState(createErrorBody("field type conflict 'abc'")); + InfluxDBException nonRecoverable6 = new InfluxDBException.RetryBufferOverrunException(createErrorBody("Retry BufferOverrun Exception")); + InfluxDBException nonRecoverable7 = InfluxDBException.buildExceptionForErrorState(createErrorBody("user is not authorized to write to database")); + + List exceptions = Arrays.asList(nonRecoverable1, nonRecoverable2, nonRecoverable3, + nonRecoverable4, nonRecoverable5, nonRecoverable6, nonRecoverable7); + int size = exceptions.size(); + doAnswer(new TestAnswer() { + int i = 0; + @Override + protected void check(InvocationOnMock invocation) { + if (i < size) { + throw 
exceptions.get(i++); + } + } + }).when(mockInfluxDB).write(any(BatchPoints.class)); + + BatchPoints bp = getBP(8); + for (int i = 0; i < size; i++) { + rw.write(Collections.singletonList(bp)); + } + verify(errorHandler, times(size)).accept(any(), any());; + } + + @Test + public void testClosingWriter() { + InfluxDB mockInfluxDB = mock(InfluxDB.class); + BiConsumer, Throwable> errorHandler = mock(BiConsumer.class); + + BatchPoints bp5 = getBP(5); + BatchPoints bp6 = getBP(6); + BatchPoints bp90 = getBP(90); + + doAnswer(new TestAnswer() { + int i = 0; + @Override + protected void check(InvocationOnMock invocation) { + //first 4 calls + if (i++ < 4) { + throw InfluxDBException.buildExceptionForErrorState("cache-max-memory-size exceeded 104/1400"); + } + return; + } + }).when(mockInfluxDB).write(any(BatchPoints.class)); + + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 150, 100); + + rw.write(Collections.singletonList(bp5)); + rw.write(Collections.singletonList(bp6)); + rw.write(Collections.singletonList(bp90)); + //recoverable exception -> never errorHandler + verify(errorHandler, never()).accept(any(), any()); + verify(mockInfluxDB, times(3)).write(any(BatchPoints.class)); + + rw.close(); + + ArgumentCaptor captor4Write = ArgumentCaptor.forClass(BatchPoints.class); + ArgumentCaptor> captor4Accept = ArgumentCaptor.forClass(List.class); + verify(errorHandler, times(1)).accept(captor4Accept.capture(), any()); + verify(mockInfluxDB, times(5)).write(captor4Write.capture()); + + //bp5 and bp6 were merged and writing of the merged batch points on closing should be failed + Assertions.assertEquals(11, captor4Accept.getValue().size()); + //bp90 was written because no more exception thrown + Assertions.assertEquals(90, captor4Write.getAllValues().get(4).getPoints().size()); + } + + private static String createErrorBody(String errorMessage) { + return MessageFormat.format("'{' \"error\": \"{0}\" '}'", errorMessage); + } } From 21e335025f13aaff951fadcaa8c20bf573997439 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Thu, 1 Feb 2018 17:28:16 +0700 Subject: [PATCH 096/148] add testRetryingKeepChronologicalOrder, test RetryCapableBatchWriterTest is aware of and guarantees the chronological order of the written batchpoints --- .../impl/RetryCapableBatchWriterTest.java | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java index e4b9ec020..189f4d6d1 100644 --- a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java +++ b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java @@ -18,6 +18,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import static org.mockito.Mockito.mock; @@ -171,6 +172,60 @@ protected void check(InvocationOnMock invocation) { Assertions.assertEquals(90, captor4Write.getAllValues().get(4).getPoints().size()); } + @Test + public void testRetryingKeepChronologicalOrder() { + + BatchPoints.Builder b = BatchPoints.database("d1"); + for (int i = 0; i < 200; i++) { + b.point(Point.measurement("x1").time(1,TimeUnit.HOURS). + addField("x", 1). + tag("t", "v1").build()).build(); + } + + BatchPoints bp1 = b.build(); + + b = BatchPoints.database("d1"); + + b.point(Point.measurement("x1").time(1,TimeUnit.HOURS). + addField("x", 2). 
+ tag("t", "v2").build()).build(); + + for (int i = 0; i < 199; i++) { + b.point(Point.measurement("x1").time(2,TimeUnit.HOURS). + addField("x", 2). + tag("t", "v2").build()).build(); + } + BatchPoints bp2 = b.build(); + + InfluxDB mockInfluxDB = mock(InfluxDB.class); + BiConsumer, Throwable> errorHandler = mock(BiConsumer.class); + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 450, 150); + doAnswer(new TestAnswer() { + int i = 0; + @Override + protected void check(InvocationOnMock invocation) { + if (i++ < 1) { + throw InfluxDBException.buildExceptionForErrorState("cache-max-memory-size exceeded 104/1400"); + } + return; + } + }).when(mockInfluxDB).write(any(BatchPoints.class)); + + rw.write(Collections.singletonList(bp1)); + rw.write(Collections.singletonList(bp2)); + + ArgumentCaptor captor4Write = ArgumentCaptor.forClass(BatchPoints.class); + verify(mockInfluxDB, times(3)).write(captor4Write.capture()); + + //bp1 written but failed because of recoverable cache-max-memory-size error + Assertions.assertEquals(bp1, captor4Write.getAllValues().get(0)); + //bp1 rewritten on writing of bp2 + Assertions.assertEquals(bp1, captor4Write.getAllValues().get(1)); + //bp2 written + Assertions.assertEquals(bp2, captor4Write.getAllValues().get(2)); + + } private static String createErrorBody(String errorMessage) { return MessageFormat.format("'{' \"error\": \"{0}\" '}'", errorMessage); } From b664dd0fd6848309920f85c71917c3e5d0b49344 Mon Sep 17 00:00:00 2001 From: dubsky Date: Thu, 1 Feb 2018 20:40:41 +0100 Subject: [PATCH 097/148] adding very basic jitter interval test --- .../java/org/influxdb/BatchOptionsTest.java | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 7abd67532..e1311dcf2 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -92,4 +92,50 @@ public void testActionsSetting() throws InterruptedException { } } + /** + * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. 
+ * @throws InterruptedException + */ + @Test + public void testJitterDuration() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(100).jitterDuration(500); + influxDB.createDatabase(dbName); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); + + Thread.sleep(100); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + //wait for at least one flush + Thread.sleep(500); + result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + influxDB.disableBatch(); + influxDB.deleteDatabase(dbName); + } + } + + private void writeSomePoints(InfluxDB influxDB, int firstIndex, int lastIndex) { + for (int i = firstIndex; i <= lastIndex; i++) { + Point point = Point.measurement("weather") + .time(i,TimeUnit.HOURS) + .addField("temperature", (double) i) + .addField("humidity", (double) (i) * 1.1) + .addField("uv_index", "moderate").build(); + influxDB.write(point); + } + } + + private void write20Points(InfluxDB influxDB) { + writeSomePoints(influxDB, 0, 19); + } } From a189aade5b8ade810d846fe41d74067e50b3bbbe Mon Sep 17 00:00:00 2001 From: dubsky Date: Thu, 1 Feb 2018 21:35:24 +0100 Subject: [PATCH 098/148] making the jitter duration test more durable for Travis CI --- .../java/org/influxdb/BatchOptionsTest.java | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index e1311dcf2..11c255e37 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -108,15 +108,26 @@ public void testJitterDuration() throws InterruptedException { write20Points(influxDB); Thread.sleep(100); + System.out.println("Jitter duration test wrote points"); QueryResult result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertNull(result.getResults().get(0).getSeries()); Assertions.assertNull(result.getResults().get(0).getError()); - //wait for at least one flush + System.out.println("Waiting for jitter to expire"); Thread.sleep(500); - result = influxDB.query(new Query("select * from weather", dbName)); - Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + + //wait for at least one flush + for(int i=0;i<10;i++) { + if(i==9) throw new RuntimeException("waited for too long"); + result = influxDB.query(new Query("select * from weather", dbName)); + if(result.getResults().get(0).getSeries()!=null) { + System.out.println("Jitter duration result series "+i+"/"+result.getResults().get(0).getSeries()); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + break; + } + Thread.sleep(100); + } } finally { influxDB.disableBatch(); From a686b891ffc674e56ef16aea22ab8aef02f3c6a7 Mon Sep 17 00:00:00 2001 From: dubsky Date: Thu, 1 Feb 2018 22:26:20 +0100 Subject: [PATCH 099/148] fixing 'write compare udp performance'; test will run longer and will not count database deletion into results --- .../java/org/influxdb/PerformanceTests.java | 65 ++++++++++--------- 1 file changed, 36 insertions(+), 29 
deletions(-) diff --git a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java index e7c363d0e..cd10298ed 100644 --- a/src/test/java/org/influxdb/PerformanceTests.java +++ b/src/test/java/org/influxdb/PerformanceTests.java @@ -106,36 +106,43 @@ public void testMaxWritePointsPerformance() { this.influxDB.deleteDatabase(dbName); } + /** + * states that String.join("\n", records)*/ @Test public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { - //prepare data - List lineProtocols = new ArrayList(); - for (int i = 0; i < 1000; i++) { - Point point = Point.measurement("udp_single_poit").addField("v", i).build(); - lineProtocols.add(point.lineProtocol()); - } - - String dbName = "write_compare_udp_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); - this.influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); - - //write batch of 1000 single string. - long start = System.currentTimeMillis(); - this.influxDB.write(UDP_PORT, lineProtocols); - long elapsedForBatchWrite = System.currentTimeMillis() - start; - System.out.println("performance(ms):write udp with batch of 1000 string:" + elapsedForBatchWrite); - - //write 1000 single string by udp. - start = System.currentTimeMillis(); - for (String lineProtocol: lineProtocols){ - this.influxDB.write(UDP_PORT, lineProtocol); - } - this.influxDB.deleteDatabase(dbName); - - long elapsedForSingleWrite = System.currentTimeMillis() - start; - System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); - - Assertions.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); - } + //prepare data + List lineProtocols = new ArrayList(); + for (int i = 0; i < 2000; i++) { + Point point = Point.measurement("udp_single_poit").addField("v", i).build(); + lineProtocols.add(point.lineProtocol()); + } + + String dbName = "write_compare_udp_" + System.currentTimeMillis(); + this.influxDB.createDatabase(dbName); + this.influxDB.enableBatch(10000, 100, TimeUnit.MILLISECONDS); + + int repetitions = 15; + long start = System.currentTimeMillis(); + for (int i = 0; i < repetitions; i++) { + //write batch of 2000 single string. + this.influxDB.write(UDP_PORT, lineProtocols); + } + long elapsedForBatchWrite = System.currentTimeMillis() - start; + System.out.println("performance(ms):write udp with batch of 1000 string:" + elapsedForBatchWrite); + + // write 2000 single string by udp. 
+ start = System.currentTimeMillis(); + for (int i = 0; i < repetitions; i++) { + for (String lineProtocol : lineProtocols) { + this.influxDB.write(UDP_PORT, lineProtocol); + } + } + + long elapsedForSingleWrite = System.currentTimeMillis() - start; + System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); + + this.influxDB.deleteDatabase(dbName); + Assertions.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); + } } From 6374a5beefe97aec31dd30e9d4943a0b3dad4fb7 Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Wed, 7 Feb 2018 13:54:29 +0700 Subject: [PATCH 100/148] fix testJitterDuration - increase waiting time --- src/test/java/org/influxdb/BatchOptionsTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index 611bfa2fb..df11552c8 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -168,7 +168,7 @@ public void testJitterDuration() throws InterruptedException { Assertions.assertNull(result.getResults().get(0).getError()); //wait for at least one flush - Thread.sleep(500); + Thread.sleep(1000); result = influxDB.query(new Query("select * from weather", dbName)); Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); } From b703db457d50fb0717b34b3873311e732d8ad288 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 7 Feb 2018 16:22:35 +0100 Subject: [PATCH 101/148] PR #409 changes documented --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ef151b3c..289056baf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## 2.9 [unreleased] +### Features + +- New extensible API to configure batching properties. [PR #409] +- New configuration property 'jitter interval' to avoid multiple clients hit the server periodically at the same time. [PR #409] ## 2.8 [2017-12-06] From 513c176c3eebd3bd29bf2c9d74f7bca085f37579 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 7 Feb 2018 17:15:15 +0100 Subject: [PATCH 102/148] updating documentation for PR#410 --- CHANGELOG.md | 2 ++ README.md | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 289056baf..e21aeb30d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ - New extensible API to configure batching properties. [PR #409] - New configuration property 'jitter interval' to avoid multiple clients hit the server periodically at the same time. [PR #409] +- New strategy on handling errors, client performs retries writes when server gets overloaded [PR #410] +- New exceptions give the client user easier way to classify errors reported by the server. [PR #410] ## 2.8 [2017-12-06] diff --git a/README.md b/README.md index 40688258f..5e3c2e714 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,16 @@ influxDB.enableBatch(BatchOptions.DEFAULTS.exceptionHandler( ); ``` +With batching enabled the client provides two strategies how to deal with errors thrown by the InfluxDB server. + + 1. 'One shot' write - on failed write request to InfluxDB server an error is reported to the client using the means mentioned above. + 2. 'Retry on error' write (used by default) - on failed write the request by the client is repeated after batchInterval elapses + (if there is a chance the write will succeed - the error was caused by overloading the server, a network error etc.) 
+ When new data points are written before the previous (failed) points are successfully written, those are queued inside the client + and wait until older data points are successfully written. + Size of this queue is limited and configured by `BatchOptions.bufferLimit` property. When the limit is reached, the oldest points + in the queue are dropped. 'Retry on error' strategy is used when individual write batch size defined by `BatchOptions.actions` is lower than `BatchOptions.bufferLimit`. + Note: * Batching functionality creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. To do so simply call: ```influxDB.close()``` * `InfluxDB.enableBatch(BatchOptions)` is available since version 2.9. Prior versions use `InfluxDB.enableBatch(actions, flushInterval, timeUnit)` or similar based on the configuration parameters you want to set. @@ -72,7 +82,6 @@ Note: * If you are using influxdb < 2.8, you should use retention policy: 'autogen' * If you are using influxdb < 1.0.0, you should use 'default' instead of 'autogen' - If your points are written into different databases and retention policies, the more complex InfluxDB.write() methods can be used: ```java @@ -105,6 +114,7 @@ influxDB.dropRetentionPolicy(rpName, dbName); influxDB.deleteDatabase(dbName); influxDB.close(); ``` + #### Synchronous writes From d2ff8243eb5e5f459ae833f96750cef43bd5b087 Mon Sep 17 00:00:00 2001 From: dubsky Date: Wed, 7 Feb 2018 17:37:51 +0100 Subject: [PATCH 103/148] fixing wrong exception name --- src/main/java/org/influxdb/InfluxDBException.java | 6 +++--- src/test/java/org/influxdb/BatchOptionsTest.java | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/main/java/org/influxdb/InfluxDBException.java b/src/main/java/org/influxdb/InfluxDBException.java index 2db463d04..bc09396eb 100644 --- a/src/main/java/org/influxdb/InfluxDBException.java +++ b/src/main/java/org/influxdb/InfluxDBException.java @@ -46,8 +46,8 @@ public boolean isRetryWorth() { static final String AUTHORIZATION_FAILED_ERROR = "authorization failed"; static final String USERNAME_REQUIRED_ERROR = "username required"; - public static final class DatabaseNotFoundError extends InfluxDBException { - private DatabaseNotFoundError(final String message) { + public static final class DatabaseNotFoundException extends InfluxDBException { + private DatabaseNotFoundException(final String message) { super(message); } @@ -128,7 +128,7 @@ public boolean isRetryWorth() { private static InfluxDBException buildExceptionFromErrorMessage(final String errorMessage) { if (errorMessage.contains(DATABASE_NOT_FOUND_ERROR)) { - return new DatabaseNotFoundError(errorMessage); + return new DatabaseNotFoundException(errorMessage); } if (errorMessage.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) { return new PointsBeyondRetentionPolicyException(errorMessage); diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java b/src/test/java/org/influxdb/BatchOptionsTest.java index df11552c8..b7d1830a3 100644 --- a/src/test/java/org/influxdb/BatchOptionsTest.java +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -1,7 +1,7 @@ package org.influxdb; import org.influxdb.InfluxDB.ConsistencyLevel; -import org.influxdb.InfluxDBException.DatabaseNotFoundError; +import org.influxdb.InfluxDBException.DatabaseNotFoundException; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; import org.influxdb.dto.Query; @@ -366,7 +366,7 @@ public void 
testHandlerOnRetryImpossible() throws InterruptedException { String dbName = "write_unittest_" + System.currentTimeMillis(); InfluxDB spy = spy(influxDB); - doThrow(DatabaseNotFoundError.class).when(spy).write(any(BatchPoints.class)); + doThrow(DatabaseNotFoundException.class).when(spy).write(any(BatchPoints.class)); try { BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); From 3562a1a658aa69a2ec3d00dd91a4d2926de02fae Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Tue, 20 Feb 2018 13:45:04 +0700 Subject: [PATCH 104/148] add test for batch writing and retry --- .../java/org/influxdb/PerformanceTests.java | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java index cd10298ed..1fd0a0444 100644 --- a/src/test/java/org/influxdb/PerformanceTests.java +++ b/src/test/java/org/influxdb/PerformanceTests.java @@ -3,6 +3,8 @@ import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; @@ -10,7 +12,11 @@ import org.junit.jupiter.api.Test; import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import static org.mockito.Mockito.*; + +import java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; @@ -144,5 +150,61 @@ public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { this.influxDB.deleteDatabase(dbName); Assertions.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); } + + @Test + public void testRetryWritePointsInBatch() throws InterruptedException { + String dbName = "d"; + + InfluxDB spy = spy(influxDB); + TestAnswer answer = new TestAnswer() { + boolean started = false; + InfluxDBException influxDBException = new InfluxDBException(new SocketTimeoutException()); + @Override + protected void check(InvocationOnMock invocation) { + if (started || System.currentTimeMillis() >= (Long) params.get("startTime")) { + System.out.println("call real"); + started = true; + } else { + System.out.println("throw"); + throw influxDBException; + } + } + }; + + answer.params.put("startTime", System.currentTimeMillis() + 80000); + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + + spy.createDatabase(dbName); + BatchOptions batchOptions = BatchOptions.DEFAULTS.actions(100000).flushDuration(20000).bufferLimit(3000000).exceptionHandler((points, throwable) -> { + System.out.println("+++++++++++ exceptionHandler +++++++++++"); + System.out.println(throwable); + System.out.println("++++++++++++++++++++++++++++++++++++++++"); + }); + + //this.influxDB.enableBatch(100000, 60, TimeUnit.SECONDS); + spy.enableBatch(batchOptions); + String rp = TestUtils.defaultRetentionPolicy(spy.version()); + + for (long i = 0; i < 400000; i++) { + Point point = Point.measurement("s").time(i, TimeUnit.MILLISECONDS).addField("v", 1.0).build(); + spy.write(dbName, rp, point); + } + + System.out.println("sleep"); + Thread.sleep(120000); + try { + QueryResult result = spy.query(new Query("select count(v) from s", dbName)); + double d = Double.parseDouble(result.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString()); + Assertions.assertEquals(400000, d); + } catch (Exception e) { + 
System.out.println("+++++++++++++++++count() +++++++++++++++++++++"); + System.out.println(e); + System.out.println("++++++++++++++++++++++++++++++++++++++++++++++"); + + } + + spy.disableBatch(); + spy.deleteDatabase(dbName); + } } From 7d045c183d6a1d0bf2a697d46e26054c0b96e80b Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Tue, 20 Feb 2018 10:19:10 +0100 Subject: [PATCH 105/148] Update junit from 5.0.2 -> 5.1.0, update mockito from 2.13.0 -> 2.15.0 --- pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index a791f661e..ce94089a2 100644 --- a/pom.xml +++ b/pom.xml @@ -212,13 +212,13 @@ org.junit.jupiter junit-jupiter-engine - 5.0.2 + 5.1.0 test org.junit.platform junit-platform-runner - 1.0.2 + 1.1.0 test @@ -236,7 +236,7 @@ org.mockito mockito-core - 2.13.0 + 2.15.0 test From 3af477ad6b068a6d070d49a998ec5df5b15b5b1f Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Mon, 26 Feb 2018 15:01:32 +0100 Subject: [PATCH 106/148] release preparation --- pom.xml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pom.xml b/pom.xml index ce94089a2..6ace49203 100644 --- a/pom.xml +++ b/pom.xml @@ -111,6 +111,11 @@ maven-resources-plugin 3.0.2 + + org.apache.maven.plugins + maven-release-plugin + 2.5.3 + @@ -206,6 +211,10 @@ + + org.apache.maven.plugins + maven-release-plugin + From b9a795902df6314ecdbdb79adf827a442f023213 Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Mon, 26 Feb 2018 15:03:05 +0100 Subject: [PATCH 107/148] influxdb run script --- run_influxdb.sh | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100755 run_influxdb.sh diff --git a/run_influxdb.sh b/run_influxdb.sh new file mode 100755 index 000000000..2fe5206b9 --- /dev/null +++ b/run_influxdb.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +docker run \ + --detach \ + --name influxdb \ + --publish 8086:8086 \ + --publish 8089:8089/udp \ + --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \ + influxdb:latest From 9b200f03f49d06d7bec4b5e53348ce2cbc867a0e Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Tue, 27 Feb 2018 11:06:14 +0100 Subject: [PATCH 108/148] final solution for running the release --- pom.xml | 607 +++++++++++++++----------- run_influxdb.sh | 9 - src/main/resources/docker-compose.yml | 11 + 3 files changed, 355 insertions(+), 272 deletions(-) delete mode 100755 run_influxdb.sh create mode 100644 src/main/resources/docker-compose.yml diff --git a/pom.xml b/pom.xml index 6ace49203..dd0d27f4d 100644 --- a/pom.xml +++ b/pom.xml @@ -1,274 +1,355 @@ - - 4.0.0 - org.influxdb - influxdb-java - jar - 2.9-SNAPSHOT - influxdb java bindings - Java API to access the InfluxDB REST API - http://www.influxdb.org + + 4.0.0 + org.influxdb + influxdb-java + jar + 2.9-SNAPSHOT + influxdb java bindings + Java API to access the InfluxDB REST API + http://www.influxdb.org - - 3.2.1 - + + 3.2.1 + - - - The MIT License (MIT) - http://www.opensource.org/licenses/mit-license.php - repo - - - - UTF-8 - + + + The MIT License (MIT) + http://www.opensource.org/licenses/mit-license.php + repo + + + + UTF-8 + - - scm:git:git@github.com:influxdata/influxdb-java.git - scm:git:git@github.com:influxdata/influxdb-java.git - git@github.com:influxdata/influxdb-java.git - + + scm:git:git@github.com:influxdata/influxdb-java.git + scm:git:git@github.com:influxdata/influxdb-java.git + git@github.com:influxdata/influxdb-java.git + - - - majst01 - Stefan Majer - stefan.majer@gmail.com - - - - - - org.codehaus.mojo - findbugs-maven-plugin - 3.0.5 - - true - - target/site - - - - + + + majst01 + Stefan 
Majer + stefan.majer@gmail.com + + + + + + org.codehaus.mojo + findbugs-maven-plugin + 3.0.5 + + true + + target/site + + + + - - - ossrh - https://oss.sonatype.org/content/repositories/snapshots - - - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ - - + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + ossrh + https://oss.sonatype.org/service/local/staging/deploy/maven2/ + + - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.7.0 - - 1.8 - 1.8 - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.20 - - - org.apache.maven.plugins - maven-site-plugin - 3.7 - - - org.apache.maven.plugins - maven-clean-plugin - 3.0.0 - - - org.apache.maven.plugins - maven-deploy-plugin - 2.8.2 - - - org.apache.maven.plugins - maven-install-plugin - 2.5.2 - - - org.apache.maven.plugins - maven-jar-plugin - 3.0.2 - - - org.apache.maven.plugins - maven-resources-plugin - 3.0.2 - - - org.apache.maven.plugins - maven-release-plugin - 2.5.3 - - - - + + + + src/main/resources + + docker-compose.yml + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.7.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + org.apache.maven.plugins + maven-site-plugin + 3.7 + + + org.apache.maven.plugins + maven-clean-plugin + 3.0.0 + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + + org.apache.maven.plugins + maven-install-plugin + 2.5.2 + + + org.apache.maven.plugins + maven-jar-plugin + 3.0.2 + + + org.apache.maven.plugins + maven-resources-plugin + 3.0.2 + + + org.apache.maven.plugins + maven-release-plugin + 2.5.3 + + + + - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.8 - true - - ossrh - https://oss.sonatype.org/ - true - - - - org.apache.maven.plugins - maven-source-plugin - 3.0.1 - - - attach-sources - - jar-no-fork - - - - + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 + true + + ossrh + https://oss.sonatype.org/ + true + + + + org.apache.maven.plugins + maven-source-plugin + 3.0.1 + + + attach-sources + + jar-no-fork + + + + - - org.apache.maven.plugins - maven-javadoc-plugin - 3.0.0 - - - attach-javadocs - - jar - - - - - - - org.jacoco - jacoco-maven-plugin - 0.8.0 - - - - prepare-agent - - - - report - test - - report - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - 3.0.0 - - true - checkstyle.xml - true - - - - verify - - checkstyle - - - - - - org.apache.maven.plugins - maven-release-plugin - - - - - - org.junit.jupiter - junit-jupiter-engine - 5.1.0 - test - - - org.junit.platform - junit-platform-runner - 1.1.0 - test - - - org.hamcrest - hamcrest-all - 1.3 - test - - - org.assertj - assertj-core - 3.9.0 - test - - - org.mockito - mockito-core - 2.15.0 - test - - - com.squareup.retrofit2 - retrofit - 2.3.0 - - - com.squareup.retrofit2 - converter-moshi - 2.3.0 - - - - com.squareup.okhttp3 - okhttp - 3.9.1 - - - com.squareup.okhttp3 - logging-interceptor - 3.9.1 - - + + org.apache.maven.plugins + maven-javadoc-plugin + 3.0.0 + + + attach-javadocs + + jar + + + + + + org.jacoco + jacoco-maven-plugin + 0.8.0 + + + + prepare-agent + + + + report + test + + report + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.0.0 + + true + checkstyle.xml + true + + + + verify + + checkstyle + + + + + + org.apache.maven.plugins + maven-release-plugin + + + release + + + + + + + org.junit.jupiter + junit-jupiter-engine + 5.1.0 + test + + + org.junit.platform + junit-platform-runner + 1.1.0 + test + + + org.hamcrest + hamcrest-all + 1.3 + test + + + 
org.assertj + assertj-core + 3.9.0 + test + + + org.mockito + mockito-core + 2.15.0 + test + + + com.squareup.retrofit2 + retrofit + 2.3.0 + + + com.squareup.retrofit2 + converter-moshi + 2.3.0 + + + + com.squareup.okhttp3 + okhttp + 3.9.1 + + + com.squareup.okhttp3 + logging-interceptor + 3.9.1 + + + + + release + + influxdb:latest + + + + + maven-resources-plugin + 3.0.2 + + + copy-resources + + validate + + copy-resources + + + ${project.build.directory} + + + src/main/resources + true + + docker-compose.yml + + + + + + + + + com.dkanejs.maven.plugins + docker-compose-maven-plugin + 1.0.1 + + + up + process-test-resources + + up + + + ${project.build.directory}/docker-compose.yml + true + + + + down + post-integration-test + + down + + + ${project.build.directory}/docker-compose.yml + true + + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.6 + + + sign-artifacts + verify + + sign + + + + + + + + diff --git a/run_influxdb.sh b/run_influxdb.sh deleted file mode 100755 index 2fe5206b9..000000000 --- a/run_influxdb.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -docker run \ - --detach \ - --name influxdb \ - --publish 8086:8086 \ - --publish 8089:8089/udp \ - --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \ - influxdb:latest diff --git a/src/main/resources/docker-compose.yml b/src/main/resources/docker-compose.yml new file mode 100644 index 000000000..91d5d5cea --- /dev/null +++ b/src/main/resources/docker-compose.yml @@ -0,0 +1,11 @@ +version: '3.1' + +services: + # Define an InfluxDB service + influxdb: + image: ${image} + volumes: + - ${project.basedir}/influxdb.conf:/etc/influxdb/influxdb.conf + ports: + - "8086:8086" + - "8089:8089/udp" \ No newline at end of file From 0e78b6c05cbdaeb8d1fd131761e16c53579d5c92 Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Tue, 27 Feb 2018 13:51:41 +0100 Subject: [PATCH 109/148] changing line indents --- pom.xml | 670 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 334 insertions(+), 336 deletions(-) diff --git a/pom.xml b/pom.xml index dd0d27f4d..b700c0a79 100644 --- a/pom.xml +++ b/pom.xml @@ -1,355 +1,353 @@ - 4.0.0 - org.influxdb - influxdb-java - jar - 2.9-SNAPSHOT - influxdb java bindings - Java API to access the InfluxDB REST API - http://www.influxdb.org + 4.0.0 + org.influxdb + influxdb-java + jar + 2.9-SNAPSHOT + influxdb java bindings + Java API to access the InfluxDB REST API + http://www.influxdb.org - - 3.2.1 - + + 3.2.1 + - - - The MIT License (MIT) - http://www.opensource.org/licenses/mit-license.php - repo - - - - UTF-8 - + + + The MIT License (MIT) + http://www.opensource.org/licenses/mit-license.php + repo + + + + UTF-8 + - - scm:git:git@github.com:influxdata/influxdb-java.git - scm:git:git@github.com:influxdata/influxdb-java.git - git@github.com:influxdata/influxdb-java.git - + + scm:git:git@github.com:influxdata/influxdb-java.git + scm:git:git@github.com:influxdata/influxdb-java.git + git@github.com:influxdata/influxdb-java.git + - - - majst01 - Stefan Majer - stefan.majer@gmail.com - - - - - - org.codehaus.mojo - findbugs-maven-plugin - 3.0.5 - - true - - target/site - - - - + + + majst01 + Stefan Majer + stefan.majer@gmail.com + + + + + + org.codehaus.mojo + findbugs-maven-plugin + 3.0.5 + + true + + target/site + + + + - - - ossrh - https://oss.sonatype.org/content/repositories/snapshots - - - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ - - + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + ossrh + 
https://oss.sonatype.org/service/local/staging/deploy/maven2/ + + - - - - src/main/resources - - docker-compose.yml - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.7.0 - - 1.8 - 1.8 - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.20 - - - org.apache.maven.plugins - maven-site-plugin - 3.7 - - - org.apache.maven.plugins - maven-clean-plugin - 3.0.0 - - - org.apache.maven.plugins - maven-deploy-plugin - 2.8.2 - - - org.apache.maven.plugins - maven-install-plugin - 2.5.2 - - - org.apache.maven.plugins - maven-jar-plugin - 3.0.2 - - - org.apache.maven.plugins - maven-resources-plugin - 3.0.2 - - - org.apache.maven.plugins - maven-release-plugin - 2.5.3 - - - + + + + src/main/resources + + docker-compose.yml + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.7.0 + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + org.apache.maven.plugins + maven-site-plugin + 3.7 + + + org.apache.maven.plugins + maven-clean-plugin + 3.0.0 + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + + org.apache.maven.plugins + maven-install-plugin + 2.5.2 + + + org.apache.maven.plugins + maven-jar-plugin + 3.0.2 + + + org.apache.maven.plugins + maven-resources-plugin + 3.0.2 + + + org.apache.maven.plugins + maven-release-plugin + 2.5.3 + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 + true + + ossrh + https://oss.sonatype.org/ + true + + + + org.apache.maven.plugins + maven-source-plugin + 3.0.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.0.0 + + + attach-javadocs + + jar + + + + + + org.jacoco + jacoco-maven-plugin + 0.8.0 + + + + prepare-agent + + + + report + test + + report + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.0.0 + + true + checkstyle.xml + true + + + + verify + + checkstyle + + + + + + org.apache.maven.plugins + maven-release-plugin + + + release + + + + + + + org.junit.jupiter + junit-jupiter-engine + 5.1.0 + test + + + org.junit.platform + junit-platform-runner + 1.1.0 + test + + + org.hamcrest + hamcrest-all + 1.3 + test + + + org.assertj + assertj-core + 3.9.0 + test + + + org.mockito + mockito-core + 2.15.0 + test + + + com.squareup.retrofit2 + retrofit + 2.3.0 + + + com.squareup.retrofit2 + converter-moshi + 2.3.0 + + + + com.squareup.okhttp3 + okhttp + 3.9.1 + + + com.squareup.okhttp3 + logging-interceptor + 3.9.1 + + + + + release + + influxdb:latest + + - - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.8 - true + + maven-resources-plugin + 3.0.2 + + + copy-resources + + validate + + copy-resources + - ossrh - https://oss.sonatype.org/ - true + ${project.build.directory} + + + src/main/resources + true + + docker-compose.yml + + + - - - org.apache.maven.plugins - maven-source-plugin - 3.0.1 - - - attach-sources - - jar-no-fork - - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - 3.0.0 - - - attach-javadocs - - jar - - - - - - org.jacoco - jacoco-maven-plugin - 0.8.0 - - - - prepare-agent - - - - report - test - - report - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - 3.0.0 + + + + + com.dkanejs.maven.plugins + docker-compose-maven-plugin + 1.0.1 + + + up + process-test-resources + + up + - true - checkstyle.xml - true + ${project.build.directory}/docker-compose.yml + true - - - verify - - checkstyle - - - - - - org.apache.maven.plugins - maven-release-plugin + + + down + post-integration-test + + down + - - release + ${project.build.directory}/docker-compose.yml + true 
- + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.6 + + + sign-artifacts + verify + + sign + + + + - - - - org.junit.jupiter - junit-jupiter-engine - 5.1.0 - test - - - org.junit.platform - junit-platform-runner - 1.1.0 - test - - - org.hamcrest - hamcrest-all - 1.3 - test - - - org.assertj - assertj-core - 3.9.0 - test - - - org.mockito - mockito-core - 2.15.0 - test - - - com.squareup.retrofit2 - retrofit - 2.3.0 - - - com.squareup.retrofit2 - converter-moshi - 2.3.0 - - - - com.squareup.okhttp3 - okhttp - 3.9.1 - - - com.squareup.okhttp3 - logging-interceptor - 3.9.1 - - - - - release - - influxdb:latest - - - - - maven-resources-plugin - 3.0.2 - - - copy-resources - - validate - - copy-resources - - - ${project.build.directory} - - - src/main/resources - true - - docker-compose.yml - - - - - - - - - com.dkanejs.maven.plugins - docker-compose-maven-plugin - 1.0.1 - - - up - process-test-resources - - up - - - ${project.build.directory}/docker-compose.yml - true - - - - down - post-integration-test - - down - - - ${project.build.directory}/docker-compose.yml - true - - - - - - - org.apache.maven.plugins - maven-gpg-plugin - 1.6 - - - sign-artifacts - verify - - sign - - - - - - - - + + + From d744bc98def3d09c9d94b435d344d26857ddc998 Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Tue, 27 Feb 2018 14:19:21 +0100 Subject: [PATCH 110/148] [maven-release-plugin] prepare release influxdb-java-2.9 --- pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index b700c0a79..cabd81039 100644 --- a/pom.xml +++ b/pom.xml @@ -1,11 +1,10 @@ - + 4.0.0 org.influxdb influxdb-java jar - 2.9-SNAPSHOT + 2.9 influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org @@ -29,6 +28,7 @@ scm:git:git@github.com:influxdata/influxdb-java.git scm:git:git@github.com:influxdata/influxdb-java.git git@github.com:influxdata/influxdb-java.git + influxdb-java-2.9 From 84235c8c258028acdb7188dbc0ffcc493aab561f Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Tue, 27 Feb 2018 14:19:29 +0100 Subject: [PATCH 111/148] [maven-release-plugin] prepare for next development iteration --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index cabd81039..4c7809dfd 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ org.influxdb influxdb-java jar - 2.9 + 2.10-SNAPSHOT influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org @@ -28,7 +28,7 @@ scm:git:git@github.com:influxdata/influxdb-java.git scm:git:git@github.com:influxdata/influxdb-java.git git@github.com:influxdata/influxdb-java.git - influxdb-java-2.9 + HEAD From da1a23d742574806048c83f1b9f355c12b729668 Mon Sep 17 00:00:00 2001 From: ivankudibal Date: Tue, 27 Feb 2018 17:57:37 +0100 Subject: [PATCH 112/148] set release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e21aeb30d..452b04f9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 2.9 [unreleased] +## 2.9 [2017-02-27] ### Features From f53a7e733aee1ef65fb57e6f42263d1959b454b5 Mon Sep 17 00:00:00 2001 From: ivankudibal Date: Tue, 27 Feb 2018 17:58:50 +0100 Subject: [PATCH 113/148] Revert "set release date" This reverts commit da1a23d742574806048c83f1b9f355c12b729668. 
--- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 452b04f9a..e21aeb30d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 2.9 [2017-02-27] +## 2.9 [unreleased] ### Features From be3b8b62af32ef64c098fb429a901663545359a6 Mon Sep 17 00:00:00 2001 From: ivankudibal Date: Tue, 27 Feb 2018 17:59:33 +0100 Subject: [PATCH 114/148] set release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e21aeb30d..2cb32802d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 2.9 [unreleased] +## 2.9 [2018-02-27] ### Features From 30589f7e8b1829529f065b7fa4027fd090b0d1f7 Mon Sep 17 00:00:00 2001 From: Iker Aguayo Ureta Date: Thu, 1 Mar 2018 13:42:27 +0100 Subject: [PATCH 115/148] #371 Support dynamic measurement name in InfluxDBResultMapper --- .../influxdb/impl/InfluxDBResultMapper.java | 58 ++++++++++++++----- 1 file changed, 45 insertions(+), 13 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index 2188bcd18..dd3097c12 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -75,31 +75,63 @@ public class InfluxDBResultMapper { * @param queryResult the InfluxDB result object * @param clazz the Class that will be used to hold your measurement data * @param the target type + * * @return a {@link List} of objects from the same Class passed as parameter and sorted on the - * same order as received from InfluxDB. + * same order as received from InfluxDB. + * * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, - * clazz parameter is not annotated with @Measurement or it was not - * possible to define the values of your POJO (e.g. due to an unsupported field type). + * clazz parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). */ public List toPOJO(final QueryResult queryResult, final Class clazz) throws InfluxDBMapperException { + throwExceptionIfMissingAnnotation(clazz); + String measurementName = getMeasurementName(clazz); + return this.toPOJO(queryResult, clazz, measurementName); + } + + /** + *
+ * Process a {@link QueryResult} object returned by the InfluxDB client inspecting the internal + * data structure and creating the respective object instances based on the Class passed as + * parameter. + *
+ * + * @param queryResult the InfluxDB result object + * @param clazz the Class that will be used to hold your measurement data + * @param the target type + * @param measurementName name of the Measurement + * + * @return a {@link List} of objects from the same Class passed as parameter and sorted on the + * same order as received from InfluxDB. + * + * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, + * clazz parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). + */ + public List toPOJO(final QueryResult queryResult, final Class clazz, final String measurementName) + throws InfluxDBMapperException { Objects.requireNonNull(queryResult, "queryResult"); Objects.requireNonNull(clazz, "clazz"); - throwExceptionIfMissingAnnotation(clazz); throwExceptionIfResultWithError(queryResult); cacheMeasurementClass(clazz); List result = new LinkedList(); - String measurementName = getMeasurementName(clazz); + + if (measurementName == null || measurementName.isEmpty()) + { + throw new IllegalArgumentException("measurementName must not be null or empty"); + } + queryResult.getResults().stream() - .filter(internalResult -> Objects.nonNull(internalResult) && Objects.nonNull(internalResult.getSeries())) - .forEach(internalResult -> { - internalResult.getSeries().stream() - .filter(series -> series.getName().equals(measurementName)) - .forEachOrdered(series -> { - parseSeriesAs(series, clazz, result); - }); - }); + .filter(internalResult -> Objects.nonNull(internalResult) && Objects.nonNull(internalResult.getSeries())) + .forEach(internalResult -> { + internalResult.getSeries().stream() + .filter(series -> series.getName().equals(measurementName)) + .forEachOrdered(series -> { + parseSeriesAs(series, clazz, result); + }); + }); return result; } From a00cd83a9c0220f30d84b83f2d647e7051f068d7 Mon Sep 17 00:00:00 2001 From: Iker Aguayo Ureta Date: Fri, 2 Mar 2018 08:55:27 +0100 Subject: [PATCH 116/148] indentation changed, added requireNonNull instead of if --- .../influxdb/impl/InfluxDBResultMapper.java | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index dd3097c12..0c1e9caa3 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -110,6 +110,8 @@ public List toPOJO(final QueryResult queryResult, final Class clazz) t */ public List toPOJO(final QueryResult queryResult, final Class clazz, final String measurementName) throws InfluxDBMapperException { + + Objects.requireNonNull(measurementName, "measurementName"); Objects.requireNonNull(queryResult, "queryResult"); Objects.requireNonNull(clazz, "clazz"); @@ -118,19 +120,14 @@ public List toPOJO(final QueryResult queryResult, final Class clazz, f List result = new LinkedList(); - if (measurementName == null || measurementName.isEmpty()) - { - throw new IllegalArgumentException("measurementName must not be null or empty"); - } - queryResult.getResults().stream() - .filter(internalResult -> Objects.nonNull(internalResult) && Objects.nonNull(internalResult.getSeries())) - .forEach(internalResult -> { - internalResult.getSeries().stream() - .filter(series -> series.getName().equals(measurementName)) - .forEachOrdered(series -> { - parseSeriesAs(series, clazz, result); - }); + .filter(internalResult -> 
Objects.nonNull(internalResult) && Objects.nonNull(internalResult.getSeries())) + .forEach(internalResult -> { + internalResult.getSeries().stream() + .filter(series -> series.getName().equals(measurementName)) + .forEachOrdered(series -> { + parseSeriesAs(series, clazz, result); + }); }); return result; From 17b2f248a5c153772e589a3d3346998f4146d65d Mon Sep 17 00:00:00 2001 From: Michael Schaefers Date: Fri, 9 Mar 2018 15:26:45 +0100 Subject: [PATCH 117/148] Performance: Escape fields and keys more efficiently than using String.replace twice per value. --- src/main/java/org/influxdb/dto/Point.java | 55 ++++++++++++++++------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java index 8c0f91b79..ad33c4b72 100644 --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -10,7 +10,6 @@ import java.util.Objects; import java.util.TreeMap; import java.util.concurrent.TimeUnit; -import java.util.function.Function; import org.influxdb.impl.Preconditions; @@ -26,11 +25,6 @@ public class Point { private Long time; private TimeUnit precision = TimeUnit.NANOSECONDS; private Map fields; - - private static final Function FIELD_ESCAPER = s -> - s.replace("\\", "\\\\").replace("\"", "\\\""); - private static final Function KEY_ESCAPER = s -> - s.replace(" ", "\\ ").replace(",", "\\,").replace("=", "\\="); private static final int MAX_FRACTION_DIGITS = 340; private static final ThreadLocal NUMBER_FORMATTER = ThreadLocal.withInitial(() -> { @@ -182,7 +176,7 @@ public Builder fields(final Map fieldsToAdd) { /** * Add a time to this point. * - * @param timeToSet the time for this point + * @param timeToSet the time for this point * @param precisionToSet the TimeUnit * @return the Builder instance. 
*/ @@ -205,8 +199,8 @@ public Point build() { point.setFields(this.fields); point.setMeasurement(this.measurement); if (this.time != null) { - point.setTime(this.time); - point.setPrecision(this.precision); + point.setTime(this.time); + point.setPrecision(this.precision); } point.setTags(this.tags); return point; @@ -329,10 +323,10 @@ public String lineProtocol() { private void concatenatedTags(final StringBuilder sb) { for (Entry tag : this.tags.entrySet()) { - sb.append(',') - .append(KEY_ESCAPER.apply(tag.getKey())) - .append('=') - .append(KEY_ESCAPER.apply(tag.getValue())); + sb.append(','); + escapeKey(sb, tag.getKey()); + sb.append('='); + escapeKey(sb, tag.getValue()); } sb.append(' '); } @@ -343,8 +337,8 @@ private void concatenatedFields(final StringBuilder sb) { if (value == null) { continue; } - - sb.append(KEY_ESCAPER.apply(field.getKey())).append('='); + escapeKey(sb, field.getKey()); + sb.append('='); if (value instanceof Number) { if (value instanceof Double || value instanceof Float || value instanceof BigDecimal) { sb.append(NUMBER_FORMATTER.get().format(value)); @@ -353,7 +347,9 @@ private void concatenatedFields(final StringBuilder sb) { } } else if (value instanceof String) { String stringValue = (String) value; - sb.append('"').append(FIELD_ESCAPER.apply(stringValue)).append('"'); + sb.append('"'); + escapeField(sb, stringValue); + sb.append('"'); } else { sb.append(value); } @@ -368,6 +364,31 @@ private void concatenatedFields(final StringBuilder sb) { } } + static void escapeKey(StringBuilder sb, String key) { + for (int i = 0; i < key.length(); i++) { + switch (key.charAt(i)) { + case ' ': + case ',': + case '=': + sb.append('\\'); + default: + sb.append(key.charAt(i)); + } + } + } + + static void escapeField(StringBuilder sb, String field) { + for (int i = 0; i < field.length(); i++) { + switch (field.charAt(i)) { + case '\\': + case '\"': + sb.append('\\'); + default: + sb.append(field.charAt(i)); + } + } + } + private void formatedTime(final StringBuilder sb) { if (this.time == null || this.precision == null) { return; @@ -380,7 +401,7 @@ private static class MeasurementStringBuilder { private final int length; MeasurementStringBuilder(final String measurement) { - this.sb.append(KEY_ESCAPER.apply(measurement)); + escapeKey(this.sb, measurement); this.length = sb.length(); } From e2169b3f03cda481975cb7fb872988c4354e912e Mon Sep 17 00:00:00 2001 From: Michael Schaefers Date: Fri, 9 Mar 2018 15:36:21 +0100 Subject: [PATCH 118/148] fix travis errors --- src/main/java/org/influxdb/dto/Point.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java index ad33c4b72..35f7f3a1c 100644 --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -364,7 +364,7 @@ private void concatenatedFields(final StringBuilder sb) { } } - static void escapeKey(StringBuilder sb, String key) { + static void escapeKey(final StringBuilder sb, final String key) { for (int i = 0; i < key.length(); i++) { switch (key.charAt(i)) { case ' ': @@ -377,7 +377,7 @@ static void escapeKey(StringBuilder sb, String key) { } } - static void escapeField(StringBuilder sb, String field) { + static void escapeField(final StringBuilder sb, final String field) { for (int i = 0; i < field.length(); i++) { switch (field.charAt(i)) { case '\\': From 85707c56f23fa1eef00cf211fb926f7e469cb5f1 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sat, 10 Mar 2018 12:39:07 +0100 
Subject: [PATCH 119/148] run tests against influxdb 1.5 as well --- compile-and-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compile-and-test.sh b/compile-and-test.sh index 67323c40f..8b10906d0 100755 --- a/compile-and-test.sh +++ b/compile-and-test.sh @@ -4,7 +4,7 @@ # set -e -INFLUXDB_VERSIONS="1.4 1.3 1.2 1.1" +INFLUXDB_VERSIONS="1.5 1.4 1.3 1.2 1.1" JAVA_VERSIONS="3-jdk-8-alpine 3-jdk-9-slim" From f1520ccb056b5af4483a34b2840b1c2109ec9c6a Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sat, 10 Mar 2018 12:42:04 +0100 Subject: [PATCH 120/148] Update release informations in README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5e3c2e714..6a41e89fa 100644 --- a/README.md +++ b/README.md @@ -287,12 +287,12 @@ The latest version for maven dependence: org.influxdb influxdb-java - 2.8 + 2.9 ``` Or when using with gradle: ```groovy -compile 'org.influxdb:influxdb-java:2.8' +compile 'org.influxdb:influxdb-java:2.9' ``` For version change history have a look at [ChangeLog](https://github.com/influxdata/influxdb-java/blob/master/CHANGELOG.md). From 6d610fcfae17a5a4394cbcd9688468be0ad2c3bd Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sat, 10 Mar 2018 14:11:54 +0100 Subject: [PATCH 121/148] Update dependencies --- pom.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index 4c7809dfd..cbea0c021 100644 --- a/pom.xml +++ b/pom.xml @@ -236,7 +236,7 @@ org.assertj assertj-core - 3.9.0 + 3.9.1 test @@ -260,12 +260,12 @@ com.squareup.okhttp3 okhttp - 3.9.1 + 3.10.0 com.squareup.okhttp3 logging-interceptor - 3.9.1 + 3.10.0
@@ -305,7 +305,7 @@ com.dkanejs.maven.plugins docker-compose-maven-plugin - 1.0.1 + 1.0.3 up From 458d4ea6156172298896cb076923da9204999160 Mon Sep 17 00:00:00 2001 From: Michael Schaefers Date: Sat, 10 Mar 2018 15:00:24 +0100 Subject: [PATCH 122/148] revert wrong formatting as requested --- src/main/java/org/influxdb/dto/Point.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java index 35f7f3a1c..acac4a82d 100644 --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -176,7 +176,7 @@ public Builder fields(final Map fieldsToAdd) { /** * Add a time to this point. * - * @param timeToSet the time for this point + * @param timeToSet the time for this point * @param precisionToSet the TimeUnit * @return the Builder instance. */ @@ -199,8 +199,8 @@ public Point build() { point.setFields(this.fields); point.setMeasurement(this.measurement); if (this.time != null) { - point.setTime(this.time); - point.setPrecision(this.precision); + point.setTime(this.time); + point.setPrecision(this.precision); } point.setTags(this.tags); return point; From 829749435a2a13fc27160e9d46fd58035024bb67 Mon Sep 17 00:00:00 2001 From: Stefan Majer Date: Sat, 10 Mar 2018 15:31:31 +0100 Subject: [PATCH 123/148] Performance improvement: use chained StringBuilder calls instead of single calls, closes #425 --- .../java/org/influxdb/dto/BatchPoints.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/main/java/org/influxdb/dto/BatchPoints.java b/src/main/java/org/influxdb/dto/BatchPoints.java index 9d29d6cd3..6bf16b92c 100644 --- a/src/main/java/org/influxdb/dto/BatchPoints.java +++ b/src/main/java/org/influxdb/dto/BatchPoints.java @@ -253,17 +253,17 @@ public int hashCode() { @Override public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("BatchPoints [database="); - builder.append(this.database); - builder.append(", retentionPolicy="); - builder.append(this.retentionPolicy); - builder.append(", consistency="); - builder.append(this.consistency); - builder.append(", tags="); - builder.append(this.tags); - builder.append(", points="); - builder.append(this.points); - builder.append("]"); + builder.append("BatchPoints [database=") + .append(this.database) + .append(", retentionPolicy=") + .append(this.retentionPolicy) + .append(", consistency=") + .append(this.consistency) + .append(", tags=") + .append(this.tags) + .append(", points=") + .append(this.points) + .append("]"); return builder.toString(); } From 2ea9ec646cbc935f3421de543a598d4081bdcc2c Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Thu, 15 Mar 2018 19:14:27 +0100 Subject: [PATCH 124/148] First simple implementation of "prepared statements" I added a new query class BoundParameterQuery, which can be used for "prepared statements". The constructor accepts a InfluxQL expression with placeholders, a DB name and a varags list to bind the parameters to the placeholders. I also extended the InfluxDBService, so that the HTTP requests contain the "params" parameter. The InfluxDBImpl now differentiates between a Query and a BoundParameterQuery. This is not the cleanest solution, because this has to be done at a few locations, but I didn't want to change too much of the code. 
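For illustration, a bound query could then be issued roughly as follows. This is a sketch only: the database "telegraf", the measurement "cpu" and the field/tag names are made-up placeholders; what the patch actually introduces is the BoundParameterQuery constructor and the positional binding of the trailing varargs to the placeholders in their order of appearance.

```java
InfluxDB influxDB = InfluxDBFactory.connect("http://127.0.0.1:8086", "root", "root");
// $idle and $host are bound positionally to 90 and "server01"
Query query = new BoundParameterQuery("SELECT * FROM cpu WHERE idle > $idle AND host = $host",
    "telegraf", 90, "server01");
QueryResult result = influxDB.query(query);
```

Because BoundParameterQuery extends Query, it can be handed to the existing query() overloads unchanged; the InfluxDBImpl changes below only check the instance type to decide whether to send the extra "params" request parameter.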
--- .../org/influxdb/dto/BoundParameterQuery.java | 81 ++++++++++++++++ .../java/org/influxdb/impl/InfluxDBImpl.java | 43 +++++++-- .../org/influxdb/impl/InfluxDBService.java | 16 ++++ .../influxdb/dto/BoundParameterQueryTest.java | 93 +++++++++++++++++++ 4 files changed, 224 insertions(+), 9 deletions(-) create mode 100644 src/main/java/org/influxdb/dto/BoundParameterQuery.java create mode 100644 src/test/java/org/influxdb/dto/BoundParameterQueryTest.java diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java new file mode 100644 index 000000000..cedf8b8a7 --- /dev/null +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -0,0 +1,81 @@ +package org.influxdb.dto; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.squareup.moshi.JsonWriter; + +import okio.Buffer; + +public class BoundParameterQuery extends Query { + + private final Object[] params; + + public BoundParameterQuery(final String command, final String database, final Object...params) { + super(command, database, true); + this.params = params; + } + + public String getParameterJsonWithUrlEncoded() { + try { + List placeholders = parsePlaceHolders(getCommand()); + Map parameterMap = createParameterMap(placeholders, params); + String jsonParameterObject = createJsonObject(parameterMap); + String urlEncodedJsonParameterObject = encode(jsonParameterObject); + return urlEncodedJsonParameterObject; + } catch (IOException e) { + throw new RuntimeException("Couldn't create parameter JSON object", e); + } + } + + private String createJsonObject(final Map parameterMap) throws IOException { + Buffer b = new Buffer(); + JsonWriter writer = JsonWriter.of(b); + writer.beginObject(); + for (Entry pair : parameterMap.entrySet()) { + String name = pair.getKey(); + Object value = pair.getValue(); + if (value instanceof Number) { + writer.name(name).value((Number) value); + } else if (value instanceof String) { + writer.name(name).value((String) value); + } else if (value instanceof Boolean) { + writer.name(name).value((Boolean) value); + } else { + writer.name(name).value(value.toString()); + } + } + writer.endObject(); + return b.readString(Charset.forName("utf-8")); + } + + private Map createParameterMap(final List placeholders, final Object[] params) { + if (placeholders.size() != params.length) { + throw new RuntimeException("Unbalanced amount of placeholders and parameters"); + } + + Map parameterMap = new HashMap<>(); + int index = 0; + for (String placeholder : placeholders) { + parameterMap.put(placeholder, params[index++]); + } + return parameterMap; + } + + private List parsePlaceHolders(final String command) { + List placeHolders = new ArrayList<>(); + Pattern p = Pattern.compile("\\s+\\$(\\w+?)(?:\\s|$)"); + Matcher m = p.matcher(getCommand()); + while (m.find()) { + placeHolders.add(m.group(1)); + } + return placeHolders; + } +} diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index ee221ab4d..e94c2b92b 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -18,6 +18,7 @@ import org.influxdb.InfluxDBException; import org.influxdb.InfluxDBIOException; import org.influxdb.dto.BatchPoints; +import 
org.influxdb.dto.BoundParameterQuery; import org.influxdb.dto.Point; import org.influxdb.dto.Pong; import org.influxdb.dto.Query; @@ -454,8 +455,16 @@ public void query(final Query query, final int chunkSize, final Consumer call = this.influxDBService.query(this.username, this.password, - query.getDatabase(), query.getCommandWithUrlEncoded(), chunkSize); + Call call = null; + if (query instanceof BoundParameterQuery) { + BoundParameterQuery boundParameterQuery = (BoundParameterQuery) query; + call = this.influxDBService.query(this.username, this.password, + query.getDatabase(), query.getCommandWithUrlEncoded(), chunkSize, + boundParameterQuery.getParameterJsonWithUrlEncoded()); + } else { + call = this.influxDBService.query(this.username, this.password, + query.getDatabase(), query.getCommandWithUrlEncoded(), chunkSize); + } call.enqueue(new Callback() { @Override @@ -496,8 +505,17 @@ public void onFailure(final Call call, final Throwable t) { */ @Override public QueryResult query(final Query query, final TimeUnit timeUnit) { - return execute(this.influxDBService.query(this.username, this.password, query.getDatabase(), - TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded())); + Call call = null; + if (query instanceof BoundParameterQuery) { + BoundParameterQuery boundParameterQuery = (BoundParameterQuery) query; + call = this.influxDBService.query(this.username, this.password, query.getDatabase(), + TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded(), + boundParameterQuery.getParameterJsonWithUrlEncoded()); + } else { + call = this.influxDBService.query(this.username, this.password, query.getDatabase(), + TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded()); + } + return execute(call); } /** @@ -560,12 +578,19 @@ public boolean databaseExists(final String name) { */ private Call callQuery(final Query query) { Call call; - if (query.requiresPost()) { - call = this.influxDBService.postQuery(this.username, - this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); + if (query instanceof BoundParameterQuery) { + BoundParameterQuery boundParameterQuery = (BoundParameterQuery) query; + call = this.influxDBService.postQuery(this.username, + this.password, query.getDatabase(), query.getCommandWithUrlEncoded(), + boundParameterQuery.getParameterJsonWithUrlEncoded()); } else { - call = this.influxDBService.query(this.username, - this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); + if (query.requiresPost()) { + call = this.influxDBService.postQuery(this.username, + this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); + } else { + call = this.influxDBService.query(this.username, + this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); + } } return call; } diff --git a/src/main/java/org/influxdb/impl/InfluxDBService.java b/src/main/java/org/influxdb/impl/InfluxDBService.java index 6485f8654..4876b5652 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBService.java +++ b/src/main/java/org/influxdb/impl/InfluxDBService.java @@ -18,6 +18,7 @@ interface InfluxDBService { public static final String Q = "q"; public static final String DB = "db"; public static final String RP = "rp"; + public static final String PARAMS = "params"; public static final String PRECISION = "precision"; public static final String CONSISTENCY = "consistency"; public static final String EPOCH = "epoch"; @@ -47,6 +48,11 @@ public Call writePoints(@Query(U) String username, public Call query(@Query(U) String 
username, @Query(P) String password, @Query(DB) String db, @Query(EPOCH) String epoch, @Query(value = Q, encoded = true) String query); + @POST("/query") + public Call query(@Query(U) String username, @Query(P) String password, @Query(DB) String db, + @Query(EPOCH) String epoch, @Query(value = Q, encoded = true) String query, + @Query(value = PARAMS, encoded = true) String params); + @GET("/query") public Call query(@Query(U) String username, @Query(P) String password, @Query(DB) String db, @Query(value = Q, encoded = true) String query); @@ -55,6 +61,10 @@ public Call query(@Query(U) String username, @Query(P) String passw public Call postQuery(@Query(U) String username, @Query(P) String password, @Query(DB) String db, @Query(value = Q, encoded = true) String query); + @POST("/query") + public Call postQuery(@Query(U) String username, @Query(P) String password, @Query(DB) String db, + @Query(value = Q, encoded = true) String query, @Query(value = PARAMS, encoded = true) String params); + @GET("/query") public Call query(@Query(U) String username, @Query(P) String password, @Query(value = Q, encoded = true) String query); @@ -68,4 +78,10 @@ public Call postQuery(@Query(U) String username, public Call query(@Query(U) String username, @Query(P) String password, @Query(DB) String db, @Query(value = Q, encoded = true) String query, @Query(CHUNK_SIZE) int chunkSize); + + @Streaming + @POST("/query?chunked=true") + public Call query(@Query(U) String username, + @Query(P) String password, @Query(DB) String db, @Query(value = Q, encoded = true) String query, + @Query(CHUNK_SIZE) int chunkSize, @Query(value = PARAMS, encoded = true) String params); } diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java new file mode 100644 index 000000000..f9f8500b7 --- /dev/null +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -0,0 +1,93 @@ +package org.influxdb.dto; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; + +import org.junit.Assert; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import com.squareup.moshi.JsonReader; + +import okio.Buffer; + + +/** + * Test for the BoundParameterQuery DTO. 
+ */ +@RunWith(JUnitPlatform.class) +public class BoundParameterQueryTest { + + @Test + public void testSingleCharacterPlaceHolderParsing() throws IOException { + BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $a AND b < $b", "foobar", 0, 10); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("a"), 0.0); + Assert.assertEquals(params.get("b"), 10.0); + } + + @Test + public void testPlaceHolderParsing() throws IOException { + BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd", "foobar", 0, 10); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("abc"), 0.0); + Assert.assertEquals(params.get("bcd"), 10.0); + } + + @Test + public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { + BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $", "foobar"); + Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + + query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc$cde", "foobar"); + Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + } + + @Test + public void testUnbalancedQuery() throws UnsupportedEncodingException { + // too many placeholders + try { + BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd", "foobar", 0); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); + } catch (RuntimeException rte) { + // expected + } + + // too many parameters + try { + BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc", "foobar", 0, 10); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); + } catch (RuntimeException rte) { + // expected + } + } + + private Map readObject(String json) throws IOException { + Buffer source = new Buffer(); + source.writeString(json, Charset.forName("utf-8")); + Map params = new HashMap<>(); + JsonReader reader = JsonReader.of(source); + reader.beginObject(); + while(reader.hasNext()) { + String name = reader.nextName(); + Object value = reader.readJsonValue(); + params.put(name, value); + } + reader.endObject(); + return params; + } + + private static String decode(String str) throws UnsupportedEncodingException { + return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); + } +} From 658fa1d3dec6657961c3743e2438a4b1dfbda18e Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Thu, 15 Mar 2018 19:25:02 +0100 Subject: [PATCH 125/148] Added another test for BoundParameterQuery --- .../org/influxdb/dto/BoundParameterQueryTest.java | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index f9f8500b7..5ef16e2c9 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -42,11 +42,20 @@ public void testPlaceHolderParsing() throws IOException { Assert.assertEquals(params.get("bcd"), 10.0); } + @Test + public void testPlaceHolderParsingWithLimitClause() throws IOException { + BoundParameterQuery query = new 
BoundParameterQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd LIMIT 10", "foobar", 0, 10); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("abc"), 0.0); + Assert.assertEquals(params.get("bcd"), 10.0); + } + @Test public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $", "foobar"); Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); - + query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc$cde", "foobar"); Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); } @@ -71,7 +80,7 @@ public void testUnbalancedQuery() throws UnsupportedEncodingException { // expected } } - + private Map readObject(String json) throws IOException { Buffer source = new Buffer(); source.writeString(json, Charset.forName("utf-8")); @@ -86,7 +95,7 @@ private Map readObject(String json) throws IOException { reader.endObject(); return params; } - + private static String decode(String str) throws UnsupportedEncodingException { return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); } From d71f316612250d7af16596f1d3b87a4136353eeb Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Fri, 16 Mar 2018 18:40:21 +0100 Subject: [PATCH 126/148] Implemented Builder mechanism for BoundParameterQuery This Builder avoid very long constructor calls and also makes it more obvious, which value is bound to which placeholder. --- .../org/influxdb/dto/BoundParameterQuery.java | 68 ++++++--- .../influxdb/dto/BoundParameterQueryTest.java | 129 ++++++++++-------- 2 files changed, 122 insertions(+), 75 deletions(-) diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java index cedf8b8a7..117a0e123 100644 --- a/src/main/java/org/influxdb/dto/BoundParameterQuery.java +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -1,5 +1,6 @@ package org.influxdb.dto; +import com.squareup.moshi.JsonWriter; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; @@ -9,25 +10,26 @@ import java.util.Map.Entry; import java.util.regex.Matcher; import java.util.regex.Pattern; - -import com.squareup.moshi.JsonWriter; - import okio.Buffer; public class BoundParameterQuery extends Query { - private final Object[] params; + private final Map params = new HashMap<>(); - public BoundParameterQuery(final String command, final String database, final Object...params) { + private BoundParameterQuery(final String command, final String database) { super(command, database, true); - this.params = params; + } + + public BoundParameterQuery bind(String placeholder, Object value) { + params.put(placeholder, value); + return this; } public String getParameterJsonWithUrlEncoded() { try { List placeholders = parsePlaceHolders(getCommand()); - Map parameterMap = createParameterMap(placeholders, params); - String jsonParameterObject = createJsonObject(parameterMap); + assurePlaceholdersAreBound(placeholders, params); + String jsonParameterObject = createJsonObject(params); String urlEncodedJsonParameterObject = encode(jsonParameterObject); return urlEncodedJsonParameterObject; } catch (IOException e) { @@ -35,6 +37,18 @@ public String getParameterJsonWithUrlEncoded() { } } + private void assurePlaceholdersAreBound(List placeholders, Map params) { + if (placeholders.size() != params.size()) { + 
throw new RuntimeException("Unbalanced amount of placeholders and parameters"); + } + + for (String placeholder : placeholders) { + if (params.get(placeholder) == null) { + throw new RuntimeException("Placeholder $" + placeholder + " is not bound"); + } + } + } + private String createJsonObject(final Map parameterMap) throws IOException { Buffer b = new Buffer(); JsonWriter writer = JsonWriter.of(b); @@ -56,19 +70,6 @@ private String createJsonObject(final Map parameterMap) throws I return b.readString(Charset.forName("utf-8")); } - private Map createParameterMap(final List placeholders, final Object[] params) { - if (placeholders.size() != params.length) { - throw new RuntimeException("Unbalanced amount of placeholders and parameters"); - } - - Map parameterMap = new HashMap<>(); - int index = 0; - for (String placeholder : placeholders) { - parameterMap.put(placeholder, params[index++]); - } - return parameterMap; - } - private List parsePlaceHolders(final String command) { List placeHolders = new ArrayList<>(); Pattern p = Pattern.compile("\\s+\\$(\\w+?)(?:\\s|$)"); @@ -78,4 +79,29 @@ private List parsePlaceHolders(final String command) { } return placeHolders; } + + public static class QueryBuilder { + private BoundParameterQuery query; + private String influxQL; + + public static QueryBuilder newQuery(String influxQL) { + QueryBuilder instance = new QueryBuilder(); + instance.influxQL = influxQL; + return instance; + } + + public QueryBuilder forDatabase(String database) { + query = new BoundParameterQuery(influxQL, database); + return this; + } + + public QueryBuilder bind(String placeholder, Object value) { + query.params.put(placeholder, value); + return this; + } + + public BoundParameterQuery create() { + return query; + } + } } diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index 5ef16e2c9..fd71e3bab 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -8,6 +8,7 @@ import java.util.HashMap; import java.util.Map; +import org.influxdb.dto.BoundParameterQuery.QueryBuilder; import org.junit.Assert; import org.junit.jupiter.api.Test; import org.junit.platform.runner.JUnitPlatform; @@ -17,86 +18,106 @@ import okio.Buffer; - /** * Test for the BoundParameterQuery DTO. 
*/ @RunWith(JUnitPlatform.class) public class BoundParameterQueryTest { - @Test - public void testSingleCharacterPlaceHolderParsing() throws IOException { - BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $a AND b < $b", "foobar", 0, 10); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("a"), 0.0); - Assert.assertEquals(params.get("b"), 10.0); - } + @Test + public void testSingleCharacterPlaceHolderParsing() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $a AND b < $b") + .forDatabase("foobar") + .bind("a", 0) + .bind("b", 10) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("a"), 0.0); + Assert.assertEquals(params.get("b"), 10.0); + } - @Test - public void testPlaceHolderParsing() throws IOException { - BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd", "foobar", 0, 10); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); + @Test + public void testPlaceHolderParsing() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") + .forDatabase("foobar") + .bind("abc", 0) + .bind("bcd", 10) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); Assert.assertEquals(params.get("abc"), 0.0); Assert.assertEquals(params.get("bcd"), 10.0); - } + } - @Test + @Test public void testPlaceHolderParsingWithLimitClause() throws IOException { - BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd LIMIT 10", "foobar", 0, 10); + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd LIMIT 10") + .forDatabase("foobar") + .bind("abc", 0) + .bind("bcd", 10) + .create(); Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); Assert.assertEquals(2, params.size()); Assert.assertEquals(params.get("abc"), 0.0); Assert.assertEquals(params.get("bcd"), 10.0); } - @Test - public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { - BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $", "foobar"); - Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + @Test + public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $") + .forDatabase("foobar") + .create(); + Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); - query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc$cde", "foobar"); - Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); - } + query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc$cde").forDatabase("foobar").create(); + Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + } - @Test - public void testUnbalancedQuery() throws UnsupportedEncodingException { - // too many placeholders - try { - BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd", "foobar", 0); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because 
of unbalanced placeholders and parameters"); - } catch (RuntimeException rte) { - // expected - } + @Test + public void testUnbalancedQuery() throws UnsupportedEncodingException { + // too many placeholders + try { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") + .forDatabase("foobar") + .bind("abc", 0) + .create(); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); + } catch (RuntimeException rte) { + // expected + } - // too many parameters - try { - BoundParameterQuery query = new BoundParameterQuery("SELECT * FROM abc WHERE a > $abc", "foobar", 0, 10); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); - } catch (RuntimeException rte) { - // expected - } - } + // too many parameters + try { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc") + .forDatabase("foobar") + .bind("abc", 0) + .bind("bcd", 10) + .create(); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); + } catch (RuntimeException rte) { + // expected + } + } - private Map readObject(String json) throws IOException { - Buffer source = new Buffer(); - source.writeString(json, Charset.forName("utf-8")); - Map params = new HashMap<>(); - JsonReader reader = JsonReader.of(source); - reader.beginObject(); - while(reader.hasNext()) { + private Map readObject(String json) throws IOException { + Buffer source = new Buffer(); + source.writeString(json, Charset.forName("utf-8")); + Map params = new HashMap<>(); + JsonReader reader = JsonReader.of(source); + reader.beginObject(); + while (reader.hasNext()) { String name = reader.nextName(); Object value = reader.readJsonValue(); params.put(name, value); } - reader.endObject(); - return params; - } + reader.endObject(); + return params; + } - private static String decode(String str) throws UnsupportedEncodingException { + private static String decode(String str) throws UnsupportedEncodingException { return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); } } From 929cfdeb8a9242adec3a3b5162d203d91b993930 Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sat, 17 Mar 2018 14:09:44 +0100 Subject: [PATCH 127/148] Removed unnecessary method Removed bind method from BoundParameterQuery, because this functionality is now implemented in the QueryBuilder --- src/main/java/org/influxdb/dto/BoundParameterQuery.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java index 117a0e123..a1344f2ca 100644 --- a/src/main/java/org/influxdb/dto/BoundParameterQuery.java +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -20,11 +20,6 @@ private BoundParameterQuery(final String command, final String database) { super(command, database, true); } - public BoundParameterQuery bind(String placeholder, Object value) { - params.put(placeholder, value); - return this; - } - public String getParameterJsonWithUrlEncoded() { try { List placeholders = parsePlaceHolders(getCommand()); From 7eb0b5a963f4778bb083590386b0ea7d07055bd0 Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sat, 17 Mar 2018 14:10:28 +0100 Subject: [PATCH 128/148] Added test for the different parameter types --- .../influxdb/dto/BoundParameterQueryTest.java | 18 
++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index fd71e3bab..68f29be4c 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -63,6 +63,24 @@ public void testPlaceHolderParsingWithLimitClause() throws IOException { Assert.assertEquals(params.get("bcd"), 10.0); } + @Test + public void testDifferentTypePlaceHolderParsing() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE number > $number" + + " AND bool = $bool AND string = $string AND other = $object") + .forDatabase("foobar") + .bind("number", 0) + .bind("bool", true) + .bind("string", "test") + .bind("object", new Object()) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(4, params.size()); + Assert.assertEquals(params.get("number"), 0.0); + Assert.assertEquals(params.get("bool"), true); + Assert.assertEquals(params.get("string"), "test"); + Assert.assertTrue(((String)params.get("object")).matches("java.lang.Object@[a-z0-9]+")); + } + @Test public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $") From f03fa37c41b9488e4813260078dda79903896b3a Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sat, 17 Mar 2018 14:39:24 +0100 Subject: [PATCH 129/148] Added equals and hashcode --- .../org/influxdb/dto/BoundParameterQuery.java | 20 ++++++++++++ .../influxdb/dto/BoundParameterQueryTest.java | 32 +++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java index a1344f2ca..dceb28759 100644 --- a/src/main/java/org/influxdb/dto/BoundParameterQuery.java +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -75,6 +75,26 @@ private List parsePlaceHolders(final String command) { return placeHolders; } + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + params.hashCode(); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!super.equals(obj)) + return false; + BoundParameterQuery other = (BoundParameterQuery) obj; + if (!params.equals(other.params)) + return false; + return true; + } + public static class QueryBuilder { private BoundParameterQuery query; private String influxQL; diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index 68f29be4c..be415d2c5 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -1,5 +1,7 @@ package org.influxdb.dto; +import static org.assertj.core.api.Assertions.assertThat; + import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; @@ -119,6 +121,36 @@ public void testUnbalancedQuery() throws UnsupportedEncodingException { // expected } } + + @Test + public void testEqualsAndHashCode() { + String stringA0 = "SELECT * FROM foobar WHERE a = $a"; + String stringA1 = "SELECT * FROM foobar WHERE a = $a"; + String stringB0 = "SELECT * FROM foobar WHERE b = $b"; + + Query queryA0 = QueryBuilder.newQuery(stringA0) + 
.forDatabase(stringA0) + .bind("a", 0) + .create(); + Query queryA1 = QueryBuilder.newQuery(stringA1) + .forDatabase(stringA1) + .bind("a", 0) + .create(); + Query queryB0 = QueryBuilder.newQuery(stringB0) + .forDatabase(stringB0) + .bind("b", 10) + .create(); +// Query queryC0 = new Query(stringB0, stringA0); + + assertThat(queryA0).isEqualTo(queryA0); + assertThat(queryA0).isEqualTo(queryA1); + assertThat(queryA0).isNotEqualTo(queryB0); + assertThat(queryA0).isNotEqualTo("foobar"); +// assertThat(queryB0).isNotEqualTo(queryC0); + + assertThat(queryA0.hashCode()).isEqualTo(queryA1.hashCode()); + assertThat(queryA0.hashCode()).isNotEqualTo(queryB0.hashCode()); + } private Map readObject(String json) throws IOException { Buffer source = new Buffer(); From 966d781714488a6984b435e5b9a30fdcaeda6712 Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sat, 17 Mar 2018 14:43:02 +0100 Subject: [PATCH 130/148] Code cleanup Fixed checkstyle issues and formatted according to project standard (hopefully) --- .../org/influxdb/dto/BoundParameterQuery.java | 189 ++++++------ .../influxdb/dto/BoundParameterQueryTest.java | 276 +++++++++--------- 2 files changed, 233 insertions(+), 232 deletions(-) diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java index dceb28759..6c14eac6b 100644 --- a/src/main/java/org/influxdb/dto/BoundParameterQuery.java +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -12,111 +12,114 @@ import java.util.regex.Pattern; import okio.Buffer; -public class BoundParameterQuery extends Query { - - private final Map params = new HashMap<>(); - - private BoundParameterQuery(final String command, final String database) { - super(command, database, true); +public final class BoundParameterQuery extends Query { + + private final Map params = new HashMap<>(); + + private BoundParameterQuery(final String command, final String database) { + super(command, database, true); + } + + public String getParameterJsonWithUrlEncoded() { + try { + List placeholders = parsePlaceHolders(getCommand()); + assurePlaceholdersAreBound(placeholders, params); + String jsonParameterObject = createJsonObject(params); + String urlEncodedJsonParameterObject = encode(jsonParameterObject); + return urlEncodedJsonParameterObject; + } catch (IOException e) { + throw new RuntimeException("Couldn't create parameter JSON object", e); } + } - public String getParameterJsonWithUrlEncoded() { - try { - List placeholders = parsePlaceHolders(getCommand()); - assurePlaceholdersAreBound(placeholders, params); - String jsonParameterObject = createJsonObject(params); - String urlEncodedJsonParameterObject = encode(jsonParameterObject); - return urlEncodedJsonParameterObject; - } catch (IOException e) { - throw new RuntimeException("Couldn't create parameter JSON object", e); - } + private void assurePlaceholdersAreBound(final List placeholders, final Map params) { + if (placeholders.size() != params.size()) { + throw new RuntimeException("Unbalanced amount of placeholders and parameters"); } - private void assurePlaceholdersAreBound(List placeholders, Map params) { - if (placeholders.size() != params.size()) { - throw new RuntimeException("Unbalanced amount of placeholders and parameters"); - } - - for (String placeholder : placeholders) { - if (params.get(placeholder) == null) { - throw new RuntimeException("Placeholder $" + placeholder + " is not bound"); - } - } + for (String placeholder : placeholders) { + if (params.get(placeholder) == null) 
{ + throw new RuntimeException("Placeholder $" + placeholder + " is not bound"); + } } - - private String createJsonObject(final Map parameterMap) throws IOException { - Buffer b = new Buffer(); - JsonWriter writer = JsonWriter.of(b); - writer.beginObject(); - for (Entry pair : parameterMap.entrySet()) { - String name = pair.getKey(); - Object value = pair.getValue(); - if (value instanceof Number) { - writer.name(name).value((Number) value); - } else if (value instanceof String) { - writer.name(name).value((String) value); - } else if (value instanceof Boolean) { - writer.name(name).value((Boolean) value); - } else { - writer.name(name).value(value.toString()); - } - } - writer.endObject(); - return b.readString(Charset.forName("utf-8")); + } + + private String createJsonObject(final Map parameterMap) throws IOException { + Buffer b = new Buffer(); + JsonWriter writer = JsonWriter.of(b); + writer.beginObject(); + for (Entry pair : parameterMap.entrySet()) { + String name = pair.getKey(); + Object value = pair.getValue(); + if (value instanceof Number) { + writer.name(name).value((Number) value); + } else if (value instanceof String) { + writer.name(name).value((String) value); + } else if (value instanceof Boolean) { + writer.name(name).value((Boolean) value); + } else { + writer.name(name).value(value.toString()); + } + } + writer.endObject(); + return b.readString(Charset.forName("utf-8")); + } + + private List parsePlaceHolders(final String command) { + List placeHolders = new ArrayList<>(); + Pattern p = Pattern.compile("\\s+\\$(\\w+?)(?:\\s|$)"); + Matcher m = p.matcher(getCommand()); + while (m.find()) { + placeHolders.add(m.group(1)); } + return placeHolders; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + params.hashCode(); + return result; + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + BoundParameterQuery other = (BoundParameterQuery) obj; + if (!params.equals(other.params)) { + return false; + } + return true; + } + + public static class QueryBuilder { + private BoundParameterQuery query; + private String influxQL; - private List parsePlaceHolders(final String command) { - List placeHolders = new ArrayList<>(); - Pattern p = Pattern.compile("\\s+\\$(\\w+?)(?:\\s|$)"); - Matcher m = p.matcher(getCommand()); - while (m.find()) { - placeHolders.add(m.group(1)); - } - return placeHolders; + public static QueryBuilder newQuery(final String influxQL) { + QueryBuilder instance = new QueryBuilder(); + instance.influxQL = influxQL; + return instance; } - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + params.hashCode(); - return result; + public QueryBuilder forDatabase(final String database) { + query = new BoundParameterQuery(influxQL, database); + return this; } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - BoundParameterQuery other = (BoundParameterQuery) obj; - if (!params.equals(other.params)) - return false; - return true; + public QueryBuilder bind(final String placeholder, final Object value) { + query.params.put(placeholder, value); + return this; } - public static class QueryBuilder { - private BoundParameterQuery query; - private String influxQL; - - public static QueryBuilder newQuery(String influxQL) { - QueryBuilder instance = new 
QueryBuilder(); - instance.influxQL = influxQL; - return instance; - } - - public QueryBuilder forDatabase(String database) { - query = new BoundParameterQuery(influxQL, database); - return this; - } - - public QueryBuilder bind(String placeholder, Object value) { - query.params.put(placeholder, value); - return this; - } - - public BoundParameterQuery create() { - return query; - } + public BoundParameterQuery create() { + return query; } + } } diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index be415d2c5..c25ca1d39 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -26,148 +26,146 @@ @RunWith(JUnitPlatform.class) public class BoundParameterQueryTest { - @Test - public void testSingleCharacterPlaceHolderParsing() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $a AND b < $b") - .forDatabase("foobar") - .bind("a", 0) - .bind("b", 10) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("a"), 0.0); - Assert.assertEquals(params.get("b"), 10.0); + @Test + public void testSingleCharacterPlaceHolderParsing() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $a AND b < $b") + .forDatabase("foobar") + .bind("a", 0) + .bind("b", 10) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("a"), 0.0); + Assert.assertEquals(params.get("b"), 10.0); + } + + @Test + public void testPlaceHolderParsing() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") + .forDatabase("foobar") + .bind("abc", 0) + .bind("bcd", 10) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("abc"), 0.0); + Assert.assertEquals(params.get("bcd"), 10.0); + } + + @Test + public void testPlaceHolderParsingWithLimitClause() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd LIMIT 10") + .forDatabase("foobar") + .bind("abc", 0) + .bind("bcd", 10) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(2, params.size()); + Assert.assertEquals(params.get("abc"), 0.0); + Assert.assertEquals(params.get("bcd"), 10.0); + } + + @Test + public void testDifferentTypePlaceHolderParsing() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE number > $number" + + " AND bool = $bool AND string = $string AND other = $object") + .forDatabase("foobar") + .bind("number", 0) + .bind("bool", true) + .bind("string", "test") + .bind("object", new Object()) + .create(); + Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(4, params.size()); + Assert.assertEquals(params.get("number"), 0.0); + Assert.assertEquals(params.get("bool"), true); + Assert.assertEquals(params.get("string"), "test"); + Assert.assertTrue(((String)params.get("object")).matches("java.lang.Object@[a-z0-9]+")); + } + + @Test + public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { + 
BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $") + .forDatabase("foobar") + .create(); + Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + + query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc$cde").forDatabase("foobar").create(); + Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + } + + @Test + public void testUnbalancedQuery() throws UnsupportedEncodingException { + // too many placeholders + try { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") + .forDatabase("foobar") + .bind("abc", 0) + .create(); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); + } catch (RuntimeException rte) { + // expected } - @Test - public void testPlaceHolderParsing() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") - .forDatabase("foobar") - .bind("abc", 0) - .bind("bcd", 10) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("abc"), 0.0); - Assert.assertEquals(params.get("bcd"), 10.0); + // too many parameters + try { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc") + .forDatabase("foobar") + .bind("abc", 0) + .bind("bcd", 10) + .create(); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); + } catch (RuntimeException rte) { + // expected } - - @Test - public void testPlaceHolderParsingWithLimitClause() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd LIMIT 10") - .forDatabase("foobar") - .bind("abc", 0) - .bind("bcd", 10) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("abc"), 0.0); - Assert.assertEquals(params.get("bcd"), 10.0); - } - - @Test - public void testDifferentTypePlaceHolderParsing() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE number > $number" - + " AND bool = $bool AND string = $string AND other = $object") - .forDatabase("foobar") - .bind("number", 0) - .bind("bool", true) - .bind("string", "test") - .bind("object", new Object()) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(4, params.size()); - Assert.assertEquals(params.get("number"), 0.0); - Assert.assertEquals(params.get("bool"), true); - Assert.assertEquals(params.get("string"), "test"); - Assert.assertTrue(((String)params.get("object")).matches("java.lang.Object@[a-z0-9]+")); - } - - @Test - public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $") - .forDatabase("foobar") - .create(); - Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); - - query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc$cde").forDatabase("foobar").create(); - Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); + } + + @Test + public void testEqualsAndHashCode() { + String stringA0 = "SELECT * FROM foobar WHERE a = $a"; + String stringA1 = "SELECT * FROM foobar WHERE a 
= $a"; + String stringB0 = "SELECT * FROM foobar WHERE b = $b"; + + Query queryA0 = QueryBuilder.newQuery(stringA0) + .forDatabase(stringA0) + .bind("a", 0) + .create(); + Query queryA1 = QueryBuilder.newQuery(stringA1) + .forDatabase(stringA1) + .bind("a", 0) + .create(); + Query queryB0 = QueryBuilder.newQuery(stringB0) + .forDatabase(stringB0) + .bind("b", 10) + .create(); + + assertThat(queryA0).isEqualTo(queryA0); + assertThat(queryA0).isEqualTo(queryA1); + assertThat(queryA0).isNotEqualTo(queryB0); + assertThat(queryA0).isNotEqualTo("foobar"); + + assertThat(queryA0.hashCode()).isEqualTo(queryA1.hashCode()); + assertThat(queryA0.hashCode()).isNotEqualTo(queryB0.hashCode()); + } + + private Map readObject(String json) throws IOException { + Buffer source = new Buffer(); + source.writeString(json, Charset.forName("utf-8")); + Map params = new HashMap<>(); + JsonReader reader = JsonReader.of(source); + reader.beginObject(); + while (reader.hasNext()) { + String name = reader.nextName(); + Object value = reader.readJsonValue(); + params.put(name, value); } + reader.endObject(); + return params; + } - @Test - public void testUnbalancedQuery() throws UnsupportedEncodingException { - // too many placeholders - try { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") - .forDatabase("foobar") - .bind("abc", 0) - .create(); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); - } catch (RuntimeException rte) { - // expected - } - - // too many parameters - try { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc") - .forDatabase("foobar") - .bind("abc", 0) - .bind("bcd", 10) - .create(); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); - } catch (RuntimeException rte) { - // expected - } - } - - @Test - public void testEqualsAndHashCode() { - String stringA0 = "SELECT * FROM foobar WHERE a = $a"; - String stringA1 = "SELECT * FROM foobar WHERE a = $a"; - String stringB0 = "SELECT * FROM foobar WHERE b = $b"; - - Query queryA0 = QueryBuilder.newQuery(stringA0) - .forDatabase(stringA0) - .bind("a", 0) - .create(); - Query queryA1 = QueryBuilder.newQuery(stringA1) - .forDatabase(stringA1) - .bind("a", 0) - .create(); - Query queryB0 = QueryBuilder.newQuery(stringB0) - .forDatabase(stringB0) - .bind("b", 10) - .create(); -// Query queryC0 = new Query(stringB0, stringA0); - - assertThat(queryA0).isEqualTo(queryA0); - assertThat(queryA0).isEqualTo(queryA1); - assertThat(queryA0).isNotEqualTo(queryB0); - assertThat(queryA0).isNotEqualTo("foobar"); -// assertThat(queryB0).isNotEqualTo(queryC0); - - assertThat(queryA0.hashCode()).isEqualTo(queryA1.hashCode()); - assertThat(queryA0.hashCode()).isNotEqualTo(queryB0.hashCode()); - } - - private Map readObject(String json) throws IOException { - Buffer source = new Buffer(); - source.writeString(json, Charset.forName("utf-8")); - Map params = new HashMap<>(); - JsonReader reader = JsonReader.of(source); - reader.beginObject(); - while (reader.hasNext()) { - String name = reader.nextName(); - Object value = reader.readJsonValue(); - params.put(name, value); - } - reader.endObject(); - return params; - } - - private static String decode(String str) throws UnsupportedEncodingException { - return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); - } + private static String decode(String str) throws 
UnsupportedEncodingException { + return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); + } } From be08bcdbc9951d3441e8e7f06e799c4915c111f9 Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sat, 17 Mar 2018 15:24:16 +0100 Subject: [PATCH 131/148] Added test for BoundParameterQuery Added a test which executes a BoundParameterQuery against an actual database. --- src/test/java/org/influxdb/InfluxDBTest.java | 31 ++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index c9b1eee21..85c4ebd3a 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -2,10 +2,12 @@ import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.BoundParameterQuery.QueryBuilder; import org.influxdb.dto.Point; import org.influxdb.dto.Pong; import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; +import org.influxdb.dto.QueryResult.Series; import org.influxdb.impl.InfluxDBImpl; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; @@ -89,6 +91,35 @@ public void testQuery() { this.influxDB.query(new Query("DROP DATABASE mydb2", "mydb")); } + @Test + public void testBoundParameterQuery() { + // set up + Point point = Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .build(); + this.influxDB.setDatabase(UDP_DATABASE); + this.influxDB.write(point); + + // test + Query query = QueryBuilder.newQuery("SELECT * FROM cpu WHERE atag = $atag") + .forDatabase(UDP_DATABASE) + .bind("atag", "test") + .create(); + QueryResult result = this.influxDB.query(query); + Assertions.assertTrue(result.getResults().get(0).getSeries().size() == 1); + Series series = result.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(series.getValues().size() == 1); + + result = this.influxDB.query(query, TimeUnit.SECONDS); + Assertions.assertTrue(result.getResults().get(0).getSeries().size() == 1); + series = result.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(series.getValues().size() == 1); + } + /** * Tests for callback query. 
*/ From 58cd9eb26f2d941d75c224c30fd43527bad47f33 Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sat, 17 Mar 2018 16:55:17 +0100 Subject: [PATCH 132/148] Improved test coverage for BoundParameterQuery --- src/test/java/org/influxdb/InfluxDBTest.java | 16 +++++++++++++++- .../influxdb/dto/BoundParameterQueryTest.java | 17 +++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index 85c4ebd3a..7bd817e85 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -92,7 +92,7 @@ public void testQuery() { } @Test - public void testBoundParameterQuery() { + public void testBoundParameterQuery() throws InterruptedException { // set up Point point = Point .measurement("cpu") @@ -118,6 +118,20 @@ public void testBoundParameterQuery() { Assertions.assertTrue(result.getResults().get(0).getSeries().size() == 1); series = result.getResults().get(0).getSeries().get(0); Assertions.assertTrue(series.getValues().size() == 1); + + Object waitForTestresults = new Object(); + Consumer check = (queryResult) -> { + Assertions.assertTrue(queryResult.getResults().get(0).getSeries().size() == 1); + Series s = queryResult.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(s.getValues().size() == 1); + synchronized (waitForTestresults) { + waitForTestresults.notifyAll(); + } + }; + this.influxDB.query(query, 10, check); + synchronized (waitForTestresults) { + waitForTestresults.wait(2000); + } } /** diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index c25ca1d39..a5f6b9729 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -120,6 +120,18 @@ public void testUnbalancedQuery() throws UnsupportedEncodingException { } catch (RuntimeException rte) { // expected } + + // unbound placeholder + try { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc") + .forDatabase("foobar") + .bind("bcd", 10) + .create(); + query.getParameterJsonWithUrlEncoded(); + Assert.fail("Expected RuntimeException because of unbound placeholder"); + } catch (RuntimeException rte) { + // expected + } } @Test @@ -136,6 +148,10 @@ public void testEqualsAndHashCode() { .forDatabase(stringA1) .bind("a", 0) .create(); + Query queryA2 = QueryBuilder.newQuery(stringA1) + .forDatabase(stringA1) + .bind("a", 10) + .create(); Query queryB0 = QueryBuilder.newQuery(stringB0) .forDatabase(stringB0) .bind("b", 10) @@ -143,6 +159,7 @@ public void testEqualsAndHashCode() { assertThat(queryA0).isEqualTo(queryA0); assertThat(queryA0).isEqualTo(queryA1); + assertThat(queryA0).isNotEqualTo(queryA2); assertThat(queryA0).isNotEqualTo(queryB0); assertThat(queryA0).isNotEqualTo("foobar"); From f761e6139ac5d84b3f294acd90070fea513a9cb6 Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Sun, 18 Mar 2018 14:32:50 +0100 Subject: [PATCH 133/148] Removed unnecessary placeholder checks The checks in BoundParameterQuery have been removed, because InfluxDB does that already and returns appropriate error messages in case of an invalid request. 
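As a sketch of what this change means for callers (database and placeholder names here are illustrative, not taken from the patch): an unbound placeholder is no longer rejected on the client side; the parameter JSON is simply built from whatever was bound, and InfluxDB itself reports any problem with the resulting request.

```java
BoundParameterQuery q = QueryBuilder.newQuery("SELECT * FROM cpu WHERE idle > $idle")
        .forDatabase("mydb")   // illustrative database name
        .create();             // $idle is deliberately left unbound

// Before this patch the call below threw a RuntimeException on the client; now it simply
// returns the URL-encoded JSON of the bound parameters (an empty object in this case),
// and the server answers the eventual query with its own error message.
String params = q.getParameterJsonWithUrlEncoded();
```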
--- .../org/influxdb/dto/BoundParameterQuery.java | 38 +---- .../influxdb/dto/BoundParameterQueryTest.java | 145 +++--------------- 2 files changed, 31 insertions(+), 152 deletions(-) diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java index 6c14eac6b..a70b9524b 100644 --- a/src/main/java/org/influxdb/dto/BoundParameterQuery.java +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -3,13 +3,12 @@ import com.squareup.moshi.JsonWriter; import java.io.IOException; import java.nio.charset.Charset; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.regex.Matcher; -import java.util.regex.Pattern; + +import org.influxdb.InfluxDBIOException; + import okio.Buffer; public final class BoundParameterQuery extends Query { @@ -22,25 +21,11 @@ private BoundParameterQuery(final String command, final String database) { public String getParameterJsonWithUrlEncoded() { try { - List placeholders = parsePlaceHolders(getCommand()); - assurePlaceholdersAreBound(placeholders, params); String jsonParameterObject = createJsonObject(params); String urlEncodedJsonParameterObject = encode(jsonParameterObject); return urlEncodedJsonParameterObject; } catch (IOException e) { - throw new RuntimeException("Couldn't create parameter JSON object", e); - } - } - - private void assurePlaceholdersAreBound(final List placeholders, final Map params) { - if (placeholders.size() != params.size()) { - throw new RuntimeException("Unbalanced amount of placeholders and parameters"); - } - - for (String placeholder : placeholders) { - if (params.get(placeholder) == null) { - throw new RuntimeException("Placeholder $" + placeholder + " is not bound"); - } + throw new InfluxDBIOException(e); } } @@ -52,29 +37,20 @@ private String createJsonObject(final Map parameterMap) throws I String name = pair.getKey(); Object value = pair.getValue(); if (value instanceof Number) { - writer.name(name).value((Number) value); + Number number = (Number) value; + writer.name(name).value(number); } else if (value instanceof String) { writer.name(name).value((String) value); } else if (value instanceof Boolean) { writer.name(name).value((Boolean) value); } else { - writer.name(name).value(value.toString()); + writer.name(name).value(String.valueOf(value)); } } writer.endObject(); return b.readString(Charset.forName("utf-8")); } - private List parsePlaceHolders(final String command) { - List placeHolders = new ArrayList<>(); - Pattern p = Pattern.compile("\\s+\\$(\\w+?)(?:\\s|$)"); - Matcher m = p.matcher(getCommand()); - while (m.find()) { - placeHolders.add(m.group(1)); - } - return placeHolders; - } - @Override public int hashCode() { final int prime = 31; diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java index a5f6b9729..3ab185272 100644 --- a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -5,10 +5,7 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; -import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -import java.util.HashMap; -import java.util.Map; import org.influxdb.dto.BoundParameterQuery.QueryBuilder; import org.junit.Assert; @@ -16,9 +13,8 @@ import org.junit.platform.runner.JUnitPlatform; import org.junit.runner.RunWith; 
-import com.squareup.moshi.JsonReader; - -import okio.Buffer; +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.Moshi; /** * Test for the BoundParameterQuery DTO. @@ -27,111 +23,25 @@ public class BoundParameterQueryTest { @Test - public void testSingleCharacterPlaceHolderParsing() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $a AND b < $b") - .forDatabase("foobar") - .bind("a", 0) - .bind("b", 10) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("a"), 0.0); - Assert.assertEquals(params.get("b"), 10.0); - } - - @Test - public void testPlaceHolderParsing() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") - .forDatabase("foobar") - .bind("abc", 0) - .bind("bcd", 10) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("abc"), 0.0); - Assert.assertEquals(params.get("bcd"), 10.0); - } - - @Test - public void testPlaceHolderParsingWithLimitClause() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd LIMIT 10") - .forDatabase("foobar") - .bind("abc", 0) - .bind("bcd", 10) - .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(2, params.size()); - Assert.assertEquals(params.get("abc"), 0.0); - Assert.assertEquals(params.get("bcd"), 10.0); - } - - @Test - public void testDifferentTypePlaceHolderParsing() throws IOException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE number > $number" - + " AND bool = $bool AND string = $string AND other = $object") + public void testGetParameterJsonWithUrlEncoded() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE integer > $i" + + "AND double = $d AND bool = $bool AND string = $string AND other = $object") .forDatabase("foobar") - .bind("number", 0) + .bind("i", 0) + .bind("d", 1.0) .bind("bool", true) .bind("string", "test") .bind("object", new Object()) .create(); - Map params = readObject(decode(query.getParameterJsonWithUrlEncoded())); - Assert.assertEquals(4, params.size()); - Assert.assertEquals(params.get("number"), 0.0); - Assert.assertEquals(params.get("bool"), true); - Assert.assertEquals(params.get("string"), "test"); - Assert.assertTrue(((String)params.get("object")).matches("java.lang.Object@[a-z0-9]+")); - } - - @Test - public void testIgnoreInvalidPlaceholders() throws UnsupportedEncodingException { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $") - .forDatabase("foobar") - .create(); - Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); - - query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc$cde").forDatabase("foobar").create(); - Assert.assertEquals(decode(query.getParameterJsonWithUrlEncoded()), "{}"); - } - - @Test - public void testUnbalancedQuery() throws UnsupportedEncodingException { - // too many placeholders - try { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc AND b < $bcd") - .forDatabase("foobar") - .bind("abc", 0) - .create(); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); - 
} catch (RuntimeException rte) { - // expected - } - - // too many parameters - try { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc") - .forDatabase("foobar") - .bind("abc", 0) - .bind("bcd", 10) - .create(); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because of unbalanced placeholders and parameters"); - } catch (RuntimeException rte) { - // expected - } - - // unbound placeholder - try { - BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE a > $abc") - .forDatabase("foobar") - .bind("bcd", 10) - .create(); - query.getParameterJsonWithUrlEncoded(); - Assert.fail("Expected RuntimeException because of unbound placeholder"); - } catch (RuntimeException rte) { - // expected - } + + Moshi moshi = new Moshi.Builder().build(); + JsonAdapter adapter = moshi.adapter(Point.class); + Point point = adapter.fromJson(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(0, point.i); + Assert.assertEquals(1.0, point.d, 0.0); + Assert.assertEquals(true, point.bool); + Assert.assertEquals("test", point.string); + Assert.assertTrue(point.object.matches("java.lang.Object@[a-z0-9]+")); } @Test @@ -167,22 +77,15 @@ public void testEqualsAndHashCode() { assertThat(queryA0.hashCode()).isNotEqualTo(queryB0.hashCode()); } - private Map readObject(String json) throws IOException { - Buffer source = new Buffer(); - source.writeString(json, Charset.forName("utf-8")); - Map params = new HashMap<>(); - JsonReader reader = JsonReader.of(source); - reader.beginObject(); - while (reader.hasNext()) { - String name = reader.nextName(); - Object value = reader.readJsonValue(); - params.put(name, value); - } - reader.endObject(); - return params; - } - private static String decode(String str) throws UnsupportedEncodingException { return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); } + + private static class Point { + int i; + double d; + String string; + Boolean bool; + String object; + } } From 70f8c1452c168cf9416f5f18f43081179a596405 Mon Sep 17 00:00:00 2001 From: Fernando Machado Date: Tue, 20 Mar 2018 16:38:03 +0100 Subject: [PATCH 134/148] Fixed setAccessible modif. 
on POJOs and changes are not reversed anymore --- src/main/java/org/influxdb/impl/InfluxDBResultMapper.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java index 2188bcd18..54f34721c 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -203,9 +203,10 @@ void setFieldValue(final T object, final Field field, final Object value) return; } Class fieldType = field.getType(); - boolean oldAccessibleState = field.isAccessible(); try { - field.setAccessible(true); + if (!field.isAccessible()) { + field.setAccessible(true); + } if (fieldValueModified(fieldType, field, object, value) || fieldValueForPrimitivesModified(fieldType, field, object, value) || fieldValueForPrimitiveWrappersModified(fieldType, field, object, value)) { @@ -219,8 +220,6 @@ void setFieldValue(final T object, final Field field, final Object value) + "The correct type is '%s' (current field value: '%s')."; throw new InfluxDBMapperException( String.format(msg, object.getClass().getName(), field.getName(), value.getClass().getName(), value)); - } finally { - field.setAccessible(oldAccessibleState); } } From 2fc928ece4e389efc45fa19eb6dc130773c19bef Mon Sep 17 00:00:00 2001 From: Henrik Niehaus Date: Tue, 20 Mar 2018 19:22:09 +0100 Subject: [PATCH 135/148] Added paragraph for parameter binding in queries in README Added short paragraph in README, which describes the parameter binding for queries. Also added an entry for the parameter binding pull request to the changelog. --- CHANGELOG.md | 6 ++++++ README.md | 16 ++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cb32802d..ecfbffb17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2.10 [unreleased] + +### Features + +- Support for parameter binding in queries ("prepared statements") [PR #429](https://github.com/influxdata/influxdb-java/pull/429) + ## 2.9 [2018-02-27] ### Features diff --git a/README.md b/README.md index 6a41e89fa..8365fce41 100644 --- a/README.md +++ b/README.md @@ -263,6 +263,22 @@ this.influxDB.query(new Query("SELECT idle FROM cpu", dbName), queryResult -> { }); ``` +#### Query using parameter binding ("prepared statements", version 2.10+ required) + +If your Query is based on user input, it is good practice to use parameter binding to avoid [injection attacks](https://en.wikipedia.org/wiki/SQL_injection). +You can create queries with parameter binding with the help of the QueryBuilder: + +```java +Query query = QueryBuilder.newQuery("SELECT * FROM cpu WHERE idle > $idle AND system > $system") + .forDatabase(dbName) + .bind("idle", 90) + .bind("system", 5) + .create(); +QueryResult results = influxDB.query(query); +``` + +The values of the bind() calls are bound to the placeholders in the query ($idle, $system). 
+
 #### Batch flush interval jittering (version 2.9+ required)
 
 When using large number of influxdb-java clients against a single server it may happen that all the clients

From 9e16b5cab2a466f7025510e215ee036c2a3f8cba Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Thu, 22 Mar 2018 09:56:37 +0100
Subject: [PATCH 136/148] Update retrofit from 2.3.0 -> 2.4.0

---
 pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pom.xml b/pom.xml
index cbea0c021..6a44302f7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -248,12 +248,12 @@
       com.squareup.retrofit2
       retrofit
-      2.3.0
+      2.4.0
       com.squareup.retrofit2
       converter-moshi
-      2.3.0
+      2.4.0

From c41b589f67858475fa943287f950dd0e0f9ac4d1 Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Thu, 22 Mar 2018 10:53:44 +0100
Subject: [PATCH 137/148] Use java10 instead of java9 because this will
 supersede it, jacoco update from 0.8.0 -> 0.8.1 because only this works with
 java10

---
 compile-and-test.sh | 2 +-
 pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/compile-and-test.sh b/compile-and-test.sh
index 8b10906d0..99b2ba4d6 100755
--- a/compile-and-test.sh
+++ b/compile-and-test.sh
@@ -6,7 +6,7 @@
 set -e
 INFLUXDB_VERSIONS="1.5 1.4 1.3 1.2 1.1"
-JAVA_VERSIONS="3-jdk-8-alpine 3-jdk-9-slim"
+JAVA_VERSIONS="3-jdk-8-alpine 3-jdk-10-slim"
 for java_version in ${JAVA_VERSIONS}
 do
diff --git a/pom.xml b/pom.xml
index 6a44302f7..9232e5968 100644
--- a/pom.xml
+++ b/pom.xml
@@ -168,7 +168,7 @@
       org.jacoco
       jacoco-maven-plugin
-      0.8.0
+      0.8.1

From 8721f99e829cc9ff91f20d335ddbbae6e59abc3b Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Sat, 24 Mar 2018 11:31:44 +0100
Subject: [PATCH 138/148] Speed up Travis CI build by creating a matrix of Java
 and InfluxDB versions

---
 .travis.yml | 22 +++++++++++--------
 compile-and-test.sh | 51 ++++++++++++++++++++-------------------------
 2 files changed, 36 insertions(+), 37 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 434bf711d..b768d1b22 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,16 +1,20 @@
-language: java
 sudo: required
-jdk:
-  - oraclejdk8
-
-addons:
-  apt:
-    packages:
-    - oracle-java8-installer # Updates JDK 8 to the latest available.
-
 services:
   - docker
+
+env:
+  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.5
+  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.4
+  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.3
+  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.2
+  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.1
+  - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.5
+  - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.4
+  - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.3
+  - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.2
+  - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.1
+
 script: ./compile-and-test.sh
 
 after_success:
diff --git a/compile-and-test.sh b/compile-and-test.sh
index 99b2ba4d6..7523f6e8b 100755
--- a/compile-and-test.sh
+++ b/compile-and-test.sh
@@ -4,36 +4,31 @@ #
 set -e
 
-INFLUXDB_VERSIONS="1.5 1.4 1.3 1.2 1.1"
+DEFAULT_INFLUXDB_VERSION="1.5"
+DEFAULT_MAVEN_JAVA_VERSION="3-jdk-10-slim"
 
-JAVA_VERSIONS="3-jdk-8-alpine 3-jdk-10-slim"
+INFLUXDB_VERSION="${INFLUXDB_VERSION:-$DEFAULT_INFLUXDB_VERSION}"
+MAVEN_JAVA_VERSION="${MAVEN_JAVA_VERSION:-$DEFAULT_MAVEN_JAVA_VERSION}"
+echo "Run tests with maven:${MAVEN_JAVA_VERSION} on influxdb-${INFLUXDB_VERSION}"
 
-for java_version in ${JAVA_VERSIONS}
-do
-  echo "Run tests with maven:${java_version}"
-for version in ${INFLUXDB_VERSIONS}
-do
-  echo "Tesing againts influxdb ${version}"
-  docker kill influxdb || true
-  docker rm influxdb || true
-  docker pull influxdb:${version}-alpine || true
-  docker run \
-       --detach \
-       --name influxdb \
-       --publish 8086:8086 \
-       --publish 8089:8089/udp \
-       --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \
-       influxdb:${version}-alpine
+docker kill influxdb || true
+docker rm influxdb || true
+docker pull influxdb:${INFLUXDB_VERSION}-alpine || true
+docker run \
+     --detach \
+     --name influxdb \
+     --publish 8086:8086 \
+     --publish 8089:8089/udp \
+     --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \
+     influxdb:${INFLUXDB_VERSION}-alpine
 
-  docker run -it --rm \
-       --volume $PWD:/usr/src/mymaven \
-       --volume $PWD/.m2:/root/.m2 \
-       --workdir /usr/src/mymaven \
-       --link=influxdb \
-       --env INFLUXDB_IP=influxdb \
-       maven:${java_version} mvn clean install
+docker run -it --rm \
+     --volume $PWD:/usr/src/mymaven \
+     --volume $PWD/.m2:/root/.m2 \
+     --workdir /usr/src/mymaven \
+     --link=influxdb \
+     --env INFLUXDB_IP=influxdb \
+     maven:${MAVEN_JAVA_VERSION} mvn clean install
 
-  docker kill influxdb || true
-done
-done
+docker kill influxdb || true

From 23776a8866dafc4361e23925fe984f1a58b806c2 Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Sat, 24 Mar 2018 11:49:10 +0100
Subject: [PATCH 139/148] Further speed up, only test the most recent influxdb
 with the old java8

---
 .travis.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index b768d1b22..2117c828c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,19 +1,19 @@
+language: java
+
 sudo: required
 
 services:
   - docker
 
+# We test against all influxdb versions with the most recent JDK.
+# Test only the most recent influxdb version with JDK8 which will be legacy soon.
 env:
-  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.5
-  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.4
-  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.3
-  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.2
-  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.1
   - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.5
   - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.4
   - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.3
   - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.2
   - MAVEN_JAVA_VERSION=3-jdk-10-slim INFLUXDB_VERSION=1.1
+  - MAVEN_JAVA_VERSION=3-jdk-8-slim INFLUXDB_VERSION=1.5
 
 script: ./compile-and-test.sh

From 6953795cbe2a56762e55482fbb9d5b51c4a23d37 Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Sat, 24 Mar 2018 11:49:45 +0100
Subject: [PATCH 140/148] Reduce the amount of points written in the
 performance test by a factor of 10 and therefore gain even more test speed

---
 src/test/java/org/influxdb/PerformanceTests.java | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java
index 1fd0a0444..65055c507 100644
--- a/src/test/java/org/influxdb/PerformanceTests.java
+++ b/src/test/java/org/influxdb/PerformanceTests.java
@@ -171,11 +171,11 @@ protected void check(InvocationOnMock invocation) {
      }
    };
-    answer.params.put("startTime", System.currentTimeMillis() + 80000);
+    answer.params.put("startTime", System.currentTimeMillis() + 8000);
     doAnswer(answer).when(spy).write(any(BatchPoints.class));
     spy.createDatabase(dbName);
-    BatchOptions batchOptions = BatchOptions.DEFAULTS.actions(100000).flushDuration(20000).bufferLimit(3000000).exceptionHandler((points, throwable) -> {
+    BatchOptions batchOptions = BatchOptions.DEFAULTS.actions(10000).flushDuration(2000).bufferLimit(300000).exceptionHandler((points, throwable) -> {
       System.out.println("+++++++++++ exceptionHandler +++++++++++");
       System.out.println(throwable);
       System.out.println("++++++++++++++++++++++++++++++++++++++++");
@@ -185,17 +185,17 @@ protected void check(InvocationOnMock invocation) {
     spy.enableBatch(batchOptions);
     String rp = TestUtils.defaultRetentionPolicy(spy.version());
-    for (long i = 0; i < 400000; i++) {
+    for (long i = 0; i < 40000; i++) {
       Point point = Point.measurement("s").time(i, TimeUnit.MILLISECONDS).addField("v", 1.0).build();
       spy.write(dbName, rp, point);
     }
     System.out.println("sleep");
-    Thread.sleep(120000);
+    Thread.sleep(12000);
     try {
       QueryResult result = spy.query(new Query("select count(v) from s", dbName));
       double d = Double.parseDouble(result.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString());
-      Assertions.assertEquals(400000, d);
+      Assertions.assertEquals(40000, d);
     } catch (Exception e) {
       System.out.println("+++++++++++++++++count() +++++++++++++++++++++");
       System.out.println(e);

From 39d770774146c21c1d4865ffbd330a7fa5f4d881 Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Sat, 24 Mar 2018 11:56:31 +0100
Subject: [PATCH 141/148] Do not execute any java commands in the build

---
 .travis.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 2117c828c..93b2893eb 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,3 @@
-language: java
-
 sudo: required
 
 services:
   - docker

From a5abe4548f1a4b91319af1476a94e9a2ab79ea02 Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Sat, 24 Mar 2018 13:46:51 +0100
Subject: [PATCH 142/148] Update surefire plugin from 2.20.0 -> 2.21.0

---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 9232e5968..4eb8b322c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -88,7 +88,7 @@
       org.apache.maven.plugins
       maven-surefire-plugin
-      2.20
+      2.21.0
       org.apache.maven.plugins

From 997992ec51a7d9ed0bc08ae54021e33394d19eac Mon Sep 17 00:00:00 2001
From: kub
Date: Fri, 23 Mar 2018 16:21:10 +0100
Subject: [PATCH 143/148] Allow to figure out whether the Point.Builder has any
 fields

Since the build method validates that the field set is not empty, there
should also be a way to check whether the Builder contains any fields
(to prevent the build method from throwing an exception).
---
 src/main/java/org/influxdb/dto/Point.java | 9 +++++++++
 src/test/java/org/influxdb/dto/PointTest.java | 9 +++++++++
 2 files changed, 18 insertions(+)

diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java
index acac4a82d..97122ca54 100644
--- a/src/main/java/org/influxdb/dto/Point.java
+++ b/src/main/java/org/influxdb/dto/Point.java
@@ -187,6 +187,15 @@ public Builder time(final long timeToSet, final TimeUnit precisionToSet) {
       return this;
     }
 
+    /**
+     * Does this builder contain any fields?
+     *
+     * @return true, if the builder contains any fields, false otherwise.
+     */
+    public boolean hasFields() {
+      return !fields.isEmpty();
+    }
+
     /**
      * Create a new Point.
     *
diff --git a/src/test/java/org/influxdb/dto/PointTest.java b/src/test/java/org/influxdb/dto/PointTest.java
index e828fd9cc..33e11f631 100644
--- a/src/test/java/org/influxdb/dto/PointTest.java
+++ b/src/test/java/org/influxdb/dto/PointTest.java
@@ -330,4 +330,13 @@ public void testUnEquals() throws Exception {
     // THEN equals returns true
     assertThat(equals).isEqualTo(false);
   }
+
+  @Test
+  public void testBuilderHasFields() {
+    Point.Builder pointBuilder = Point.measurement("nulltest").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar");
+    assertThat(pointBuilder.hasFields()).isFalse();
+
+    pointBuilder.addField("testfield", 256);
+    assertThat(pointBuilder.hasFields()).isTrue();
+  }
 }

From 06416c309005ff29c5c10fcd8e7d23d083282778 Mon Sep 17 00:00:00 2001
From: Stefan Majer
Date: Mon, 9 Apr 2018 13:59:00 +0200
Subject: [PATCH 144/148] Update junit from 5.1.0 -> 5.1.1

---
 pom.xml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pom.xml b/pom.xml
index 4eb8b322c..4120fd6ed 100644
--- a/pom.xml
+++ b/pom.xml
@@ -218,13 +218,13 @@
       org.junit.jupiter
       junit-jupiter-engine
-      5.1.0
+      5.1.1
       test
       org.junit.platform
       junit-platform-runner
-      1.1.0
+      1.1.1
       test
@@ -242,7 +242,7 @@
       org.mockito
       mockito-core
-      2.15.0
+      2.18.0
       test

From 40c63d4caa8db4e235a45f63c31a9eeb9d5556e5 Mon Sep 17 00:00:00 2001
From: Tomas Klapka
Date: Thu, 26 Apr 2018 16:41:36 +0200
Subject: [PATCH 145/148] changing docker image for tests in main pom

---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 4120fd6ed..a348e7b29 100644
--- a/pom.xml
+++ b/pom.xml
@@ -272,7 +272,7 @@
       release
-      influxdb:latest
+      influxdb:alpine

From 55e159aeb30b6336d6ba46923708ca0b57b3a31b Mon Sep 17 00:00:00 2001
From: Tomas Klapka
Date: Thu, 26 Apr 2018 16:44:31 +0200
Subject: [PATCH 146/148] [maven-release-plugin] prepare release
 influxdb-java-2.10

---
 pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pom.xml b/pom.xml
index a348e7b29..e09253a25 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
   org.influxdb
   influxdb-java
   jar
-  2.10-SNAPSHOT
+  2.10
   influxdb java bindings
   Java API to access the InfluxDB REST API
   http://www.influxdb.org
@@ -28,7 +28,7 @@ scm:git:git@github.com:influxdata/influxdb-java.git scm:git:git@github.com:influxdata/influxdb-java.git git@github.com:influxdata/influxdb-java.git - HEAD + influxdb-java-2.10 From 4a6d3ce3d059d34499d92f362587804b9b51110d Mon Sep 17 00:00:00 2001 From: Tomas Klapka Date: Thu, 26 Apr 2018 16:44:39 +0200 Subject: [PATCH 147/148] [maven-release-plugin] prepare for next development iteration --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index e09253a25..6a7723db4 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ org.influxdb influxdb-java jar - 2.10 + 2.11-SNAPSHOT influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org @@ -28,7 +28,7 @@ scm:git:git@github.com:influxdata/influxdb-java.git scm:git:git@github.com:influxdata/influxdb-java.git git@github.com:influxdata/influxdb-java.git - influxdb-java-2.10 + HEAD From f5634ce5205ec24160c34ba77f42c2139b42471c Mon Sep 17 00:00:00 2001 From: Hoan Xuan Le Date: Wed, 9 May 2018 10:18:16 +0700 Subject: [PATCH 148/148] fix build javadocs failed + fix unit test --- src/main/java/org/influxdb/InfluxDB.java | 22 ++++++++++++++++++++-- src/main/java/org/influxdb/dto/Point.java | 3 +++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index 7ff02165a..ec9b7b32a 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -299,9 +299,18 @@ public void write(final String database, final String retentionPolicy, /** * Write a set of Points to the influxdb database with the string records. * - * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"} + * @see 2696 * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use + * @param precision + * the time precision to use * @param records + * the points in the correct lineprotocol. */ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final TimeUnit precision, final String records); @@ -326,9 +335,18 @@ public void write(final String database, final String retentionPolicy, /** * Write a set of Points to the influxdb database with the list of string records. * - * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"} + * @see 2696 * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use + * @param precision + * the time precision to use * @param records + * the List of points in the correct lineprotocol. */ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final TimeUnit precision, final List records); diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java index 67f486762..fd0175c41 100644 --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -423,6 +423,9 @@ private void formatedTime(final StringBuilder sb) { } private StringBuilder formatedTime(final StringBuilder sb, final TimeUnit precision) { + if (this.time == null || this.precision == null) { + return sb; + } sb.append(" ").append(precision.convert(this.time, this.precision)); return sb; }
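
As a usage illustration for the `hasFields()` check introduced in PATCH 143: the snippet below is a minimal sketch, not part of the patches above. It assumes an existing `influxDB` client plus `dbName` and `rp` variables as in the README examples earlier in the series, and a hypothetical `idleWasSampled` flag standing in for any condition under which fields are added.

```java
// Build a point whose fields are only added conditionally, then use the new
// hasFields() check so build() is never called on a field-less builder
// (build() rejects points without any fields).
Point.Builder builder = Point.measurement("cpu")
    .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
    .tag("host", "server01");
if (idleWasSampled) {               // hypothetical condition
  builder.addField("idle", 90.0);
}
if (builder.hasFields()) {
  influxDB.write(dbName, rp, builder.build());
}
```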