Allow simple/async sessions to request data as needed
Gregory Woods authored and Gregory Woods committed Dec 13, 2019
1 parent bc105be commit d4708c5
Showing 5 changed files with 233 additions and 4 deletions.
@@ -49,12 +49,15 @@ public class AutoPullResponseHandler extends BasicPullResponseHandler implements
{
private static final Queue<Record> UNINITIALIZED_RECORDS = Iterables.emptyQueue();
private final long fetchSize;
private final long LOW_RECORD_WATERMARK;
private final long HIGH_RECORD_WATERMARK;

// initialized lazily when first record arrives
private Queue<Record> records = UNINITIALIZED_RECORDS;

private ResultSummary summary;
private Throwable failure;
private boolean isAutoPullEnabled = true;

private CompletableFuture<Record> recordFuture;
private CompletableFuture<ResultSummary> summaryFuture;
@@ -64,6 +67,19 @@ public AutoPullResponseHandler(Query query, RunResponseHandler runResponseHandle
{
super(query, runResponseHandler, connection, metadataExtractor, completionListener );
this.fetchSize = fetchSize;

// For pull-all (unlimited fetch size), ensure the conditions for disabling auto-pull are never met
if ( fetchSize == UNLIMITED_FETCH_SIZE )
{
this.HIGH_RECORD_WATERMARK = Long.MAX_VALUE;
this.LOW_RECORD_WATERMARK = Long.MAX_VALUE;
}
else
{
this.HIGH_RECORD_WATERMARK = (long) (fetchSize * 0.7);
this.LOW_RECORD_WATERMARK = (long) (fetchSize * 0.3);
}

installRecordAndSummaryConsumers();
}
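
The watermarks above are a fixed 70%/30% split of the configured fetch size, with the unlimited case pinning both to Long.MAX_VALUE so auto-pull never pauses. A minimal standalone sketch of that calculation (illustration only, not driver code; the class name is invented here, and the -1 sentinel mirrors the value the pull-all test further down passes for UNLIMITED_FETCH_SIZE):

// Standalone illustration of the 70% / 30% watermark split used above (not driver code).
public class WatermarkSketch
{
    // Assumed sentinel value, matching the -1 the pull-all test below passes as the fetch size.
    static final long UNLIMITED_FETCH_SIZE = -1;

    static long highWatermark( long fetchSize )
    {
        return fetchSize == UNLIMITED_FETCH_SIZE ? Long.MAX_VALUE : (long) (fetchSize * 0.7);
    }

    static long lowWatermark( long fetchSize )
    {
        return fetchSize == UNLIMITED_FETCH_SIZE ? Long.MAX_VALUE : (long) (fetchSize * 0.3);
    }

    public static void main( String[] args )
    {
        System.out.println( highWatermark( 4 ) + " / " + lowWatermark( 4 ) ); // 2 / 1
        System.out.println( highWatermark( 7 ) + " / " + lowWatermark( 7 ) ); // 4 / 2
        System.out.println( highWatermark( UNLIMITED_FETCH_SIZE ) );          // 9223372036854775807 (never pauses)
    }
}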

@@ -96,7 +112,10 @@ private void installRecordAndSummaryConsumers()

if ( error == null && summary == null ) // has_more
{
request( fetchSize );
if ( isAutoPullEnabled )
{
request( fetchSize );
}
}
} );
}
@@ -198,11 +217,27 @@ private void enqueueRecord( Record record )
records = new ArrayDeque<>();
}

// Too many records in the queue; pause automatic pull requests until the buffer drains
if ( records.size() > HIGH_RECORD_WATERMARK )
{
isAutoPullEnabled = false;
}

records.add( record );
}

private Record dequeueRecord()
{
if ( records.size() < LOW_RECORD_WATERMARK )
{
// if not in the streaming state, we need to restart streaming
if ( state() != State.STREAMING_STATE )
{
request( fetchSize );
}
isAutoPullEnabled = true;
}

return records.poll();
}

@@ -26,6 +26,7 @@
import reactor.test.StepVerifier;

import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Consumer;
@@ -38,16 +39,17 @@
import org.neo4j.driver.GraphDatabase;
import org.neo4j.driver.Logger;
import org.neo4j.driver.Record;
import org.neo4j.driver.Session;
import org.neo4j.driver.Result;
import org.neo4j.driver.Session;
import org.neo4j.driver.Transaction;
import org.neo4j.driver.async.AsyncSession;
import org.neo4j.driver.exceptions.TransientException;
import org.neo4j.driver.internal.cluster.RoutingSettings;
import org.neo4j.driver.internal.retry.RetrySettings;
import org.neo4j.driver.internal.util.Clock;
import org.neo4j.driver.internal.util.io.ChannelTrackingDriverFactory;
import org.neo4j.driver.reactive.RxSession;
import org.neo4j.driver.reactive.RxResult;
import org.neo4j.driver.reactive.RxSession;
import org.neo4j.driver.util.StubServer;

import static java.util.Arrays.asList;
@@ -72,6 +74,7 @@
import static org.neo4j.driver.util.StubServer.INSECURE_CONFIG;
import static org.neo4j.driver.util.StubServer.insecureBuilder;
import static org.neo4j.driver.util.TestUtil.asOrderedSet;
import static org.neo4j.driver.util.TestUtil.await;

class DirectDriverBoltKitTest
{
@@ -306,6 +309,53 @@ void shouldChangeFetchSize() throws Exception
}
}

@Test
void shouldOnlyPullRecordsWhenNeededSimpleSession() throws Exception
{
StubServer server = StubServer.start( "streaming_records_v4_buffering.script", 9001 );
try
{
try ( Driver driver = GraphDatabase.driver( "bolt://localhost:9001", INSECURE_CONFIG ) )
{
Session session = driver.session( builder().withFetchSize( 2 ).build() );
Result result = session.run( "MATCH (n) RETURN n.name" );
ArrayList<String> resultList = new ArrayList<>();
result.forEachRemaining( ( rec ) -> resultList.add( rec.get( 0 ).asString() ) );

assertEquals( resultList, asList( "Bob", "Alice", "Tina", "Frank", "Daisy", "Clive" ) );
}
}
finally
{
assertEquals( 0, server.exitStatus() );
}
}

@Test
void shouldOnlyPullRecordsWhenNeededAsyncSession() throws Exception
{
StubServer server = StubServer.start( "streaming_records_v4_buffering.script", 9001 );
try
{
try ( Driver driver = GraphDatabase.driver( "bolt://localhost:9001", INSECURE_CONFIG ) )
{
AsyncSession session = driver.asyncSession( builder().withFetchSize( 2 ).build() );

ArrayList<String> resultList = new ArrayList<>();

await( session.runAsync( "MATCH (n) RETURN n.name" )
.thenCompose( resultCursor ->
resultCursor.forEachAsync( record -> resultList.add( record.get( 0 ).asString() ) ) ) );

assertEquals( resultList, asList( "Bob", "Alice", "Tina", "Frank", "Daisy", "Clive" ) );
}
}
finally
{
assertEquals( 0, server.exitStatus() );
}
}

@Test
void shouldAllowPullAll() throws Exception
{
@@ -22,7 +22,9 @@

import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
@@ -35,6 +37,7 @@
import org.neo4j.driver.internal.BoltServerAddress;
import org.neo4j.driver.internal.InternalRecord;
import org.neo4j.driver.internal.spi.Connection;
import org.neo4j.driver.internal.value.BooleanValue;
import org.neo4j.driver.summary.ResultSummary;
import org.neo4j.driver.summary.QueryType;

@@ -679,7 +682,6 @@ void shouldReturnNotTransformedListInListAsync()

assertEquals( expectedRecords, list );
}

protected T newHandler()
{
return newHandler( new Query( "RETURN 1" ) );
@@ -18,18 +18,31 @@
*/
package org.neo4j.driver.internal.handlers.pulln;

import org.junit.jupiter.api.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

import org.neo4j.driver.Query;
import org.neo4j.driver.Value;
import org.neo4j.driver.internal.handlers.PullAllResponseHandlerTestBase;
import org.neo4j.driver.internal.handlers.PullResponseCompletionListener;
import org.neo4j.driver.internal.handlers.RunResponseHandler;
import org.neo4j.driver.internal.spi.Connection;
import org.neo4j.driver.internal.value.BooleanValue;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.neo4j.driver.Values.value;
import static org.neo4j.driver.Values.values;
import static org.neo4j.driver.internal.handlers.pulln.FetchSizeUtil.DEFAULT_FETCH_SIZE;
import static org.neo4j.driver.internal.messaging.v1.BoltProtocolV1.METADATA_EXTRACTOR;

@@ -46,4 +59,114 @@ protected AutoPullResponseHandler newHandler(Query query, List<String> queryKeys
handler.prePopulateRecords();
return handler;
}

protected AutoPullResponseHandler newHandler(Query query, Connection connection, long fetchSize )
{
RunResponseHandler runResponseHandler = new RunResponseHandler( new CompletableFuture<>(), METADATA_EXTRACTOR );
runResponseHandler.onSuccess( emptyMap() );
AutoPullResponseHandler handler =
new AutoPullResponseHandler(query, runResponseHandler, connection, METADATA_EXTRACTOR, mock( PullResponseCompletionListener.class ),
fetchSize );
handler.prePopulateRecords();
return handler;
}

@Test
void shouldKeepRequestingWhenBetweenRange() {
Connection connection = connectionMock();
InOrder inOrder = Mockito.inOrder( connection );

// highWatermark=2, lowWatermark=1
AutoPullResponseHandler handler = newHandler( new Query("RETURN 1"), connection, 4 );

Map<String,Value> metaData = new HashMap<>( 1 );
metaData.put( "has_more", BooleanValue.TRUE );

inOrder.verify( connection ).writeAndFlush( any(), any() );

handler.onRecord( values( 1 ) );
handler.onRecord( values( 2 ) );
handler.onSuccess( metaData ); //2 in the record queue

// should send another PULL request since the high watermark has not been reached
inOrder.verify( connection ).writeAndFlush( any(), any() );
}

@Test
void shouldStopRequestingWhenOverMaxWatermark() {
Connection connection = connectionMock();
InOrder inOrder = Mockito.inOrder( connection );

// highWatermark=2, lowWatermark=1
AutoPullResponseHandler handler = newHandler( new Query("RETURN 1"), connection, 4 );

Map<String,Value> metaData = new HashMap<>( 1 );
metaData.put( "has_more", BooleanValue.TRUE );

inOrder.verify( connection ).writeAndFlush( any(), any() );

handler.onRecord( values( 1 ) );
handler.onRecord( values( 2 ) );
handler.onRecord( values( 3 ) );
handler.onRecord( values( 4 ) );
handler.onSuccess( metaData );

// only the initial writeAndFlush()
verify( connection , times( 1 ) ).writeAndFlush( any(),any() );
}

@Test
void shouldRestartRequestingWhenMinimumWatermarkMet() {
Connection connection = connectionMock();
InOrder inOrder = Mockito.inOrder( connection );

// highWatermark=4, lowWatermark=2
AutoPullResponseHandler handler = newHandler( new Query("RETURN 1"), connection, 7);

Map<String,Value> metaData = new HashMap<>( 1 );
metaData.put( "has_more", BooleanValue.TRUE );

inOrder.verify( connection ).writeAndFlush( any(), any() );

handler.onRecord( values( 1 ) );
handler.onRecord( values( 2 ) );
handler.onRecord( values( 3 ) );
handler.onRecord( values( 4 ) );
handler.onRecord( values( 5 ) );
handler.onRecord( values( 6 ) );
handler.onSuccess( metaData );

verify( connection , times( 1 ) ).writeAndFlush( any(),any() );

handler.nextAsync();
handler.nextAsync();
handler.nextAsync();
handler.nextAsync();
handler.nextAsync();
handler.nextAsync();

inOrder.verify( connection ).writeAndFlush( any() , any() );
}

@Test
void shouldKeepRequestingMoreRecordsWhenPullAll() {
Connection connection = connectionMock();
AutoPullResponseHandler handler = newHandler( new Query("RETURN 1"), connection, -1);

Map<String,Value> metaData = new HashMap<>( 1 );
metaData.put( "has_more", BooleanValue.TRUE );

handler.onRecord( values( 1 ) );
handler.onSuccess( metaData );

handler.onRecord( values( 2 ) );
handler.onSuccess( metaData );

handler.onRecord( values( 3 ) );
handler.onSuccess( emptyMap() );

verify( connection , times( 3 ) ).writeAndFlush( any(),any() );
}


}
19 changes: 19 additions & 0 deletions driver/src/test/resources/streaming_records_v4_buffering.script
@@ -0,0 +1,19 @@
!: BOLT 4
!: AUTO RESET
!: AUTO HELLO
!: AUTO GOODBYE

C: RUN "MATCH (n) RETURN n.name" {} {}
PULL { "n": 2 }
S: SUCCESS {"fields": ["n.name"]}
RECORD ["Bob"]
RECORD ["Alice"]
SUCCESS {"has_more": true}
C: PULL { "n": 2 }
S: RECORD ["Tina"]
RECORD ["Frank"]
SUCCESS {"has_more": true}
C: PULL { "n": 2 }
S: RECORD ["Daisy"]
RECORD ["Clive"]
SUCCESS {}
