From e70ab84df21bdfe54cea3562ba60513f7cc8b5bf Mon Sep 17 00:00:00 2001
From: Artem Barger
Date: Tue, 27 Jun 2017 01:32:39 +0300
Subject: [PATCH] [FAB-4901]: Harden delivery service unit tests.

In commit 74cc3b, the delivery service test was extended with a timeout
assertion to check that the client switches over to the backup ordering
service. However, the timeout was too small, so the client did not have
enough time to switch to the alive orderer endpoint.

This commit also fixes a bug in the failover waiting loop (a missing for
statement) and adds a few more assertions to make the problem easier to
spot in case of test failure.

Change-Id: Ie33f4e6c0ac3998cc790778784d8cb732a333803
Signed-off-by: Artem Barger
Signed-off-by: Gennady Laventman
---
 .../blocksprovider/blocksprovider.go       |  4 +--
 .../blocksprovider/blocksprovider_test.go  |  2 +-
 core/deliverservice/deliveryclient_test.go | 27 ++++++++++++++-----
 3 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/core/deliverservice/blocksprovider/blocksprovider.go b/core/deliverservice/blocksprovider/blocksprovider.go
index 136348f4d74..bea8ca3044f 100644
--- a/core/deliverservice/blocksprovider/blocksprovider.go
+++ b/core/deliverservice/blocksprovider/blocksprovider.go
@@ -104,7 +104,7 @@ type blocksProviderImpl struct {
 
 const wrongStatusThreshold = 10
 
-var maxRetryDelay = time.Second * 10
+var MaxRetryDelay = time.Second * 10
 
 var logger *logging.Logger // package-level logger
 
@@ -152,7 +152,7 @@ func (b *blocksProviderImpl) DeliverBlocks() {
 				errorStatusCounter = 0
 				logger.Warningf("[%s] Got error %v", b.chainID, t)
 			}
-			maxDelay := float64(maxRetryDelay)
+			maxDelay := float64(MaxRetryDelay)
 			currDelay := float64(time.Duration(math.Pow(2, float64(statusCounter))) * 100 * time.Millisecond)
 			time.Sleep(time.Duration(math.Min(maxDelay, currDelay)))
 			if currDelay < maxDelay {
diff --git a/core/deliverservice/blocksprovider/blocksprovider_test.go b/core/deliverservice/blocksprovider/blocksprovider_test.go
index 290eb59d636..5e3bbe07616 100644
--- a/core/deliverservice/blocksprovider/blocksprovider_test.go
+++ b/core/deliverservice/blocksprovider/blocksprovider_test.go
@@ -32,7 +32,7 @@ import (
 )
 
 func init() {
-	maxRetryDelay = time.Second
+	MaxRetryDelay = time.Second
 }
 
 type mockMCS struct {
diff --git a/core/deliverservice/deliveryclient_test.go b/core/deliverservice/deliveryclient_test.go
index 6dd34298027..198384ce477 100644
--- a/core/deliverservice/deliveryclient_test.go
+++ b/core/deliverservice/deliveryclient_test.go
@@ -19,6 +19,7 @@ package deliverclient
 import (
 	"context"
 	"errors"
+	"fmt"
 	"runtime"
 	"sync"
 	"sync/atomic"
@@ -250,6 +251,9 @@ func TestDeliverServiceFailover(t *testing.T) {
 }
 
 func TestDeliverServiceServiceUnavailable(t *testing.T) {
+	orgMaxRetryDelay := blocksprovider.MaxRetryDelay
+	blocksprovider.MaxRetryDelay = time.Millisecond * 200
+	defer func() { blocksprovider.MaxRetryDelay = orgMaxRetryDelay }()
 	defer ensureNoGoroutineLeak(t)()
 	// Scenario: bring up 2 ordering service instances,
 	// Make the instance the client connects to fail after a delivery of a block and send SERVICE_UNAVAILABLE
@@ -292,6 +296,10 @@ func TestDeliverServiceServiceUnavailable(t *testing.T) {
 	activeInstance, backupInstance := waitForConnectionToSomeOSN()
 	assert.NotNil(t, activeInstance)
 	assert.NotNil(t, backupInstance)
+	// Check that the delivery client is connected to the active instance
+	assert.Equal(t, activeInstance.ConnCount(), 1)
+	// and not to the backup instance
+	assert.Equal(t, backupInstance.ConnCount(), 0)
 
 	// Send first block
 	go activeInstance.SendBlock(li.Height)
@@ -306,7 +314,7 @@ func TestDeliverServiceServiceUnavailable(t *testing.T) {
 
 	// Fail instance delivery client connected to
 	activeInstance.Fail()
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
 	wg := sync.WaitGroup{}
@@ -314,18 +322,22 @@ func TestDeliverServiceServiceUnavailable(t *testing.T) {
 	go func(ctx context.Context) {
 		defer wg.Done()
-		select {
-		case <-time.After(time.Millisecond * 100):
-			if backupInstance.ConnCount() > 0 {
+		for {
+			select {
+			case <-time.After(time.Millisecond * 100):
+				if backupInstance.ConnCount() > 0 {
+					return
+				}
+			case <-ctx.Done():
 				return
 			}
-		case <-ctx.Done():
-			return
 		}
 	}(ctx)
 
 	wg.Wait()
 	assert.NoError(t, ctx.Err(), "Delivery client has not failed over to alive ordering service")
+	// Check that delivery client was indeed connected
+	assert.Equal(t, backupInstance.ConnCount(), 1)
 
 	// Ensure the client asks blocks from the other ordering service node
 	assertBlockDissemination(li.Height, gossipServiceAdapter.GossipBlockDisseminations, t)
 
@@ -452,7 +464,8 @@ func assertBlockDissemination(expectedSeq uint64, ch chan uint64, t *testing.T)
 	select {
 	case seq := <-ch:
 		assert.Equal(t, expectedSeq, seq)
 	case <-time.After(time.Second * 5):
-		assert.Fail(t, "Didn't gossip a new block within a timely manner")
+		assert.FailNow(t, fmt.Sprintf("Didn't gossip a new block with seq num %d within a timely manner", expectedSeq))
+		t.Fatal()
 	}
 }
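
Note on the failover waiting loop: the fix wraps the select in a for statement so the goroutine keeps polling backupInstance.ConnCount() every 100ms until it either observes a connection or the 30-second context expires. Below is a minimal standalone sketch of that poll-until-condition pattern; the names waitFor and the stand-in condition are hypothetical illustrations, not part of the patch.

// Sketch of the poll-until-condition pattern used by the fixed test goroutine.
package main

import (
	"context"
	"fmt"
	"time"
)

// waitFor polls cond at the given interval until it returns true or ctx expires.
func waitFor(ctx context.Context, interval time.Duration, cond func() bool) error {
	for {
		select {
		case <-time.After(interval):
			if cond() {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Stand-in condition; in the test above this would be
	// backupInstance.ConnCount() > 0.
	start := time.Now()
	err := waitFor(ctx, 100*time.Millisecond, func() bool {
		return time.Since(start) > 300*time.Millisecond
	})
	fmt.Println("waitFor returned:", err) // nil once the condition holds
}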
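
Note on MaxRetryDelay: exporting the variable lets the test shrink the retry cap to 200ms (and restore it with defer), so the blocks provider retries fast enough to fail over within the test timeout. A rough sketch, assuming only a package-level cap named MaxRetryDelay, of the capped exponential backoff computed in DeliverBlocks (2^failures * 100ms, capped at the maximum):

// Sketch of the capped exponential backoff around the renamed MaxRetryDelay.
package main

import (
	"fmt"
	"math"
	"time"
)

// MaxRetryDelay mirrors the exported variable from the patch; exporting it
// lets tests lower it (e.g. to 200ms) so failover happens quickly.
var MaxRetryDelay = 10 * time.Second

// retryDelay returns 2^failures * 100ms, capped at MaxRetryDelay.
func retryDelay(failures int) time.Duration {
	maxDelay := float64(MaxRetryDelay)
	currDelay := float64(time.Duration(math.Pow(2, float64(failures))) * 100 * time.Millisecond)
	return time.Duration(math.Min(maxDelay, currDelay))
}

func main() {
	for i := 0; i <= 8; i++ {
		fmt.Printf("failure %d -> sleep %v\n", i, retryDelay(i))
	}
	// failure 0 -> 100ms, 1 -> 200ms, ... 6 -> 6.4s, 7 and up -> capped at 10s
}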