testing: Reduce sleep duration
We have a number of second-long sleeps that slow down our unit tests by
a cumulative ~12 seconds. We don't actually need to sleep that long -
there's no real server whose response we must honour - so...don't.

Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
stephenfin committed Apr 4, 2024
1 parent c8f8bb0 commit 8f87bf1
Showing 2 changed files with 21 additions and 16 deletions.
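
The change is mechanical: every fixed sleep keeps its role as a synchronisation aid but loses three or more orders of magnitude. A minimal before/after sketch of the pattern — the helper and struct names below are illustrative, not part of the commit:

package testing

import (
	"sync"
	"time"
)

// reauthInfo mirrors the shape the tests below use (name assumed).
type reauthInfo struct {
	mut        sync.RWMutex
	numreauths int
}

// simulateSlowReauth stands in for the test's ReauthFunc. The sleep only
// needs to be long enough for concurrent requests to pile up behind the
// reauthentication; it does not need to mimic a real server's latency.
func simulateSlowReauth(info *reauthInfo) {
	// before: time.Sleep(1 * time.Second)
	time.Sleep(10 * time.Microsecond)
	info.mut.Lock()
	info.numreauths++
	info.mut.Unlock()
}
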
27 changes: 16 additions & 11 deletions testing/provider_client_test.go
@@ -68,7 +68,7 @@ func TestConcurrentReauth(t *testing.T) {
 	p.SetToken(prereauthTok)
 	p.ReauthFunc = func(_ context.Context) error {
 		p.SetThrowaway(true)
-		time.Sleep(1 * time.Second)
+		time.Sleep(10 * time.Microsecond)
 		p.AuthenticatedHeaders()
 		info.mut.Lock()
 		info.numreauths++
@@ -242,7 +242,7 @@ func TestRequestThatCameDuringReauthWaitsUntilItIsCompleted(t *testing.T) {
 		if info.numreauths == 0 {
 			info.mut.RUnlock()
 			close(info.reauthCh)
-			time.Sleep(1 * time.Second)
+			time.Sleep(10 * time.Microsecond)
 		} else {
 			info.mut.RUnlock()
 		}
@@ -459,26 +459,28 @@ func retryBackoffTest(retryCounter *uint, t *testing.T) gophercloud.RetryBackoff
 
 		var sleep time.Duration
 
-		// Parse delay seconds or HTTP date
+		// Parse delay seconds or HTTP date. Note that we don't actually _use_
+		// these exact times in the sleep since that takes too long
 		if v, err := strconv.ParseUint(retryAfter, 10, 32); err == nil {
-			sleep = time.Duration(v) * time.Second
+			sleep = time.Duration(v) * time.Millisecond
 		} else if v, err := time.Parse(http.TimeFormat, retryAfter); err == nil {
 			sleep = time.Until(v)
 		} else {
 			return e
 		}
 
 		if ctx != nil {
-			t.Logf("Context sleeping for %d milliseconds", sleep.Milliseconds())
+			// this can go negative since we're using milliseconds
+			t.Logf("Context (fake) sleeping for %d milliseconds", sleep.Milliseconds())
 			select {
-			case <-time.After(sleep):
+			case <-time.After(sleep): // we only sleep for 1% of the time to speed things up
 				t.Log("sleep is over")
 			case <-ctx.Done():
 				t.Log(ctx.Err())
 				return e
 			}
 		} else {
-			t.Logf("Sleeping for %d milliseconds", sleep.Milliseconds())
+			t.Logf("Fake sleeping for %d milliseconds", sleep.Milliseconds())
 			time.Sleep(sleep)
 			t.Log("sleep is over")
 		}
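
Standing alone, the backoff trick in retryBackoffTest above amounts to: parse Retry-After as usual, but interpret integer values as milliseconds rather than seconds, and stay cancellable while waiting. A self-contained sketch of that idea — the helper names here are mine, not gophercloud API:

package testing

import (
	"context"
	"net/http"
	"strconv"
	"time"
)

// scaledRetryAfter parses a Retry-After header for test purposes:
// integer values are read as milliseconds instead of seconds, while
// HTTP dates are used as-is (the tests generate near-future dates).
func scaledRetryAfter(retryAfter string) (time.Duration, bool) {
	if v, err := strconv.ParseUint(retryAfter, 10, 32); err == nil {
		return time.Duration(v) * time.Millisecond, true
	}
	if v, err := time.Parse(http.TimeFormat, retryAfter); err == nil {
		return time.Until(v), true
	}
	return 0, false
}

// sleepOrCancel waits for d unless ctx ends first, mirroring the
// select in retryBackoffTest.
func sleepOrCancel(ctx context.Context, d time.Duration) error {
	select {
	case <-time.After(d):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
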
@@ -503,7 +505,7 @@ func TestRequestRetry(t *testing.T) {
 	defer th.TeardownHTTP()
 
 	th.Mux.HandleFunc("/route", func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Retry-After", "1")
+		w.Header().Set("Retry-After", "10")
 
 		//always reply 429
 		http.Error(w, "retry later", http.StatusTooManyRequests)
@@ -530,7 +532,7 @@ func TestRequestRetryHTTPDate(t *testing.T) {
 	defer th.TeardownHTTP()
 
 	th.Mux.HandleFunc("/route", func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Retry-After", time.Now().Add(1*time.Second).UTC().Format(http.TimeFormat))
+		w.Header().Set("Retry-After", time.Now().Add(10*time.Millisecond).UTC().Format(http.TimeFormat))
 
 		//always reply 429
 		http.Error(w, "retry later", http.StatusTooManyRequests)
@@ -595,12 +597,15 @@ func TestRequestRetrySuccess(t *testing.T) {
 	th.AssertEquals(t, retryCounter, uint(0))
 }
 
+// TestRequestRetryContext tests that we stop retries if our context is
+// cancelled before all potential retries have been completed.
 func TestRequestRetryContext(t *testing.T) {
 	var retryCounter uint
 
 	ctx, cancel := context.WithCancel(context.Background())
 	go func() {
-		sleep := 2.5 * 1000 * time.Millisecond
+		// we have 3 retries of 10 ms each, so cancel the context before then (25 ms < 30 ms)
+		sleep := 2.5 * 10 * time.Millisecond
 		time.Sleep(sleep)
 		cancel()
 	}()
@@ -616,7 +621,7 @@ func TestRequestRetryContext(t *testing.T) {
 	defer th.TeardownHTTP()
 
 	th.Mux.HandleFunc("/route", func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Retry-After", "1")
+		w.Header().Set("Retry-After", "10")
 
 		//always reply 429
 		http.Error(w, "retry later", http.StatusTooManyRequests)
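
The timing arithmetic in the comment above is what keeps this test deterministic: cancellation must land strictly between retry boundaries. A rough standalone illustration of that relationship — constants and names are illustrative, not the commit's code:

package testing

import (
	"context"
	"testing"
	"time"
)

// TestCancelMidRetry shows the timing used above: with three 10 ms
// "retries" (30 ms total), cancelling at 25 ms lands after the second
// retry completes but before the third does.
func TestCancelMidRetry(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(25 * time.Millisecond) // 2.5 retries' worth
		cancel()
	}()

	retries := 0
	for retries < 3 {
		select {
		case <-time.After(10 * time.Millisecond): // one simulated retry interval
			retries++
		case <-ctx.Done():
			t.Logf("cancelled after %d retries: %v", retries, ctx.Err())
			return
		}
	}
	t.Fatal("context was never cancelled")
}
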
10 changes: 5 additions & 5 deletions testing/util_test.go
@@ -15,7 +15,7 @@ import (
 )
 
 func TestWaitFor(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
 	defer cancel()
 
 	err := gophercloud.WaitFor(ctx, func(context.Context) (bool, error) {
@@ -29,7 +29,7 @@ func TestWaitForTimeout(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
 	defer cancel()
 
 	err := gophercloud.WaitFor(ctx, func(context.Context) (bool, error) {
@@ -43,7 +43,7 @@ func TestWaitForError(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
 	defer cancel()
 
 	err := gophercloud.WaitFor(ctx, func(context.Context) (bool, error) {
@@ -57,7 +57,7 @@ func TestWaitForPredicateExceed(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
 	defer cancel()
 
 	err := gophercloud.WaitFor(ctx, func(ctx context.Context) (bool, error) {
@@ -66,7 +66,7 @@
 		case <-ctx.Done():
 			return true, ctx.Err()
 
-		case <-time.After(4 * time.Second):
+		case <-time.After(40 * time.Millisecond):
 			return false, errors.New("Just wasting time")
 		}
 	})
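
The WaitFor tests all follow the same recipe: a 10 ms context deadline races a predicate that would otherwise take 40 ms, so the deadline path wins quickly. A sketch under the assumption that gophercloud.WaitFor simply polls the predicate until success, error, or context expiry — the local waitFor below is a simplified stand-in, not the real implementation:

package testing

import (
	"context"
	"errors"
	"testing"
	"time"
)

// waitFor is a simplified stand-in for gophercloud.WaitFor: it polls
// predicate until it reports done, returns an error, or ctx expires.
func waitFor(ctx context.Context, predicate func(context.Context) (bool, error)) error {
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		done, err := predicate(ctx)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}
}

func TestWaitForShortTimeout(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	err := waitFor(ctx, func(ctx context.Context) (bool, error) {
		select {
		case <-ctx.Done():
			return true, ctx.Err() // the 10 ms deadline wins
		case <-time.After(40 * time.Millisecond):
			return false, errors.New("just wasting time")
		}
	})
	if !errors.Is(err, context.DeadlineExceeded) {
		t.Fatalf("expected deadline exceeded, got %v", err)
	}
}
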
