-
Notifications
You must be signed in to change notification settings - Fork 895
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
GODRIVER-1831 Avoid accidental extra getMore when using limit on Find #562
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -883,6 +883,103 @@ func TestCollection(t *testing.T) { | |
_, ok := err.(mongo.CommandError) | ||
assert.True(mt, ok, "expected error type %v, got %v", mongo.CommandError{}, err) | ||
}) | ||
mt.Run("limit and batch size and skip", func(mt *mtest.T) { | ||
testCases := []struct { | ||
limit int64 | ||
batchSize int32 | ||
skip int64 | ||
name string | ||
}{ | ||
{ | ||
99, 100, 10, | ||
"case 1", | ||
}, | ||
{ | ||
100, 100, 20, | ||
"case 2", | ||
}, | ||
{ | ||
80, 20, 90, | ||
"case 3", | ||
}, | ||
{ | ||
201, 201, 0, | ||
"case 4", | ||
}, | ||
{ | ||
100, 200, 120, | ||
"case 5", | ||
}, | ||
} | ||
for _, tc := range testCases { | ||
mt.Run(tc.name, func(mt *mtest.T) { | ||
var insertDocs []interface{} | ||
for i := 1; i <= 201; i++ { | ||
insertDocs = append(insertDocs, bson.D{{"x", int32(i)}}) | ||
} | ||
|
||
_, err := mt.Coll.InsertMany(mtest.Background, insertDocs) | ||
assert.Nil(mt, err, "InsertMany error for initial data: %v", err) | ||
|
||
findOptions := options.Find().SetLimit(tc.limit).SetBatchSize(tc.batchSize). | ||
SetSkip(tc.skip) | ||
cursor, err := mt.Coll.Find(mtest.Background, bson.D{}, findOptions) | ||
assert.Nil(mt, err, "Find error: %v", err) | ||
|
||
var docs []interface{} | ||
err = cursor.All(mtest.Background, &docs) | ||
assert.Nil(mt, err, "All error: %v", err) | ||
if (201 - tc.skip) < tc.limit { | ||
assert.Equal(mt, int(201-tc.skip), len(docs), "expected number of docs to be %v, got %v", int(201-tc.skip), len(docs)) | ||
} else { | ||
assert.Equal(mt, int(tc.limit), len(docs), "expected number of docs to be %v, got %v", tc.limit, len(docs)) | ||
} | ||
}) | ||
} | ||
}) | ||
mt.Run("unset batch size does not surpass limit", func(mt *mtest.T) { | ||
testCases := []struct { | ||
limit int64 | ||
name string | ||
}{ | ||
{ | ||
99, | ||
"99", | ||
}, | ||
{ | ||
100, | ||
"100", | ||
}, | ||
{ | ||
101, | ||
"101", | ||
}, | ||
{ | ||
200, | ||
"200", | ||
}, | ||
} | ||
for _, tc := range testCases { | ||
mt.Run(tc.name, func(mt *mtest.T) { | ||
var insertDocs []interface{} | ||
for i := 1; i <= 201; i++ { | ||
insertDocs = append(insertDocs, bson.D{{"x", int32(i)}}) | ||
} | ||
|
||
_, err := mt.Coll.InsertMany(mtest.Background, insertDocs) | ||
assert.Nil(mt, err, "InsertMany error for initial data: %v", err) | ||
opts := options.Find().SetSkip(0).SetLimit(tc.limit) | ||
cursor, err := mt.Coll.Find(mtest.Background, bson.D{}, opts) | ||
assert.Nil(mt, err, "Find error with limit %v: %v", tc.limit, err) | ||
|
||
var docs []interface{} | ||
err = cursor.All(mtest.Background, &docs) | ||
assert.Nil(mt, err, "All error with limit %v: %v", tc.limit, err) | ||
|
||
assert.Equal(mt, int(tc.limit), len(docs), "expected number of docs to be %v, got %v", tc.limit, len(docs)) | ||
}) | ||
} | ||
}) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Given that this is a bit tricky to reason about, I suggest:
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Added both! There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thanks! |
||
}) | ||
mt.RunOpts("find one", noClientOpts, func(mt *mtest.T) { | ||
mt.Run("limit", func(mt *mtest.T) { | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -241,7 +241,7 @@ func (bc *BatchCursor) getMore(ctx context.Context) { | |
|
||
// Required for legacy operations which don't support limit. | ||
numToReturn := bc.batchSize | ||
if bc.limit != 0 && bc.numReturned+bc.batchSize > bc.limit { | ||
if bc.limit != 0 && bc.numReturned+bc.batchSize >= bc.limit { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Based on my reading of https://github.com/mongodb/specifications/blob/master/source/crud/crud.rst#combining-limit-and-batch-size-for-the-wire-protocol I believe it is ok to use There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm not sure either, but the tests now query a collection containing more than 101 documents and pass... There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Great, the new tests cover the case of a getMore being run. |
||
numToReturn = bc.limit - bc.numReturned | ||
if numToReturn <= 0 { | ||
err := bc.Close(ctx) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I suggest adding a name to the test cases and having the loop use `mt.Run`: It will make it easier to identify which test case failed if they are split into cases. Even names as simple as "case 1", "case 2", "case 3", etc. will produce separate output for each test case.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ah good idea! Used "case 1", etc. since they were difficult to describe succinctly.