@@ -167,7 +167,7 @@ def test_try_next_runs_one_getmore(self):
         client = rs_or_single_client(event_listeners=[listener])
         # Connect to the cluster.
         client.admin.command("ping")
-        listener.results.clear()
+        listener.reset()
         # ChangeStreams only read majority committed data so use w:majority.
         coll = self.watched_collection().with_options(write_concern=WriteConcern("majority"))
         coll.drop()
@@ -177,33 +177,33 @@ def test_try_next_runs_one_getmore(self):
         self.addCleanup(coll.drop)
         with self.change_stream_with_client(client, max_await_time_ms=250) as stream:
             self.assertEqual(listener.started_command_names(), ["aggregate"])
-            listener.results.clear()
+            listener.reset()

             # Confirm that only a single getMore is run even when no documents
             # are returned.
             self.assertIsNone(stream.try_next())
             self.assertEqual(listener.started_command_names(), ["getMore"])
-            listener.results.clear()
+            listener.reset()
             self.assertIsNone(stream.try_next())
             self.assertEqual(listener.started_command_names(), ["getMore"])
-            listener.results.clear()
+            listener.reset()

             # Get at least one change before resuming.
             coll.insert_one({"_id": 2})
             wait_until(lambda: stream.try_next() is not None, "get change from try_next")
-            listener.results.clear()
+            listener.reset()

             # Cause the next request to initiate the resume process.
             self.kill_change_stream_cursor(stream)
-            listener.results.clear()
+            listener.reset()

             # The sequence should be:
             # - getMore, fail
             # - resume with aggregate command
             # - no results, return immediately without another getMore
             self.assertIsNone(stream.try_next())
             self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"])
-            listener.results.clear()
+            listener.reset()

             # Stream still works after a resume.
             coll.insert_one({"_id": 3})
@@ -217,7 +217,7 @@ def test_batch_size_is_honored(self):
         client = rs_or_single_client(event_listeners=[listener])
         # Connect to the cluster.
         client.admin.command("ping")
-        listener.results.clear()
+        listener.reset()
         # ChangeStreams only read majority committed data so use w:majority.
         coll = self.watched_collection().with_options(write_concern=WriteConcern("majority"))
         coll.drop()
@@ -229,12 +229,12 @@ def test_batch_size_is_honored(self):
         expected = {"batchSize": 23}
         with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream:
             # Confirm that batchSize is honored for initial batch.
-            cmd = listener.results["started"][0].command
+            cmd = listener.started_events[0].command
             self.assertEqual(cmd["cursor"], expected)
-            listener.results.clear()
+            listener.reset()
             # Confirm that batchSize is honored by getMores.
             self.assertIsNone(stream.try_next())
-            cmd = listener.results["started"][0].command
+            cmd = listener.started_events[0].command
             key = next(iter(expected))
             self.assertEqual(expected[key], cmd[key])

@@ -255,12 +255,11 @@ def test_start_at_operation_time(self):
     @no_type_check
     def _test_full_pipeline(self, expected_cs_stage):
         client, listener = self.client_with_listener("aggregate")
-        results = listener.results
         with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _:
             pass

-        self.assertEqual(1, len(results["started"]))
-        command = results["started"][0]
+        self.assertEqual(1, len(listener.started_events))
+        command = listener.started_events[0]
         self.assertEqual("aggregate", command.command_name)
         self.assertEqual(
             [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}],
@@ -464,7 +463,7 @@ def _get_expected_resume_token_legacy(self, stream, listener, previous_change=No
         versions that don't support postBatchResumeToken. Assumes the stream
         has never returned any changes if previous_change is None."""
         if previous_change is None:
-            agg_cmd = listener.results["started"][0]
+            agg_cmd = listener.started_events[0]
             stage = agg_cmd.command["pipeline"][0]["$changeStream"]
             return stage.get("resumeAfter") or stage.get("startAfter")

@@ -481,7 +480,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None):
         if token is not None:
             return token

-        response = listener.results["succeeded"][-1].reply
+        response = listener.succeeded_events[-1].reply
         return response["cursor"]["postBatchResumeToken"]

     @no_type_check
@@ -558,8 +557,8 @@ def test_no_resume_attempt_if_aggregate_command_fails(self):
                 pass

         # Driver should have attempted aggregate command only once.
-        self.assertEqual(len(listener.results["started"]), 1)
-        self.assertEqual(listener.results["started"][0].command_name, "aggregate")
+        self.assertEqual(len(listener.started_events), 1)
+        self.assertEqual(listener.started_events[0].command_name, "aggregate")

     # Prose test no. 5 - REMOVED
     # Prose test no. 6 - SKIPPED
@@ -603,20 +602,20 @@ def test_start_at_operation_time_caching(self):
         with self.change_stream_with_client(client) as cs:
             self.kill_change_stream_cursor(cs)
             cs.try_next()
-        cmd = listener.results["started"][-1].command
+        cmd = listener.started_events[-1].command
         self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"))

         # Case 2: change stream started with startAtOperationTime
-        listener.results.clear()
+        listener.reset()
         optime = self.get_start_at_operation_time()
         with self.change_stream_with_client(client, start_at_operation_time=optime) as cs:
             self.kill_change_stream_cursor(cs)
             cs.try_next()
-        cmd = listener.results["started"][-1].command
+        cmd = listener.started_events[-1].command
         self.assertEqual(
             cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"),
             optime,
-            str([k.command for k in listener.results["started"]]),
+            str([k.command for k in listener.started_events]),
         )

     # Prose test no. 10 - SKIPPED
@@ -631,7 +630,7 @@ def test_resumetoken_empty_batch(self):
             self.assertIsNone(change_stream.try_next())
             resume_token = change_stream.resume_token

-        response = listener.results["succeeded"][0].reply
+        response = listener.succeeded_events[0].reply
         self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"])

     # Prose test no. 11
@@ -643,7 +642,7 @@ def test_resumetoken_exhausted_batch(self):
             self._populate_and_exhaust_change_stream(change_stream)
             resume_token = change_stream.resume_token

-        response = listener.results["succeeded"][-1].reply
+        response = listener.succeeded_events[-1].reply
         self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"])

     # Prose test no. 12
@@ -737,7 +736,7 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self):
             self.kill_change_stream_cursor(change_stream)
             change_stream.try_next()  # Resume attempt

-        response = listener.results["started"][-1]
+        response = listener.started_events[-1]
         self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
         self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter"))

@@ -756,7 +755,7 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self):
             self.kill_change_stream_cursor(change_stream)
             change_stream.try_next()  # Resume attempt

-        response = listener.results["started"][-1]
+        response = listener.started_events[-1]
         self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
         self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter"))

@@ -1056,7 +1055,7 @@ def tearDownClass(cls):

     def setUp(self):
         super(TestAllLegacyScenarios, self).setUp()
-        self.listener.results.clear()
+        self.listener.reset()

     def setUpCluster(self, scenario_dict):
         assets = [
@@ -1128,7 +1127,7 @@ def check_event(self, event, expectation_dict):
             self.assertEqual(getattr(event, key), value)

     def tearDown(self):
-        self.listener.results.clear()
+        self.listener.reset()


 _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams")
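
Note: the tests above assume a command-monitoring listener that exposes started_events, succeeded_events, started_command_names(), and reset() in place of the old results dict that this diff retires. A minimal sketch of such a helper, built on pymongo.monitoring.CommandListener (the class name EventCollector and its exact shape are assumptions for illustration, not the project's actual test utility), could look like this:

# Hypothetical sketch of a listener with the interface used above; the real
# helper lives in the project's test utilities and may differ in detail.
from pymongo import monitoring


class EventCollector(monitoring.CommandListener):
    """Collects command monitoring events into plain lists."""

    def __init__(self):
        self.started_events = []
        self.succeeded_events = []
        self.failed_events = []

    def started(self, event):
        # event is a CommandStartedEvent (has .command and .command_name).
        self.started_events.append(event)

    def succeeded(self, event):
        # event is a CommandSucceededEvent (has .reply).
        self.succeeded_events.append(event)

    def failed(self, event):
        self.failed_events.append(event)

    def started_command_names(self):
        # E.g. ["aggregate", "getMore"] since the last reset().
        return [event.command_name for event in self.started_events]

    def reset(self):
        # Drop all recorded events, matching the listener.reset() calls above.
        self.started_events.clear()
        self.succeeded_events.clear()
        self.failed_events.clear()

Such a listener would be registered the same way the tests do, e.g. rs_or_single_client(event_listeners=[EventCollector()]).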