Skip to content

Commit

Permalink
TDL-17980 add satisfaction_ratings stream to test coverage (singer-io…
Browse files Browse the repository at this point in the history
…#128)

* WIP add satisfaction_ratings stream to test coverage

* Push current test and spike on future improvements

---------

Co-authored-by: btowles <btowles@stitchdata.com>
  • Loading branch information
2 people authored and karthipillai committed Oct 2, 2023
1 parent 9e58819 commit 86f41ea
Show file tree
Hide file tree
Showing 3 changed files with 49 additions and 14 deletions.
48 changes: 44 additions & 4 deletions test/test_all_streams.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ def expected_sync_streams(self):
"ticket_fields",
"group_memberships",
"macros",
"satisfaction_ratings",
"tags",
"ticket_metrics",
}
Expand All @@ -40,6 +41,7 @@ def expected_pks(self):
"ticket_fields": {"id"},
"group_memberships": {"id"},
"macros": {"id"},
"satisfaction_ratings": {"id"},
"tags": {"name"},
"ticket_metrics": {"id"},
}
Expand All @@ -50,7 +52,7 @@ def refresh_tags(self, records):
creds = {
'email': 'dev@stitchdata.com',
'password': os.getenv('TAP_ZENDESK_API_PASSWORD'),
'subdomain': "rjmdev",
'subdomain': self.get_properties()['subdomain'],
}

test_tags = ['test_tag_1', 'test_tag_2', 'test_tag_3']
Expand All @@ -73,6 +75,43 @@ def refresh_tags(self, records):
# mark tags as refreshed as soon as we successfully get through one loop
self.tags_are_stale = False

def rate_tickets(self, records):
    """WIP: assign satisfaction ratings to recently closed tickets via the Zendesk API.

    Walks the last 9 closed tickets found in the synced `records` and, for each
    one whose rating is still the API default (`{'score': 'unoffered'}`), applies
    one of the five possible rating values in round-robin order. Fails the test
    if fewer than 9 closed tickets were synced.

    :param records: dict of synced target output keyed by stream name, where each
        stream maps to {'messages': [{'data': {...}}, ...]} — TODO confirm shape
        matches runner.get_records_from_target_output()
    """
    # Zenpy client credentials to connect to the API
    creds = {
        'email': 'dev@stitchdata.com',
        'password': os.getenv('TAP_ZENDESK_API_PASSWORD'),
        'subdomain': self.get_properties()['subdomain'],
    }

    ratings = ['bad', 'badwithcomment', 'good', 'goodwithcomment', 'offered']

    # Guard the chained lookups so a missing 'tickets' stream surfaces as the
    # assertion below rather than an AttributeError on None.
    ticket_messages = records.get('tickets', {}).get('messages', [])
    closed_tickets = [t for t in ticket_messages
                      if t.get('data', {}).get('status') == 'closed']
    self.assertGreaterEqual(len(closed_tickets), 9)
    last_9_closed_tickets = closed_tickets[-9:]

    zenpy_client = Zenpy(**creds)

    for i, tic in enumerate(last_9_closed_tickets):
        # Only set a rating when it is still the API default; skip otherwise so
        # we never clobber a rating that was set deliberately.
        current_rating = tic.get('data').get('satisfaction_rating')
        if current_rating != {'score': 'unoffered'}:
            print(f"Skipping non-default rating! {current_rating}")
            continue

        # TODO move from the Zenpy client to curl or `requests`: every form of
        # tickets.rate() tried so far raises
        #   AttributeError: 'PrimaryEndpoint' object has no attribute 'satisfaction_ratings'
        # (tried with int id / looked-up id, bare score string, {'score': ...} dict,
        # and the full {"satisfaction_rating": {...}} payload).
        zenpy_client.tickets.rate(tic.get('data').get('id'), {'score': ratings[i % 5]})


def test_run(self):
# Default test setup
Expand Down Expand Up @@ -108,11 +147,12 @@ def test_run(self):
# Verify exit status is 0 and verify rows were synced
_ = self.run_and_verify_sync(conn_id, state={})

# Verify actual rows were synced

# Ensure all records have a value for PK(s)
records = runner.get_records_from_target_output()

# Ensure tickets data have some ratings now that we have records to check
# self.rate_tickets(records) # TODO tickets.rate(id, rating) fails with client.

# assume tags are stale since we cannot query tag age / date from synced records or the API
self.tags_are_stale = True

Expand Down Expand Up @@ -142,7 +182,7 @@ def test_run(self):
# tags were already refreshed so records were missing from first sync
messages = tags_records.get(stream).get('messages')

if stream in ['tickets', 'groups', 'users']:
if stream in ['groups', 'organizations', 'tickets', 'users']:
self.assertGreater(len(messages), 100, msg="Stream {} has fewer than 100 records synced".format(stream))
for m in messages:
pk_set = self.expected_pks()[stream]
Expand Down
12 changes: 4 additions & 8 deletions test/test_standard_bookmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,15 +127,13 @@ def test_run(self):
# Verify the second sync bookmark is Equal to the first sync bookmark
# assumes no changes to data during test
if not stream == "users":
self.assertEqual(second_bookmark_value,
first_bookmark_value)
self.assertEqual(second_bookmark_value, first_bookmark_value)
else:
# For `users` stream it stores bookmark as 1 minute less than current time if `updated_at` of
# last records less than it. So, if there is no data change then second_bookmark_value will be
# 1 minute less than current time. Therefore second_bookmark_value will always be
# greater or equal to first_bookmark_value
self.assertGreaterEqual(second_bookmark_value,
first_bookmark_value)
self.assertGreaterEqual(second_bookmark_value, first_bookmark_value)

for record in first_sync_messages:

Expand All @@ -145,8 +143,7 @@ def test_run(self):
if stream == "tickets":
replication_key_value = datetime.utcfromtimestamp(replication_key_value).strftime('%Y-%m-%dT%H:%M:%SZ')

self.assertLessEqual(
replication_key_value, first_bookmark_value_utc,
self.assertLessEqual(replication_key_value, first_bookmark_value_utc,
msg="First sync bookmark was set incorrectly, a record with a greater replication-key value was synced."
)

Expand All @@ -161,8 +158,7 @@ def test_run(self):
msg="Second sync records do not respect the previous bookmark.")

# Verify the second sync bookmark value is the max replication key value for a given stream
self.assertLessEqual(
replication_key_value, second_bookmark_value_utc,
self.assertLessEqual(replication_key_value, second_bookmark_value_utc,
msg="Second sync bookmark was set incorrectly, a record with a greater replication-key value was synced."
)

Expand Down
3 changes: 1 addition & 2 deletions test/test_start_date.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,8 +163,7 @@ def run_test(self, days, expected_streams):
self.assertGreater(record_count_sync_1, record_count_sync_2)

# Verify the records replicated in sync 2 were also replicated in sync 1
self.assertTrue(
primary_keys_sync_2.issubset(primary_keys_sync_1))
self.assertTrue(primary_keys_sync_2.issubset(primary_keys_sync_1))

else:
# Given below streams are child streams of parent stream `tickets` and tickets is an incremental stream
Expand Down

0 comments on commit 86f41ea

Please sign in to comment.