OPIK-501 Basic experiment items CRUD tests #865
Changes from 1 commit
@@ -0,0 +1,53 @@
import re

from playwright.sync_api import Page, Locator


class ExperimentItemsPage:

    def __init__(self, page: Page):
        self.page = page
        self.next_page_button_locator = self.page.locator("div:has(> button:nth-of-type(4))").locator('button:nth-of-type(3)')

    def get_pagination_button(self) -> Locator:
        return self.page.get_by_role('button', name='Showing')

    def get_total_number_of_items_in_experiment(self):
        # The pagination control's text contains "... of <total>"; extract the total.
        pagination_button_text = self.get_pagination_button().inner_text()
        match = re.search(r'of (\d+)', pagination_button_text)
        if match:
            return int(match.group(1))
        else:
            return 0

    def get_id_of_nth_experiment_item(self, n: int):
        # Data rows start after the table header, hence the offset.
        row = self.page.locator('tr').nth(n + 1)
        cell = row.locator('td').first
        cell.hover()
        # The second button revealed on hover copies the item ID to the clipboard.
        cell.get_by_role('button').nth(1).click()
        return self.page.evaluate('navigator.clipboard.readText()')

    def get_all_item_ids_on_current_page(self):
        ids = []
        rows = self.page.locator('tr').all()
        # Skip the non-data rows at the top of the table.
        for row in rows[2:]:
            cell = row.locator('td').first
            cell.hover()
            cell.get_by_role('button').nth(1).click()
            ids.append(self.page.evaluate('navigator.clipboard.readText()'))

        return ids

    def get_all_item_ids_in_experiment(self):
        ids = []
        ids.extend(self.get_all_item_ids_on_current_page())
        while self.next_page_button_locator.is_visible() and self.next_page_button_locator.is_enabled():
            self.next_page_button_locator.click()
            self.page.wait_for_timeout(500)
            ids.extend(self.get_all_item_ids_on_current_page())

        return ids
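Note: navigator.clipboard.readText() only returns the copied ID if the browser context has been granted the clipboard-read permission; the tests below do this per test with the browser_context_args marker. A minimal alternative sketch, assuming pytest-playwright's overridable session-scoped browser_context_args fixture, would grant it project-wide from conftest.py:

# conftest.py (sketch) — grant clipboard access to every browser context so the
# page object's clipboard reads work without repeating the marker on each test.
import pytest


@pytest.fixture(scope="session")
def browser_context_args(browser_context_args):
    return {**browser_context_args, "permissions": ["clipboard-read"]}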
@@ -48,6 +48,7 @@ def insert_dataset_items_ui(page: Page, dataset_name, items_list):

    for item in items_list:
        dataset_items_page.insert_dataset_item(json.dumps(item))
        time.sleep(0.2)
Reviewer comment: Any particular reason to throttle the insertion pace here? The recommended alternative is inserting dataset items in batches, so you can insert the whole list at once.

Author reply: The main bit of flakiness currently present in these tests is a dataset item inserted via the UI not registering if you switch away too quickly. I added a small delay to try to prevent that and see if it works. As far as I know there is no way to add a list of dataset items via the UI, only one at a time, which is why I do it this way here. When inserting via the SDK I just pass the entire list in one go.

def delete_one_dataset_item_sdk(client: opik.Opik, dataset_name):
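For reference, a minimal sketch of the batch path described in the reply above, assuming the Opik SDK exposes get_or_create_dataset and Dataset.insert (the helper name and signature here are hypothetical, not part of this diff):

import opik


def insert_dataset_items_sdk(client: opik.Opik, dataset_name, items_list):
    # Hypothetical helper: pass the entire list in one call instead of inserting
    # items one at a time through the UI.
    dataset = client.get_or_create_dataset(name=dataset_name)
    dataset.insert(items_list)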
@@ -0,0 +1,78 @@
import pytest
from playwright.sync_api import Page, expect
from page_objects.DatasetsPage import DatasetsPage
from page_objects.ExperimentsPage import ExperimentsPage
from page_objects.ExperimentItemsPage import ExperimentItemsPage
from sdk_helpers import get_experiment_by_id, delete_experiment_by_id, delete_experiment_items_by_id, experiment_items_stream
import opik
import time
from collections import Counter


class TestExperimentItemsCrud:

    @pytest.mark.browser_context_args(permissions=['clipboard-read'])
    def test_all_experiment_items_created(self, page: Page, mock_experiment):
        """
        Creates an experiment with 10 experiment items, then checks that all items are visible in both the UI and the backend.
        1. Create an experiment on a dataset with 10 items (mock_experiment fixture)
        2. Check that the item counter in the UI displays the correct total (10 items)
        3. Check that the 'trace_count' parameter of the experiment, as returned by the v1/private/experiments/{id} endpoint,
           matches the size of the dataset (10 items)
        4. Check that the list of IDs displayed in the UI (currently dataset item IDs) exactly matches the list of dataset item IDs
           returned by the v1/private/experiments/items/stream endpoint (easy to switch to fetching the items via the SDK if that is ever added)
        """
        experiments_page = ExperimentsPage(page)
        experiments_page.go_to_page()
        experiments_page.click_first_experiment_that_matches_name(exp_name=mock_experiment['name'])

        experiment_items_page = ExperimentItemsPage(page)
        items_on_page = experiment_items_page.get_total_number_of_items_in_experiment()
        assert items_on_page == mock_experiment['size']

        experiment_backend = get_experiment_by_id(mock_experiment['id'])
        assert experiment_backend.trace_count == mock_experiment['size']

        ids_on_backend = [item['dataset_item_id'] for item in experiment_items_stream(mock_experiment['name'])]
        ids_on_frontend = experiment_items_page.get_all_item_ids_in_experiment()

        assert Counter(ids_on_backend) == Counter(ids_on_frontend)

    @pytest.mark.browser_context_args(permissions=['clipboard-read'])
    def test_delete_experiment_items(self, page: Page, mock_experiment):
        """
        Deletes a single experiment item and checks that everything gets updated in both the UI and the backend.
        1. Create an experiment on a dataset with 10 items (mock_experiment fixture)
        2. Grab an experiment item ID from the v1/private/experiments/items/stream endpoint and send a delete request
           to remove that single experiment item from the experiment
        3. Check that the item counter in the UI is updated (to size(initial_experiment) - 1)
        4. Check that the 'trace_count' parameter of the experiment, as returned by the v1/private/experiments/{id} endpoint,
           is updated to the new size (as above)
        5. Check that the list of IDs displayed in the UI (currently dataset item IDs) exactly matches the list of dataset item IDs
           returned by the v1/private/experiments/items/stream endpoint (easy to switch to fetching the items via the SDK if that is ever added)
        """
        experiments_page = ExperimentsPage(page)
        experiments_page.go_to_page()
        experiments_page.click_first_experiment_that_matches_name(exp_name=mock_experiment['name'])

        id_to_delete = experiment_items_stream(exp_name=mock_experiment['name'], limit=1)[0]['id']
        delete_experiment_items_by_id(ids=[id_to_delete])

        experiment_items_page = ExperimentItemsPage(page)
        experiment_items_page.page.reload()
        items_on_page = experiment_items_page.get_total_number_of_items_in_experiment()
        assert items_on_page == mock_experiment['size'] - 1

        experiment_sdk = get_experiment_by_id(mock_experiment['id'])
        assert experiment_sdk.trace_count == mock_experiment['size'] - 1

        ids_on_backend = [item['dataset_item_id'] for item in experiment_items_stream(mock_experiment['name'])]
        ids_on_frontend = experiment_items_page.get_all_item_ids_in_experiment()

        assert Counter(ids_on_backend) == Counter(ids_on_frontend)
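For context, the experiment_items_stream helper imported from sdk_helpers is what the docstrings refer to when they mention the v1/private/experiments/items/stream endpoint. A rough sketch of such a helper follows; the payload keys, base URL, auth handling (omitted), and line-by-line JSON parsing are assumptions for illustration, not the project's actual implementation:

import json

import requests


def experiment_items_stream(exp_name: str, limit: int | None = None, base_url: str = "http://localhost:5173/api"):
    # Sketch only: request/response shapes are assumed for illustration.
    payload = {"experiment_name": exp_name}
    if limit is not None:
        payload["limit"] = limit
    response = requests.post(f"{base_url}/v1/private/experiments/items/stream", json=payload)
    response.raise_for_status()
    # The endpoint streams one JSON object per line; collect them into a list.
    return [json.loads(line) for line in response.text.splitlines() if line.strip()]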
Reviewer comment: Minor: mentioned before, but please add a pre-commit hook to format the Python code within this tests_end_to_end folder, so that, among other things, it adds an empty line at the end of each file. Use the SDK linting as reference.

Author reply: Good point. I went ahead and added full linting functionality, along with a workflow file to check it on every PR, and applied it as part of this PR; everything looks good. I will add more bits in the future when doing improvements (e.g. clean typing on everything, like the SDK code currently has).