diff --git a/tests/test_cli.py b/tests/test_cli.py
index 3d85b5c9c4..1549a3f2bb 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -222,7 +222,9 @@ def test_upload_folder_mock(self, create_mock: Mock, upload_mock: Mock) -> None:
         )
         cmd.run()
 
-        create_mock.assert_called_once_with(repo_id="my-model", repo_type="model", exist_ok=True, private=True)
+        create_mock.assert_called_once_with(
+            repo_id="my-model", repo_type="model", exist_ok=True, private=True, space_sdk=None
+        )
         upload_mock.assert_called_once_with(
             folder_path=cache_dir,
             path_in_repo=".",
@@ -251,7 +253,7 @@ def test_upload_file_mock(self, create_mock: Mock, upload_mock: Mock) -> None:
             cmd.run()
 
         create_mock.assert_called_once_with(
-            repo_id="my-dataset", repo_type="dataset", exist_ok=True, private=False
+            repo_id="my-dataset", repo_type="dataset", exist_ok=True, private=False, space_sdk=None
         )
         upload_mock.assert_called_once_with(
             path_or_fileobj=str(file_path),
diff --git a/tests/test_inference_async_client.py b/tests/test_inference_async_client.py
index 6a968c52a1..ec34425a6d 100644
--- a/tests/test_inference_async_client.py
+++ b/tests/test_inference_async_client.py
@@ -212,7 +212,7 @@ async def test_get_status_too_big_model() -> None:
 
 @pytest.mark.asyncio
 async def test_get_status_loaded_model() -> None:
-    model_status = await AsyncInferenceClient().get_model_status("bigcode/starcoder")
+    model_status = await AsyncInferenceClient().get_model_status("bigscience/bloom")
     assert model_status.loaded is True
     assert model_status.state == "Loaded"
     assert model_status.compute_type == "gpu"
diff --git a/tests/test_inference_client.py b/tests/test_inference_client.py
index dea70367fe..1c6f7729fd 100644
--- a/tests/test_inference_client.py
+++ b/tests/test_inference_client.py
@@ -519,7 +519,7 @@ def test_too_big_model(self) -> None:
 
     def test_loaded_model(self) -> None:
         client = InferenceClient()
-        model_status = client.get_model_status("bigcode/starcoder")
+        model_status = client.get_model_status("bigscience/bloom")
         self.assertTrue(model_status.loaded)
         self.assertEqual(model_status.state, "Loaded")
         self.assertEqual(model_status.compute_type, "gpu")