diff --git a/.github/workflows/test_local_integration.yaml b/.github/workflows/test_local_integration.yaml index 81810abfe1..e279e77bc6 100644 --- a/.github/workflows/test_local_integration.yaml +++ b/.github/workflows/test_local_integration.yaml @@ -187,6 +187,7 @@ jobs: env: KEYCLOAK_USERNAME: ${{ env.TEST_USERNAME }} KEYCLOAK_PASSWORD: ${{ env.TEST_PASSWORD }} + CONDA_STORE_SERVICE_NAMESPACE: dev run: | pytest tests/tests_deployment/ -v -s diff --git a/src/_nebari/stages/infrastructure/__init__.py b/src/_nebari/stages/infrastructure/__init__.py index 8b188a720b..33777c4246 100644 --- a/src/_nebari/stages/infrastructure/__init__.py +++ b/src/_nebari/stages/infrastructure/__init__.py @@ -224,7 +224,7 @@ class DigitalOceanNodeGroup(schema.Base): DEFAULT_DO_NODE_GROUPS = { - "general": DigitalOceanNodeGroup(instance="g-8vcpu-32gb", min_nodes=1, max_nodes=1), + "general": DigitalOceanNodeGroup(instance="g-8vcpu-32gb", min_nodes=1, max_nodes=5), "user": DigitalOceanNodeGroup(instance="g-4vcpu-16gb", min_nodes=1, max_nodes=5), "worker": DigitalOceanNodeGroup(instance="g-4vcpu-16gb", min_nodes=1, max_nodes=5), } @@ -315,7 +315,7 @@ class GCPNodeGroup(schema.Base): DEFAULT_GCP_NODE_GROUPS = { - "general": GCPNodeGroup(instance="e2-highmem-4", min_nodes=1, max_nodes=1), + "general": GCPNodeGroup(instance="e2-highmem-4", min_nodes=1, max_nodes=5), "user": GCPNodeGroup(instance="e2-standard-4", min_nodes=0, max_nodes=5), "worker": GCPNodeGroup(instance="e2-standard-4", min_nodes=0, max_nodes=5), } @@ -362,7 +362,7 @@ class AzureNodeGroup(schema.Base): DEFAULT_AZURE_NODE_GROUPS = { - "general": AzureNodeGroup(instance="Standard_D8_v3", min_nodes=1, max_nodes=1), + "general": AzureNodeGroup(instance="Standard_D8_v3", min_nodes=1, max_nodes=5), "user": AzureNodeGroup(instance="Standard_D4_v3", min_nodes=0, max_nodes=5), "worker": AzureNodeGroup(instance="Standard_D4_v3", min_nodes=0, max_nodes=5), } @@ -436,7 +436,7 @@ class AWSNodeGroup(schema.Base): DEFAULT_AWS_NODE_GROUPS = 
{ - "general": AWSNodeGroup(instance="m5.2xlarge", min_nodes=1, max_nodes=1), + "general": AWSNodeGroup(instance="m5.2xlarge", min_nodes=1, max_nodes=5), "user": AWSNodeGroup( instance="m5.xlarge", min_nodes=0, max_nodes=5, single_subnet=False ), diff --git a/src/_nebari/stages/kubernetes_initialize/__init__.py b/src/_nebari/stages/kubernetes_initialize/__init__.py index 7afd69b547..5a764e48ba 100644 --- a/src/_nebari/stages/kubernetes_initialize/__init__.py +++ b/src/_nebari/stages/kubernetes_initialize/__init__.py @@ -45,6 +45,7 @@ class InputVars(schema.Base): external_container_reg: Optional[ExtContainerReg] = None gpu_enabled: bool = False gpu_node_group_names: List[str] = [] + general_node_selector: Dict[str, str] = {} class InputSchema(schema.Base): @@ -92,7 +93,9 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): group for group in self.config.amazon_web_services.node_groups.keys() ] input_vars.aws_region = self.config.amazon_web_services.region - + input_vars.general_node_selector = stage_outputs["stages/02-infrastructure"][ + "node_selectors" + ]["general"] return input_vars.model_dump() def check( diff --git a/src/_nebari/stages/kubernetes_initialize/template/main.tf b/src/_nebari/stages/kubernetes_initialize/template/main.tf index 402c68fb3f..9a0c29836c 100644 --- a/src/_nebari/stages/kubernetes_initialize/template/main.tf +++ b/src/_nebari/stages/kubernetes_initialize/template/main.tf @@ -25,7 +25,14 @@ module "nvidia-driver-installer" { source = "./modules/nvidia-installer" - cloud_provider = var.cloud_provider - gpu_enabled = var.gpu_enabled - gpu_node_group_names = var.gpu_node_group_names + cloud_provider = var.cloud_provider + gpu_enabled = var.gpu_enabled + gpu_node_group_names = var.gpu_node_group_names + general_node_selector = var.general_node_selector +} + +module "keda-installer" { + source = "./modules/keda" + namespace = var.environment + general_node_selector = var.general_node_selector } diff --git 
a/src/_nebari/stages/kubernetes_initialize/template/modules/keda/main.tf b/src/_nebari/stages/kubernetes_initialize/template/modules/keda/main.tf new file mode 100644 index 0000000000..e0aa517b19 --- /dev/null +++ b/src/_nebari/stages/kubernetes_initialize/template/modules/keda/main.tf @@ -0,0 +1,16 @@ +resource "helm_release" "keda" { + name = "keda" + namespace = var.namespace + repository = "https://kedacore.github.io/charts" + chart = "keda" + version = "2.13.2" + wait_for_jobs = "true" + values = concat([ + file("${path.module}/values.yaml"), + jsonencode({ + nodeSelector = { + "${var.general_node_selector.key}" = var.general_node_selector.value + } + }) + ]) +} diff --git a/src/_nebari/stages/kubernetes_initialize/template/modules/keda/values.yaml b/src/_nebari/stages/kubernetes_initialize/template/modules/keda/values.yaml new file mode 100644 index 0000000000..25f0ee680e --- /dev/null +++ b/src/_nebari/stages/kubernetes_initialize/template/modules/keda/values.yaml @@ -0,0 +1 @@ +# https://github.com/kedacore/charts/blob/v2.13.2/keda/values.yaml diff --git a/src/_nebari/stages/kubernetes_initialize/template/modules/keda/variables.tf b/src/_nebari/stages/kubernetes_initialize/template/modules/keda/variables.tf new file mode 100644 index 0000000000..8929b69249 --- /dev/null +++ b/src/_nebari/stages/kubernetes_initialize/template/modules/keda/variables.tf @@ -0,0 +1,9 @@ +variable "namespace" { + description = "deploy keda server on this namespace" + type = string + default = "dev" +} + +variable "general_node_selector" { + description = "General node group selector." 
+} diff --git a/src/_nebari/stages/kubernetes_initialize/template/modules/nvidia-installer/variables.tf b/src/_nebari/stages/kubernetes_initialize/template/modules/nvidia-installer/variables.tf index 9eb9a9b2ab..b6c111efb5 100644 --- a/src/_nebari/stages/kubernetes_initialize/template/modules/nvidia-installer/variables.tf +++ b/src/_nebari/stages/kubernetes_initialize/template/modules/nvidia-installer/variables.tf @@ -3,6 +3,11 @@ variable "gpu_node_group_names" { default = [] } +variable "general_node_selector" { + description = "Node selector for general node group." + default = {} +} + variable "gpu_enabled" { description = "Enable GPU support" default = false diff --git a/src/_nebari/stages/kubernetes_initialize/template/variables.tf b/src/_nebari/stages/kubernetes_initialize/template/variables.tf index f169f5bcf2..bb6ee6f8c1 100644 --- a/src/_nebari/stages/kubernetes_initialize/template/variables.tf +++ b/src/_nebari/stages/kubernetes_initialize/template/variables.tf @@ -30,3 +30,7 @@ variable "gpu_enabled" { variable "gpu_node_group_names" { description = "Names of node groups with GPU" } + +variable "general_node_selector" { + description = "General node group selector." 
+} diff --git a/src/_nebari/stages/kubernetes_services/__init__.py b/src/_nebari/stages/kubernetes_services/__init__.py index fae8955de1..675ac7043c 100644 --- a/src/_nebari/stages/kubernetes_services/__init__.py +++ b/src/_nebari/stages/kubernetes_services/__init__.py @@ -183,6 +183,8 @@ class CondaStore(schema.Base): image_tag: str = constants.DEFAULT_CONDA_STORE_IMAGE_TAG default_namespace: str = "nebari-git" object_storage: str = "200Gi" + max_workers: int = 4 + worker_resources: dict = {"requests": {"cpu": "1", "memory": "4Gi"}} class NebariWorkflowController(schema.Base): @@ -360,6 +362,8 @@ class CondaStoreInputVars(schema.Base): conda_store_service_token_scopes: Dict[str, Dict[str, Any]] = Field( alias="conda-store-service-token-scopes" ) + conda_store_max_workers: int = Field(alias="conda-store-max-workers") + conda_store_worker_resources: dict = Field(alias="conda-store-worker-resources") class JupyterhubInputVars(schema.Base): @@ -468,6 +472,12 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): "*/*": ["viewer"], }, }, + "keda-scaler": { + "primary_namespace": "", + "role_bindings": { + "*/*": ["viewer"], + }, + }, } # Compound any logout URLs from extensions so they are are logged out in succession @@ -511,6 +521,8 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): conda_store_extra_config=self.config.conda_store.extra_config, conda_store_image=self.config.conda_store.image, conda_store_image_tag=self.config.conda_store.image_tag, + conda_store_max_workers=self.config.conda_store.max_workers, + conda_store_worker_resources=self.config.conda_store.worker_resources, ) jupyterhub_vars = JupyterhubInputVars( diff --git a/src/_nebari/stages/kubernetes_services/template/conda-store.tf b/src/_nebari/stages/kubernetes_services/template/conda-store.tf index 904a17e8df..9eb7998120 100644 --- a/src/_nebari/stages/kubernetes_services/template/conda-store.tf +++ b/src/_nebari/stages/kubernetes_services/template/conda-store.tf @@ -58,9 
+58,12 @@ module "kubernetes-conda-store-server" { for filename, environment in var.conda-store-environments : filename => yamlencode(environment) } - services = var.conda-store-service-token-scopes - extra-settings = var.conda-store-extra-settings - extra-config = var.conda-store-extra-config + services = var.conda-store-service-token-scopes + extra-settings = var.conda-store-extra-settings + extra-config = var.conda-store-extra-config + conda-store-worker-resources = var.conda-store-worker-resources + max-worker-replica-count = var.conda-store-max-workers + conda-store-keda-scaler-token = module.kubernetes-conda-store-server.service-tokens.keda-scaler } module "conda-store-nfs-mount" { diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py index 6ed6232ba8..cdb28ba637 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py @@ -157,7 +157,7 @@ async def authenticate(self, request): # ================================== c.CondaStoreWorker.log_level = logging.INFO c.CondaStoreWorker.watch_paths = ["/opt/environments"] -c.CondaStoreWorker.concurrency = 4 +c.CondaStoreWorker.concurrency = 1 # Template used to form the directory for symlinking conda environment builds. 
c.CondaStore.environment_directory = "/home/conda/{namespace}/envs/{namespace}-{name}" diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf index fd5ff0fa2f..2e0a84f794 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf @@ -76,3 +76,18 @@ variable "services" { description = "Map of services tokens and scopes for conda-store" type = map(any) } + +variable "max-worker-replica-count" { + description = "Maximum concurrency of conda workers" + type = number +} + +variable "conda-store-worker-resources" { + description = "Default resource allocation for conda-store worker pods" + type = map(any) +} + +variable "conda-store-keda-scaler-token" { + description = "Token for conda-store to be used by keda scaler for fetching conda environments dynamically." 
+ type = string +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf index c3e725dbea..2dbbb85c6a 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf @@ -113,6 +113,10 @@ resource "kubernetes_deployment" "worker" { "/etc/conda-store/conda_store_config.py" ] + resources { + requests = var.conda-store-worker-resources["requests"] + } + volume_mount { name = "config" mount_path = "/etc/conda-store" @@ -202,3 +206,98 @@ resource "kubernetes_deployment" "worker" { } } } + + +resource "kubernetes_secret" "keda-metric-api-secret" { + metadata { + name = "keda-metric-api-secret" + namespace = var.namespace + } + data = { + token = var.conda-store-keda-scaler-token + } +} + +resource "kubernetes_manifest" "triggerauthenticator" { + manifest = { + apiVersion = "keda.sh/v1alpha1" + kind = "TriggerAuthentication" + + metadata = { + name = "keda-metric-api-cred" + namespace = var.namespace + } + + spec = { + secretTargetRef = [ + { + parameter = "token" + name = "keda-metric-api-secret" + key = "token" + } + ] + } + } + depends_on = [ + kubernetes_deployment.worker + ] +} + +resource "kubernetes_manifest" "scaledobject" { + manifest = { + apiVersion = "keda.sh/v1alpha1" + kind = "ScaledObject" + + metadata = { + name = "scaled-conda-worker" + namespace = var.namespace + } + + spec = { + scaleTargetRef = { + kind = "Deployment" + name = "nebari-conda-store-worker" + } + maxReplicaCount = var.max-worker-replica-count + pollingInterval = 5 + cooldownPeriod = 5 + advanced = { + scalingModifiers = { + formula = "(trig_one + trig_two)" + target = "1" + metricType = "AverageValue" + } + } + triggers = [ + { + type = "metrics-api" + name = "trig_one" + metadata = { + 
url = "http://nebari-conda-store-server.${var.namespace}.svc:5000/conda-store/api/v1/build/?status=QUEUED" + valueLocation = "count" + authMode = "bearer" + } + authenticationRef = { + name = "keda-metric-api-cred" + } + }, + { + type = "metrics-api" + name = "trig_two" + metadata = { + url = "http://nebari-conda-store-server.${var.namespace}.svc:5000/conda-store/api/v1/build/?status=BUILDING" + valueLocation = "count" + authMode = "bearer" + } + authenticationRef = { + name = "keda-metric-api-cred" + } + } + ] + } + } + depends_on = [ + kubernetes_deployment.worker, + kubernetes_manifest.triggerauthenticator + ] +} diff --git a/src/_nebari/stages/kubernetes_services/template/variables.tf b/src/_nebari/stages/kubernetes_services/template/variables.tf index 9e36e65979..e9c96ccdb1 100644 --- a/src/_nebari/stages/kubernetes_services/template/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/variables.tf @@ -39,6 +39,16 @@ variable "conda-store-default-namespace" { type = string } +variable "conda-store-max-workers" { + description = "Maximum concurrency of conda workers" + type = number +} + +variable "conda-store-worker-resources" { + description = "Default resource allocation for conda-store worker pods" + type = map(any) +} + variable "argo-workflows-enabled" { description = "Enable Argo Workflows" type = bool diff --git a/tests/tests_deployment/test_conda_store_scaling.py b/tests/tests_deployment/test_conda_store_scaling.py new file mode 100644 index 0000000000..ca37f642c2 --- /dev/null +++ b/tests/tests_deployment/test_conda_store_scaling.py @@ -0,0 +1,234 @@ +import base64 +import json +import logging +import os +import sys +import time +import uuid + +import pytest +import requests +from kubernetes import client, config, dynamic + +from tests.tests_deployment import constants + +CONDA_STORE_API_ENDPOINT = "conda-store/api/v1" +NEBARI_HOSTNAME = constants.NEBARI_HOSTNAME +NAMESPACE = os.getenv("CONDA_STORE_SERVICE_NAMESPACE") 
+TEST_CONDASTORE_WORKER_COUNT = int(os.getenv("TEST_CONDASTORE_WORKER_COUNT", "1"))
+
+
+log = logging.getLogger()
+logging.basicConfig(
+    format="%(asctime)s %(module)s %(levelname)s: %(message)s",
+    datefmt="%m/%d/%Y %I:%M:%S %p",
+    level=logging.INFO,
+)
+stream_handler = logging.StreamHandler(sys.stdout)
+log.addHandler(stream_handler)
+
+
+@pytest.fixture
+def requests_session(patched_secret_token):
+    # Session pre-authenticated with the elevated conda-store service token.
+    session = requests.Session()
+    session.headers.update({"Authorization": f"Bearer {patched_secret_token}"})
+    yield session
+    session.close()
+
+
+@pytest.fixture
+def kubernetes_config():
+    yield config.load_kube_config()
+
+
+@pytest.fixture
+def api_client(kubernetes_config):
+    with client.ApiClient(kubernetes_config) as _api_client:
+        yield _api_client
+
+
+def get_conda_secret(api_instance, name, namespace):
+    """Read the conda-store secret and return (raw secret, decoded config.json)."""
+    log.info(f"Getting conda secret {name}, from namespace {namespace}")
+    api_response = api_instance.read_namespaced_secret(name, namespace)
+    api_response_data = api_response.data
+    secret_data = api_response_data["config.json"]
+    secret_config = json.loads(base64.b64decode(secret_data))
+    return api_response, secret_config
+
+
+def b64encodestr(string):
+    """Base64-encode a str and return the result as str."""
+    return base64.b64encode(string.encode("utf-8")).decode()
+
+
+@pytest.fixture
+def patched_secret_token(kubernetes_config, api_client):
+    # Create an instance of the API class
+    log.info("Creating a admin token for the test.")
+    api_instance = client.CoreV1Api(api_client)
+    name = "conda-store-secret"  # str | name of the Secret
+    elevated_token = str(uuid.uuid4())
+
+    # Get secret
+    api_response, secret_config = get_conda_secret(api_instance, name, NAMESPACE)
+
+    # Update secret
+    permissions = {
+        "primary_namespace": "",
+        "role_bindings": {"*/*": ["admin"]},
+    }
+    secret_config["service-tokens"][elevated_token] = permissions
+    api_response.data = {"config.json": b64encodestr(json.dumps(secret_config))}
+    log.info(f"Patching secret: {name}.")
+    api_instance.patch_namespaced_secret(name, NAMESPACE, api_response)
+
+    # Get pod name for conda-store
+    api_response = api_instance.list_namespaced_pod(NAMESPACE)
+    server_pod = [
+        i for i in api_response.items if "nebari-conda-store-server-" in i.metadata.name
+    ][0]
+    log.info(f"Restarting conda-store-server pod: {server_pod.metadata.name}")
+    api_instance.delete_namespaced_pod(server_pod.metadata.name, NAMESPACE)
+    # NOTE(review): fixed sleep assumes the server restarts within 10s;
+    # a readiness poll would be more robust -- confirm in CI.
+    time.sleep(10)
+
+    yield elevated_token
+
+
+def get_build_status(build_id, session):
+    """Fetch the status string of a single conda-store build."""
+    _res = session.get(
+        f"https://{NEBARI_HOSTNAME}/{CONDA_STORE_API_ENDPOINT}/build/{build_id}",
+        verify=False,
+    )
+    status = _res.json().get("data")["status"]
+    return status
+
+
+@pytest.mark.timeout(6 * 60)
+def timed_wait_for_environment_creation(builds, session):
+    """Block until every build in *builds* reports COMPLETED.
+
+    The previous implementation returned "success" as soon as the COMPLETED
+    count was unchanged between two consecutive polls -- including 0 == 0 on
+    the very first poll -- so it could pass without any build finishing.
+    """
+    total = len(builds)
+    while True:
+        done = len([b for b in builds if get_build_status(b, session) == "COMPLETED"])
+        log.info(f"{done}/{total} Environments created")
+        if done == total:
+            log.info("Environment creation finished successfully.")
+            return
+        time.sleep(5)  # poll gently instead of hammering the API
+
+
+def get_deployment_count(api_client):
+    """Return the current replica count of the conda-store worker deployment."""
+    _client = dynamic.DynamicClient(api_client)
+    deployment_api = _client.resources.get(api_version="apps/v1", kind="Deployment")
+    deployment = deployment_api.get(
+        name="nebari-conda-store-worker", namespace=NAMESPACE
+    )
+    replica_count = deployment.spec.replicas
+    messages = "\n".join([c["message"] for c in deployment.status["conditions"]])
+    log.info(f"Deployment logs: {messages}")
+    pod_names = find_conda_store_worker_pod_names()
+    if deployment.status.readyReplicas:
+        pod_name_lookup = messages.split('"')[1]
+        # Guard against no matching pod: the original loop left pod_name
+        # unbound in that case, raising NameError on the log-read below.
+        pod_name = next((n for n in pod_names if pod_name_lookup in n), None)
+        if pod_name is not None:
+            api_response = client.CoreV1Api().read_namespaced_pod_log(
+                name=pod_name, namespace=NAMESPACE, container="conda-store-worker"
+            )
+            log.debug(f"conda-store-worker logs: {api_response}")
+    return replica_count
+
+
+def find_conda_store_worker_pod_names():
+    """
+    Return names of conda-store worker pods in NAMESPACE.
+    """
+    k8s_api_obj = client.CoreV1Api()
+    api_response = k8s_api_obj.list_namespaced_pod(NAMESPACE)
+    names = [
+        i.metadata.name
+        for i in api_response.items
+        if i.metadata.labels.get("role")
+        and "nebari-conda-store-worker" in i.metadata.labels["role"]
+    ]
+    return names
+
+
+@pytest.mark.timeout(20 * 60)
+def timed_wait_for_deployments(target_deployment_count, api_client):
+    """Poll until the worker deployment reaches target_deployment_count replicas."""
+    log.info(
+        f"Waiting for deployments to reach target value {target_deployment_count} ..."
+    )
+    replica_count = get_deployment_count(api_client)
+    while replica_count != target_deployment_count:
+        replica_count = get_deployment_count(api_client)
+        direction = "up" if target_deployment_count > replica_count else "down"
+        log.info(
+            f"Scaling {direction} deployments: from {replica_count} to {target_deployment_count}"
+        )
+        time.sleep(5)
+    log.info(f"Deployment count: {replica_count}")
+
+
+def delete_conda_environments(session):
+    """Delete every conda environment in the global namespace."""
+    existing_envs_url = f"https://{NEBARI_HOSTNAME}/{CONDA_STORE_API_ENDPOINT}/environment/?namespace=global"
+    response = session.get(existing_envs_url, verify=False)
+    for env in response.json()["data"]:
+        env_name = env["name"]
+        delete_url = f"https://{NEBARI_HOSTNAME}/{CONDA_STORE_API_ENDPOINT}/environment/global/{env_name}"
+        log.info(f"Deleting {delete_url}")
+        session.delete(delete_url, verify=False)
+    log.info("All conda environments deleted.")
+
+
+def create_conda_store_env(session):
+    """Submit a one-package environment spec and return its build id."""
+    _url = f"https://{NEBARI_HOSTNAME}/{CONDA_STORE_API_ENDPOINT}/specification/"
+    name = str(uuid.uuid4())
+    request_json = {
+        "namespace": "global",
+        "specification": f"dependencies:\n - tqdm\nvariables: {{}}\nchannels: "
+        f"[]\n\ndescription: ''\nname: {name}\nprefix: null",
+    }
+    response = session.post(_url, json=request_json, verify=False)
+    log.debug(request_json)
+    log.debug(response.json())
+    return response.json()["data"]["build_id"]
+
+
+@pytest.mark.timeout(10)
+def build_n_environments(n, builds, session):
+    """Kick off n environment builds, appending their build ids to builds."""
+    log.info(f"Building {n} conda environments.")
+    for _ in range(n):
+        time.sleep(1)  # stagger submissions slightly
+        builds.append(create_conda_store_env(session))
+    return builds
+
+
+@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+def test_scale_up_and_down(patched_secret_token, api_client, requests_session):
+    """
+    Adds an admin token in conda-store-secret
+    Restarts conda-store-server.
+    Creates environment.
+    Validate pod scale-up.
+    Validate environment creation.
+    Validates pod scale-down.
+    """
+    builds = []
+    _initial_deployment_count = get_deployment_count(api_client)
+    log.info(f"Deployments at the start of the test: {_initial_deployment_count}")
+    delete_conda_environments(requests_session)
+    builds = build_n_environments(
+        TEST_CONDASTORE_WORKER_COUNT, builds, requests_session
+    )
+    log.info(
+        f"Wait for {TEST_CONDASTORE_WORKER_COUNT} conda-store-worker pods to start."
+    )
+    timed_wait_for_deployments(
+        TEST_CONDASTORE_WORKER_COUNT + _initial_deployment_count, api_client
+    )
+    timed_wait_for_environment_creation(builds, requests_session)
+    log.info(f"Wait till worker deployment scales down to {_initial_deployment_count}")
+    timed_wait_for_deployments(_initial_deployment_count, api_client)
+    log.info("Deleting conda environments.")
+    delete_conda_environments(requests_session)
+    log.info("Test passed.")
diff --git a/tests/tests_deployment/test_jupyterhub_api.py b/tests/tests_deployment/test_jupyterhub_api.py
index 5e1a54562b..5f02c72dd4 100644
--- a/tests/tests_deployment/test_jupyterhub_api.py
+++ b/tests/tests_deployment/test_jupyterhub_api.py
@@ -9,6 +9,7 @@
 @pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning")
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
 def test_jupyterhub_loads_roles_from_keycloak():
     session = get_jupyterhub_session()
     xsrf_token = session.cookies.get("_xsrf")