diff --git a/.gitignore b/.gitignore index 3df3c9e4..01e9cedd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Editors +.vscode + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/SETUP.md b/SETUP.md index f4b3210a..a5122f70 100644 --- a/SETUP.md +++ b/SETUP.md @@ -91,4 +91,17 @@ host machine. The `jupyterhub_config.py` file that ships in this repo will read that environment variable to figure out what IP the pods should connect to the JupyterHub on. Replace `vboxnet4` with whatever interface name you used in step 4 of the previous section. -This will give you a running JupyterHub that spawns nodes inside the minikube VM! It'll be setup with [DummyAuthenticator](http://github.com/yuvipanda/jupyterhub-dummy-authenticator), so any user + password combo will allow you to log in. You can make changes to the spawner and restart jupyterhub, and rapidly iterate :) + This will give you a running JupyterHub that spawns nodes inside the minikube VM! It'll be setup with [DummyAuthenticator](http://github.com/yuvipanda/jupyterhub-dummy-authenticator), so any user + password combo will allow you to log in. You can make changes to the spawner and restart jupyterhub, and rapidly iterate :) + +## Running tests + +``` +python setup.py test +``` + +If you got a massive amount of errors, it may help to remove your .eggs +directory. + +``` +rm -rf .eggs +``` \ No newline at end of file diff --git a/kubespawner/objects.py b/kubespawner/objects.py index 3c6f5f2e..3c2a6527 100644 --- a/kubespawner/objects.py +++ b/kubespawner/objects.py @@ -6,19 +6,24 @@ import escapism import re import string +from kubespawner.utils import get_k8s_model, update_k8s_model from kubernetes.client.models import ( V1Pod, V1PodSpec, V1PodSecurityContext, V1ObjectMeta, V1LocalObjectReference, V1Volume, V1VolumeMount, - V1Container, V1ContainerPort, V1SecurityContext, V1EnvVar, V1ResourceRequirements, + V1Container, V1ContainerPort, V1SecurityContext, V1EnvVar, V1ResourceRequirements, V1Lifecycle, V1PersistentVolumeClaim, V1PersistentVolumeClaimSpec, V1Endpoints, V1EndpointSubset, V1EndpointAddress, V1EndpointPort, V1Service, V1ServiceSpec, V1ServicePort, V1beta1Ingress, V1beta1IngressSpec, V1beta1IngressRule, V1beta1HTTPIngressRuleValue, V1beta1HTTPIngressPath, - V1beta1IngressBackend + V1beta1IngressBackend, + V1Toleration, + V1Affinity, + V1NodeAffinity, V1NodeSelector, V1NodeSelectorTerm, V1PreferredSchedulingTerm, V1NodeSelectorRequirement, + V1PodAffinity, V1PodAntiAffinity, V1WeightedPodAffinityTerm, V1PodAffinityTerm, ) def make_pod( @@ -34,12 +39,12 @@ def make_pod( fs_gid=None, supplemental_gids=None, run_privileged=False, - env={}, + env=None, working_dir=None, - volumes=[], - volume_mounts=[], - labels={}, - annotations={}, + volumes=None, + volume_mounts=None, + labels=None, + annotations=None, cpu_limit=None, cpu_guarantee=None, mem_limit=None, @@ -52,7 +57,16 @@ def make_pod( extra_container_config=None, extra_pod_config=None, extra_containers=None, - scheduler_name=None + scheduler_name=None, + tolerations=None, + node_affinity_preferred=None, + node_affinity_required=None, + pod_affinity_preferred=None, + pod_affinity_required=None, + pod_anti_affinity_preferred=None, + pod_anti_affinity_required=None, + priority_class_name=None, + logger=None, ): """ Make a k8s pod specification for running a user notebook. @@ -143,17 +157,73 @@ def make_pod( extra_containers: Extra containers besides notebook container. Used for some housekeeping jobs (e.g. crontab). 
    scheduler_name:
-        A custom scheduler's name.
+        Set the pod's scheduler explicitly by name.
+    tolerations:
+        Tolerations can allow a pod to schedule or execute on a tainted node. To
+        learn more about pod tolerations, see
+        https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/.
+
+        Pass this field an array of "Toleration" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#toleration-v1-core
+    node_affinity_preferred:
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "PreferredSchedulingTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#preferredschedulingterm-v1-core
+    node_affinity_required:
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "NodeSelectorTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#nodeselectorterm-v1-core
+    pod_affinity_preferred:
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "WeightedPodAffinityTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core
+    pod_affinity_required:
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "PodAffinityTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core
+    pod_anti_affinity_preferred:
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "WeightedPodAffinityTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core
+    pod_anti_affinity_required:
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "PodAffinityTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core
+    priority_class_name:
+        The name of the PriorityClass to be assigned to the pod. This feature is in Beta as of Kubernetes 1.11.
""" - pod = V1Pod() pod.kind = "Pod" pod.api_version = "v1" pod.metadata = V1ObjectMeta( name=name, - labels=labels.copy(), - annotations=annotations.copy() + labels=(labels or {}).copy(), + annotations=(annotations or {}).copy() ) pod.spec = V1PodSpec(containers=[]) @@ -179,17 +249,20 @@ def make_pod( if node_selector: pod.spec.node_selector = node_selector + if lifecycle_hooks: + lifecycle_hooks = get_k8s_model(V1Lifecycle, lifecycle_hooks) + notebook_container = V1Container( name='notebook', image=image_spec, working_dir=working_dir, ports=[V1ContainerPort(name='notebook-port', container_port=port)], - env=[V1EnvVar(k, v) for k, v in env.items()], + env=[V1EnvVar(k, v) for k, v in (env or {}).items()], args=cmd, image_pull_policy=image_pull_policy, lifecycle=lifecycle_hooks, resources=V1ResourceRequirements(), - volume_mounts=volume_mounts + volume_mounts=[get_k8s_model(V1VolumeMount, obj) for obj in (volume_mounts or [])], ) if service_account is None: @@ -200,9 +273,7 @@ def make_pod( pod.spec.service_account_name = service_account if run_privileged: - notebook_container.security_context = V1SecurityContext( - privileged=True - ) + notebook_container.security_context = V1SecurityContext(privileged=True) notebook_container.resources.requests = {} if cpu_guarantee: @@ -210,8 +281,7 @@ def make_pod( if mem_guarantee: notebook_container.resources.requests['memory'] = mem_guarantee if extra_resource_guarantees: - for k in extra_resource_guarantees: - notebook_container.resources.requests[k] = extra_resource_guarantees[k] + notebook_container.resources.requests.update(extra_resource_guarantees) notebook_container.resources.limits = {} if cpu_limit: @@ -219,38 +289,103 @@ def make_pod( if mem_limit: notebook_container.resources.limits['memory'] = mem_limit if extra_resource_limits: - for k in extra_resource_limits: - notebook_container.resources.limits[k] = extra_resource_limits[k] - - pod.spec.containers.append(notebook_container) + notebook_container.resources.limits.update(extra_resource_limits) if extra_container_config: - for key, value in extra_container_config.items(): - setattr(notebook_container, _map_attribute(notebook_container.attribute_map, key), value) - if extra_pod_config: - for key, value in extra_pod_config.items(): - setattr(pod.spec, _map_attribute(pod.spec.attribute_map, key), value) - if extra_containers: - pod.spec.containers.extend(extra_containers) + notebook_container = update_k8s_model( + target=notebook_container, + source=extra_container_config, + logger=logger, + origin="extra_container_config", + ) - pod.spec.init_containers = init_containers - pod.spec.volumes = volumes + pod.spec.containers.append(notebook_container) + if extra_containers: + pod.spec.containers.extend([get_k8s_model(V1Container, obj) for obj in extra_containers]) + if tolerations: + pod.spec.tolerations = [get_k8s_model(V1Toleration, obj) for obj in tolerations] + if init_containers: + pod.spec.init_containers = [get_k8s_model(V1Container, obj) for obj in init_containers] + if volumes: + pod.spec.volumes = [get_k8s_model(V1Volume, obj) for obj in volumes] + else: + # Keep behaving exactly like before by not cleaning up generated pod + # spec by setting the volumes field even though it is an empty list. 
+ pod.spec.volumes = [] if scheduler_name: pod.spec.scheduler_name = scheduler_name - return pod + node_affinity = None + if node_affinity_preferred or node_affinity_required: + node_selector = None + if node_affinity_required: + node_selector = V1NodeSelector( + node_selector_terms=[get_k8s_model(V1NodeSelectorTerm, obj) for obj in node_affinity_required], + ) + + preferred_scheduling_terms = None + if node_affinity_preferred: + preferred_scheduling_terms = [get_k8s_model(V1PreferredSchedulingTerm, obj) for obj in node_affinity_preferred] + node_affinity = V1NodeAffinity( + preferred_during_scheduling_ignored_during_execution=preferred_scheduling_terms, + required_during_scheduling_ignored_during_execution=node_selector, + ) -def _map_attribute(attribute_map, attribute): - if attribute in attribute_map: - return attribute + pod_affinity = None + if pod_affinity_preferred or pod_affinity_required: + weighted_pod_affinity_terms = None + if pod_affinity_preferred: + weighted_pod_affinity_terms = [get_k8s_model(V1WeightedPodAffinityTerm, obj) for obj in pod_affinity_preferred] - for key, value in attribute_map.items(): - if value == attribute: - return key - else: - raise ValueError('Attribute must be one of {}'.format(attribute_map.values())) + pod_affinity_terms = None + if pod_affinity_required: + pod_affinity_terms = [get_k8s_model(V1PodAffinityTerm, obj) for obj in pod_affinity_required] + + pod_affinity = V1PodAffinity( + preferred_during_scheduling_ignored_during_execution=weighted_pod_affinity_terms, + required_during_scheduling_ignored_during_execution=pod_affinity_terms, + ) + + pod_anti_affinity = None + if pod_anti_affinity_preferred or pod_anti_affinity_required: + weighted_pod_affinity_terms = None + if pod_anti_affinity_preferred: + weighted_pod_affinity_terms = [get_k8s_model(V1WeightedPodAffinityTerm, obj) for obj in pod_anti_affinity_preferred] + + pod_affinity_terms = None + if pod_anti_affinity_required: + pod_affinity_terms = [get_k8s_model(V1PodAffinityTerm, obj) for obj in pod_anti_affinity_required] + + pod_anti_affinity = V1PodAffinity( + preferred_during_scheduling_ignored_during_execution=weighted_pod_affinity_terms, + required_during_scheduling_ignored_during_execution=pod_affinity_terms, + ) + + affinity = None + if (node_affinity or pod_affinity or pod_anti_affinity): + affinity = V1Affinity( + node_affinity=node_affinity, + pod_affinity=pod_affinity, + pod_anti_affinity=pod_anti_affinity, + ) + + if affinity: + pod.spec.affinity = affinity + + if priority_class_name: + pod.spec.priority_class_name = priority_class_name + + if extra_pod_config: + pod.spec = update_k8s_model( + target=pod.spec, + source=extra_pod_config, + logger=logger, + origin="extra_pod_config", + ) + + return pod def make_pvc( @@ -258,8 +393,8 @@ def make_pvc( storage_class, access_modes, storage, - labels, - annotations={} + labels=None, + annotations=None ): """ Make a k8s pvc specification for running a user notebook. 
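The new scheduling-related arguments are accepted as plain dicts and lists and converted into kubernetes-client models with `get_k8s_model`, as the loops in `make_pod` above do for `V1Toleration`, `V1Container`, and the affinity terms. Below is a minimal sketch of that conversion, assuming `get_k8s_model` accepts the same camelCase-free keys shown in the trait examples; the taint key and value are made-up illustrations, not part of this change.

```python
from kubernetes.client.models import V1Toleration

from kubespawner.utils import get_k8s_model

# A toleration expressed as a plain dict, e.g. as it would arrive from
# KubeSpawner configuration; the key/value below are illustrative only.
toleration_dict = {
    'key': 'hub.jupyter.org/dedicated',
    'operator': 'Equal',
    'value': 'user',
    'effect': 'NoSchedule',
}

# make_pod performs this same conversion for each entry in `tolerations`.
toleration = get_k8s_model(V1Toleration, toleration_dict)
assert isinstance(toleration, V1Toleration)
```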
@@ -282,9 +417,8 @@ def make_pvc( pvc.api_version = "v1" pvc.metadata = V1ObjectMeta() pvc.metadata.name = name - pvc.metadata.annotations = annotations - pvc.metadata.labels = {} - pvc.metadata.labels.update(labels) + pvc.metadata.annotations = (annotations or {}).copy() + pvc.metadata.labels = (labels or {}).copy() pvc.spec = V1PersistentVolumeClaimSpec() pvc.spec.access_modes = access_modes pvc.spec.resources = V1ResourceRequirements() diff --git a/kubespawner/spawner.py b/kubespawner/spawner.py index abc14272..10afe543 100644 --- a/kubespawner/spawner.py +++ b/kubespawner/spawner.py @@ -16,7 +16,7 @@ from tornado import gen from tornado.ioloop import IOLoop from tornado.concurrent import run_on_executor -from traitlets import Any, Unicode, List, Integer, Union, Dict, Bool, Any, validate +from traitlets import Any, Unicode, List, Integer, Union, Dict, Bool, Any, validate, default from jupyterhub.spawner import Spawner from jupyterhub.utils import exponential_backoff from jupyterhub.traitlets import Command @@ -63,6 +63,9 @@ def events(self): ) +class MockObject(object): + pass + class KubeSpawner(Spawner): """ Implement a JupyterHub spawner to spawn pods in a Kubernetes Cluster. @@ -90,37 +93,50 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if _mock: - # if testing, skip the rest of initialization - # FIXME: rework initialization for easier mocking - return + # runs during test execution only + user = MockObject() + user.name = 'mock_name' + user.id = 'mock_id' + user.url = 'mock_url' + self.user = user + + hub = MockObject() + hub.public_host = 'mock_public_host' + hub.url = 'mock_url' + hub.base_url = 'mock_base_url' + hub.api_url = 'mock_api_url' + self.hub = hub + else: + # runs during normal execution only - # By now, all the traitlets have been set, so we can use them to compute - # other attributes - if self.__class__.executor is None: - self.__class__.executor = ThreadPoolExecutor( - max_workers=self.k8s_api_threadpool_workers - ) + # By now, all the traitlets have been set, so we can use them to compute + # other attributes + if self.__class__.executor is None: + self.__class__.executor = ThreadPoolExecutor( + max_workers=self.k8s_api_threadpool_workers + ) - # This will start watching in __init__, so it'll start the first - # time any spawner object is created. Not ideal but works! - self._start_watching_pods() - if self.events_enabled: - self._start_watching_events() + # This will start watching in __init__, so it'll start the first + # time any spawner object is created. Not ideal but works! 
+ self._start_watching_pods() + if self.events_enabled: + self._start_watching_events() - self.api = shared_client('CoreV1Api') + self.api = shared_client('CoreV1Api') + if self.hub_connect_ip: + scheme, netloc, path, params, query, fragment = urlparse(self.hub.api_url) + netloc = '{ip}:{port}'.format( + ip=self.hub_connect_ip, + port=self.hub_connect_port, + ) + self.accessible_hub_api_url = urlunparse((scheme, netloc, path, params, query, fragment)) + else: + self.accessible_hub_api_url = self.hub.api_url + + # runs during both test and normal execution self.pod_name = self._expand_user_properties(self.pod_name_template) self.pvc_name = self._expand_user_properties(self.pvc_name_template) - if self.hub_connect_ip: - scheme, netloc, path, params, query, fragment = urlparse(self.hub.api_url) - netloc = '{ip}:{port}'.format( - ip=self.hub_connect_ip, - port=self.hub_connect_port, - ) - self.accessible_hub_api_url = urlunparse((scheme, netloc, path, params, query, fragment)) - else: - self.accessible_hub_api_url = self.hub.api_url - if self.port == 0: # Our default port is 8888 self.port = 8888 @@ -128,8 +144,7 @@ def __init__(self, *args, **kwargs): k8s_api_threadpool_workers = Integer( # Set this explicitly, since this is the default in Python 3.5+ # but not in 3.4 - 5 * multiprocessing.cpu_count(), - config=True, + default_value=5 * multiprocessing.cpu_count(), help=""" Number of threads in thread pool used to talk to the k8s API. @@ -137,29 +152,28 @@ def __init__(self, *args, **kwargs): Defaults to `5 * cpu_cores`, which is the default for `ThreadPoolExecutor`. """ - ) + ).tag(config=True) events_enabled = Bool( - True, - config=True, + default_value=True, help=""" Enable event-watching for progress-reports to the user spawn page. Disable if these events are not desirable or to save some performance cost. """ - ) + ).tag(config=True) namespace = Unicode( - config=True, help=""" Kubernetes namespace to spawn user pods in. If running inside a kubernetes cluster with service accounts enabled, defaults to the current namespace. If not, defaults to `default` """ - ) + ).tag(config=True) + @default('namespace') def _namespace_default(self): """ Set namespace default to current namespace if running in a k8s cluster @@ -173,7 +187,8 @@ def _namespace_default(self): return f.read().strip() return 'default' - ip = Unicode('0.0.0.0', + ip = Unicode( + default_value='0.0.0.0', help=""" The IP address (or hostname) the single-user server should listen on. @@ -183,7 +198,7 @@ def _namespace_default(self): ).tag(config=True) cmd = Command( - None, + default_value=None, allow_none=True, minlen=0, help=""" @@ -203,7 +218,7 @@ def _namespace_default(self): ).tag(config=True) working_dir = Unicode( - None, + default_value=None, allow_none=True, help=""" The working directory where the Notebook server will be started inside the container. @@ -212,9 +227,8 @@ def _namespace_default(self): ).tag(config=True) service_account = Unicode( - None, + default_value=None, allow_none=True, - config=True, help=""" The service account to be mounted in the spawned user pod. @@ -227,11 +241,10 @@ def _namespace_default(self): has the minimal permissions needed, and nothing more. When misconfigured, this can easily give arbitrary users root over your entire cluster. """ - ) + ).tag(config=True) pod_name_template = Unicode( - 'jupyter-{username}{servername}', - config=True, + default_value='jupyter-{username}{servername}', help=""" Template to use to form the name of user's pods. 
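Throughout spawner.py the traitlets declarations are being normalized: positional defaults and the `config=True` keyword argument move to `default_value=` plus `.tag(config=True)`, and computed defaults move behind explicit `@default` decorators. A small standalone sketch of the pattern follows; the class and trait names are illustrative, not part of KubeSpawner.

```python
from traitlets import Integer, Unicode, default
from traitlets.config import Configurable


class ExampleSpawnerConfig(Configurable):
    # Before: port = Integer(8888, config=True, help="...")
    port = Integer(
        default_value=8888,
        help="Port the single-user server listens on.",
    ).tag(config=True)

    namespace = Unicode(
        help="Kubernetes namespace to spawn user pods in.",
    ).tag(config=True)

    @default('namespace')
    def _namespace_default(self):
        # Dynamic default, evaluated lazily per instance instead of at
        # class-definition time.
        return 'default'
```

Tagging a trait with `config=True` metadata is equivalent to passing `config=True` to its constructor, and the `@default` decorator makes the dynamic-default registration explicit instead of relying on the `_<name>_default` naming convention.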
@@ -242,22 +255,20 @@ def _namespace_default(self): in, so if you are running multiple jupyterhubs spawning in the same namespace, consider setting this to be something more unique. """ - ) + ).tag(config=True) storage_pvc_ensure = Bool( - False, - config=True, + default_value=False, help=""" Ensure that a PVC exists for each user before spawning. Set to true to create a PVC named with `pvc_name_template` if it does not exist for the user when their pod is spawning. """ - ) + ).tag(config=True) pvc_name_template = Unicode( - 'claim-{username}{servername}', - config=True, + default_value='claim-{username}{servername}', help=""" Template to use to form the name of user's pvc. @@ -268,11 +279,10 @@ def _namespace_default(self): in, so if you are running multiple jupyterhubs spawning in the same namespace, consider setting this to be something more unique. """ - ) + ).tag(config=True) hub_connect_ip = Unicode( - None, - config=True, + default_value=None, allow_none=True, help=""" IP/DNS hostname to be used by pods to reach out to the hub API. @@ -290,10 +300,9 @@ def _namespace_default(self): Used together with `hub_connect_port` configuration. """ - ) + ).tag(config=True) hub_connect_port = Integer( - config=True, help=""" Port to use by pods to reach out to the hub API. @@ -306,8 +315,9 @@ def _namespace_default(self): This should be set to the `port` attribute of a service that is fronting the hub pod. """ - ) + ).tag(config=True) + @default('hub_connect_port') def _hub_connect_port_default(self): """ Set default port on which pods connect to hub to be the hub port @@ -319,11 +329,10 @@ def _hub_connect_port_default(self): return self.hub.server.port common_labels = Dict( - { + default_value={ 'app': 'jupyterhub', 'heritage': 'jupyterhub', }, - config=True, help=""" Kubernetes labels that both spawned singleuser server pods and created user PVCs will get. @@ -331,11 +340,10 @@ def _hub_connect_port_default(self): Note that these are only set when the Pods and PVCs are created, not later when this setting is updated. """ - ) + ).tag(config=True) extra_labels = Dict( - {}, - config=True, + default_value={}, help=""" Extra kubernetes labels to set on the spawned single-user pods. @@ -349,11 +357,10 @@ def _hub_connect_port_default(self): `{username}` and `{userid}` are expanded to the escaped, dns-label safe username & integer user id respectively, wherever they are used. """ - ) + ).tag(config=True) extra_annotations = Dict( - {}, - config=True, + default_value={}, help=""" Extra kubernetes annotations to set on the spawned single-user pods. @@ -366,11 +373,10 @@ def _hub_connect_port_default(self): `{username}` and `{userid}` are expanded to the escaped, dns-label safe username & integer user id respectively, wherever they are used. """ - ) + ).tag(config=True) image_spec = Unicode( - 'jupyterhub/singleuser:latest', - config=True, + default_value='jupyterhub/singleuser:latest', help=""" Docker image spec to use for spawning user's containers. @@ -394,11 +400,10 @@ def _hub_connect_port_default(self): c.KubeSpawner.start_timeout = 60 * 5 # Upto 5 minutes """ - ) + ).tag(config=True) image_pull_policy = Unicode( - 'IfNotPresent', - config=True, + default_value='IfNotPresent', help=""" The image pull policy of the docker container specified in `image_spec`. @@ -412,12 +417,11 @@ def _hub_connect_port_default(self): actively changing the `image_spec` and would like to pull the image whenever a user container is spawned. 
""" - ) + ).tag(config=True) image_pull_secrets = Unicode( - None, + default_value=None, allow_none=True, - config=True, help=""" The kubernetes secret to use for pulling images from private repository. @@ -428,11 +432,10 @@ def _hub_connect_port_default(self): has more information on when and why this might need to be set, and what it should be set to. """ - ) + ).tag(config=True) node_selector = Dict( - {}, - config=True, + default_value={}, help=""" The dictionary Selector labels used to match the Nodes where Pods will be launched. @@ -442,15 +445,14 @@ def _hub_connect_port_default(self): {"disktype": "ssd"} """ - ) + ).tag(config=True) uid = Union( - [ + trait_types=[ Integer(), Callable(), ], allow_none=True, - config=True, help=""" The UID to run the single-user server containers as. @@ -467,15 +469,14 @@ def _hub_connect_port_default(self): If set to `None`, the user specified with the `USER` directive in the container metadata is used. """ - ) + ).tag(config=True) gid = Union( - [ + trait_types=[ Integer(), Callable(), ], allow_none=True, - config=True, help=""" The GID to run the single-user server containers as. @@ -492,15 +493,14 @@ def _hub_connect_port_default(self): If set to `None`, the group of the user specified with the `USER` directive in the container metadata is used. """ - ) + ).tag(config=True) fs_gid = Union( - [ + trait_types=[ Integer(), Callable(), ], allow_none=True, - config=True, help=""" The GID of the group that should own any volumes that are created & mounted. @@ -527,15 +527,14 @@ def _hub_connect_port_default(self): cloud providers. See `fsGroup `_ for more details. """ - ) + ).tag(config=True) supplemental_gids = Union( - [ + trait_types=[ List(), Callable(), ], allow_none=True, - config=True, help=""" A list of GIDs that should be set as additional supplemental groups to the user that the container runs as. @@ -554,20 +553,18 @@ def _hub_connect_port_default(self): image must setup all directories/files any application needs access to, as group writable. """ - ) + ).tag(config=True) privileged = Bool( - False, - config=True, + default_value=False, help=""" Whether to run the pod with a privileged security context. """ - ) + ).tag(config=True) modify_pod_hook = Callable( - None, + default_value=None, allow_none=True, - config=True, help=""" Callable to augment the Pod object before launching. @@ -584,11 +581,10 @@ def _hub_connect_port_default(self): Note that the spawner object can change between versions of KubeSpawner and JupyterHub, so be careful relying on this! """ - ) + ).tag(config=True) volumes = List( - [], - config=True, + default_value=[], help=""" List of Kubernetes Volume specifications that will be mounted in the user pod. @@ -612,11 +608,10 @@ def _hub_connect_port_default(self): `{username}` and `{userid}` are expanded to the escaped, dns-label safe username & integer user id respectively, wherever they are used. """ - ) + ).tag(config=True) volume_mounts = List( - [], - config=True, + default_value=[], help=""" List of paths on which to mount volumes in the user notebook's pod. @@ -633,11 +628,10 @@ def _hub_connect_port_default(self): `{username}` and `{userid}` are expanded to the escaped, dns-label safe username & integer user id respectively, wherever they are used. 
""" - ) + ).tag(config=True) storage_capacity = Unicode( - None, - config=True, + default_value=None, allow_none=True, help=""" The ammount of storage space to request from the volume that the pvc will @@ -656,11 +650,10 @@ def _hub_connect_port_default(self): the same value: `128974848`, `129e6`, `129M`, `123Mi`. (https://github.com/kubernetes/kubernetes/blob/master/docs/design/resources.md) """ - ) + ).tag(config=True) storage_extra_labels = Dict( - {}, - config=True, + default_value={}, help=""" Extra kubernetes labels to set on the user PVCs. @@ -674,11 +667,10 @@ def _hub_connect_port_default(self): `{username}` and `{userid}` are expanded to the escaped, dns-label safe username & integer user id respectively, wherever they are used. """ - ) + ).tag(config=True) storage_class = Unicode( - None, - config=True, + default_value=None, allow_none=True, help=""" The storage class that the pvc will use. If left blank, the kubespawner will not @@ -696,11 +688,10 @@ def _hub_connect_port_default(self): more information on how StorageClasses work. """ - ) + ).tag(config=True) storage_access_modes = List( - ["ReadWriteOnce"], - config=True, + default_value=["ReadWriteOnce"], help=""" List of access modes the user has for the pvc. @@ -713,11 +704,10 @@ def _hub_connect_port_default(self): See https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes for more information on how access modes work. """ - ) + ).tag(config=True) lifecycle_hooks = Dict( - {}, - config=True, + default_value={}, help=""" Kubernetes lifecycle hooks to set on the spawned single-user pods. @@ -738,11 +728,10 @@ def _hub_connect_port_default(self): See https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ for more info on what lifecycle hooks are and why you might want to use them! """ - ) + ).tag(config=True) init_containers = List( - None, - config=True, + default_value=[], help=""" List of initialization containers belonging to the pod. @@ -769,11 +758,10 @@ def _hub_connect_port_default(self): To user this feature, Kubernetes version must greater than 1.6. """ - ) + ).tag(config=True) extra_container_config = Dict( - None, - config=True, + default_value={}, help=""" Extra configuration (e.g. ``envFrom``) for notebook container which is not covered by other attributes. @@ -797,11 +785,10 @@ def _hub_connect_port_default(self): or underscore-separated word (used by kubernetes python client, e.g. ``env_from``). """ - ) + ).tag(config=True) extra_pod_config = Dict( - None, - config=True, + default_value={}, help=""" Extra configuration (e.g. tolerations) for the pod which is not covered by other attributes. @@ -816,11 +803,10 @@ def _hub_connect_port_default(self): The `key` could be either camelcase word (used by Kubernetes yaml, e.g. `dnsPolicy`) or underscore-separated word (used by kubernetes python client, e.g. `dns_policy`). """ - ) + ).tag(config=True) extra_containers = List( - None, - config=True, + default_value=[], help=""" List of containers belonging to the pod which besides to the container generated for notebook server. @@ -839,11 +825,122 @@ def _hub_connect_port_default(self): ] """ - ) + ).tag(config=True) + + scheduler_name = Unicode( + default_value=None, + allow_none=True, + help=""" + Set the pod's scheduler explicitly by name. + See the Kubernetes API documentation for additional details. 
- https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core
+        """
+    ).tag(config=True)
+
+    tolerations = List(
+        default_value=[],
+        help="""
+        List of tolerations that are to be assigned to the pod so that the pod
+        can be scheduled on nodes with the corresponding taints. See the official Kubernetes documentation for additional details
+        https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+
+        Pass this field an array of "Toleration" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#toleration-v1-core
+
+        Example:
+
+            [
+                {
+                    'key': 'key',
+                    'operator': 'Equal',
+                    'value': 'value',
+                    'effect': 'NoSchedule'
+                },
+                {
+                    'key': 'key',
+                    'operator': 'Exists',
+                    'effect': 'NoSchedule'
+                }
+            ]
+
+        """
+    ).tag(config=True)
+
+    node_affinity_preferred = List(
+        default_value=[],
+        help="""
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "PreferredSchedulingTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#preferredschedulingterm-v1-core
+        """
+    ).tag(config=True)
+    node_affinity_required = List(
+        default_value=[],
+        help="""
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "NodeSelectorTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#nodeselectorterm-v1-core
+        """
+    ).tag(config=True)
+    pod_affinity_preferred = List(
+        default_value=[],
+        help="""
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "WeightedPodAffinityTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core
+        """
+    ).tag(config=True)
+    pod_affinity_required = List(
+        default_value=[],
+        help="""
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod. To learn more visit
+        https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+        Pass this field an array of "PodAffinityTerm" objects.*
+        * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core
+        """
+    ).tag(config=True)
+    pod_anti_affinity_preferred = List(
+        default_value=[],
+        help="""
+        Affinities describe where pods prefer or require to be scheduled, they
+        may prefer or require a node to have a certain label or be in proximity
+        / remoteness to another pod.
To learn more visit + https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + Pass this field an array of "WeightedPodAffinityTerm" objects.* + * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core + """ + ).tag(config=True) + pod_anti_affinity_required = List( + default_value=[], + help=""" + Affinities describe where pods prefer or require to be scheduled, they + may prefer or require a node to have a certain label or be in proximity + / remoteness to another pod. To learn more visit + https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + Pass this field an array of "PodAffinityTerm" objects.* + * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core + """ + ).tag(config=True) extra_resource_guarantees = Dict( - {}, - config=True, + default_value={}, help=""" The dictionary used to request arbitrary resources. Default is None and means no additional resources are requested. @@ -851,11 +948,9 @@ def _hub_connect_port_default(self): {"nvidia.com/gpu": "3"} """ - ) - + ).tag(config=True) extra_resource_limits = Dict( - {}, - config=True, + default_value={}, help=""" The dictionary used to limit arbitrary resources. Default is None and means no additional resources are limited. @@ -863,20 +958,19 @@ def _hub_connect_port_default(self): {"nvidia.com/gpu": "3"} """ - ) + ).tag(config=True) delete_stopped_pods = Bool( - True, - config=True, + default_value=True, help=""" Whether to delete pods that have stopped themselves. Set to False to leave stopped pods in the completed state, allowing for easier debugging of why they may have stopped. """ - ) + ).tag(config=True) profile_form_template = Unicode( - """ + default_value="""
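For reference, a hedged sketch of how the new scheduling traits introduced above might be set from a `jupyterhub_config.py`. The label keys, taint values, and PriorityClass name are illustrative assumptions rather than part of this change, and camelCase keys are assumed to be accepted here just as they are for `extra_pod_config`.

```python
# jupyterhub_config.py (illustrative values only)
c.KubeSpawner.priority_class_name = 'jupyterhub-user-pods'  # assumes this PriorityClass exists

# Toleration objects: allow scheduling onto nodes tainted for user pods
c.KubeSpawner.tolerations = [
    {
        'key': 'hub.jupyter.org/dedicated',
        'operator': 'Equal',
        'value': 'user',
        'effect': 'NoSchedule',
    },
]

# NodeSelectorTerm objects: require nodes carrying a given label
c.KubeSpawner.node_affinity_required = [
    {
        'matchExpressions': [
            {
                'key': 'hub.jupyter.org/node-purpose',
                'operator': 'In',
                'values': ['user'],
            },
        ],
    },
]

# WeightedPodAffinityTerm objects: prefer packing user pods onto the same node
c.KubeSpawner.pod_affinity_preferred = [
    {
        'weight': 100,
        'podAffinityTerm': {
            'labelSelector': {
                'matchLabels': {'component': 'singleuser-server'},
            },
            'topologyKey': 'kubernetes.io/hostname',
        },
    },
]
```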