diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a54a31f5..7f6fc98e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -9,6 +9,11 @@
 tox devenv -e integration
 source venv/bin/activate
 ```
+The development setup ships with tox 3; to use tox 4 instead, install it with:
+```shell
+pip install 'tox>=4,<5'
+```
+
 ## Testing
 
 This project uses `tox` for managing test environments. There are some pre-configured environments
diff --git a/charms/worker/k8s/lib/charms/k8s/v0/k8sd_api_manager.py b/charms/worker/k8s/lib/charms/k8s/v0/k8sd_api_manager.py
index 2663b3a5..619dc6d2 100644
--- a/charms/worker/k8s/lib/charms/k8s/v0/k8sd_api_manager.py
+++ b/charms/worker/k8s/lib/charms/k8s/v0/k8sd_api_manager.py
@@ -31,7 +31,7 @@ class utilises different connection factories (UnixSocketConnectionFactory
 import socket
 from contextlib import contextmanager
 from http.client import HTTPConnection, HTTPException
-from typing import Generator, List, Optional, Type, TypeVar
+from typing import Dict, Generator, List, Optional, Type, TypeVar
 
 from pydantic import BaseModel, Field, validator
 
@@ -48,6 +48,61 @@ class utilises different connection factories (UnixSocketConnectionFactory
 logger = logging.getLogger(__name__)
 
 
+class CreateClusterRequest(BaseModel):
+    """Request model for creating a new Canonical Kubernetes cluster.
+
+    Attributes:
+        bootstrap (bool): Flag to enable or disable the bootstrap process for
+            the cluster. Defaults to True.
+        name (str): The name of the node to bootstrap.
+        address (str): The address to which k8sd should be bound.
+        config (Dict[str, str]): A dictionary of additional configuration
+            parameters for the cluster.
+    """
+
+    bootstrap: bool = True
+    name: str
+    address: str
+    config: Dict[str, str]
+
+
+class BootstrapConfig(BaseModel):
+    """Configuration model for bootstrapping a Canonical K8s cluster.
+
+    Attributes:
+        components (List[str]): A list of default components to be installed
+            during the bootstrap process. Defaults to ["dns", "metrics-server",
+            "network"].
+        cluster_cidr (str): The IP address range for the cluster's pods. Defaults
+            to "10.1.0.0/16".
+        service_cidr (str): The IP address range for the cluster services. Defaults
+            to "10.152.183.0/24".
+        rbac (bool): Flag to enable or disable role-based access control
+            (RBAC). Defaults to True.
+        k8s_dqlite_port (int): The port used by Dqlite. Defaults to 9000.
+        datastore (str): The type of datastore used by the cluster.
+            Defaults to "k8s-dqlite".
+        datastore_url (str): The URL of the datastore. Optional; defaults to None.
+        datastore_ca_cert (str): The CA certificate for the datastore.
+            Optional; defaults to None.
+        datastore_client_cert (str): The client certificate for accessing the
+            datastore. Optional; defaults to None.
+        datastore_client_key (str): The client key for accessing the datastore.
+            Optional; defaults to None.
+ """ + + components: List[str] = ["dns", "metrics-server", "network"] + cluster_cidr: str = Field("10.1.0.0/16", alias="cluster-cidr") + service_cidr: str = Field("10.152.183.0/24", alias="service-cidr") + rbac: bool = Field(True, alias="enable-rbac") + k8s_dqlite_port: int = Field(9000, alias="k8s-dqlite-port") + datastore: str = "k8s-dqlite" + datastore_url: str = Field(None, alias="datastore-url") + datastore_ca_cert: str = Field(None, alias="datastore-ca-crt") + datastore_client_cert: str = Field(None, alias="datastore-client-crt") + datastore_client_key: str = Field(None, alias="datastore-client-key") + + class K8sdAPIManagerError(Exception): """Base exception for K8sd API Manager errors.""" @@ -603,17 +658,16 @@ def check_k8sd_ready(self): endpoint = "/cluster/1.0/ready" self._send_request(endpoint, "GET", EmptyResponse) - def bootstrap_k8s_snap(self, name: str, address: str) -> None: + def bootstrap_k8s_snap(self, request: CreateClusterRequest) -> None: """Bootstrap the k8s cluster. Args: - name (str): name of the node - address (str): address to which k8sd should be bound + request (CreateClusterRequest): The request model to bootstrap the cluster. TODO: Add bootstrap config support """ endpoint = "/cluster/control" - body = {"bootstrap": True, "name": name, "address": address} + body = request.dict(exclude_none=True, by_alias=True) self._send_request(endpoint, "POST", EmptyResponse, body) def request_auth_token(self, username: str, groups: List[str]) -> str: diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py index b62ea45f..3767cb92 100755 --- a/charms/worker/k8s/src/charm.py +++ b/charms/worker/k8s/src/charm.py @@ -28,10 +28,13 @@ import charms.contextual_status as status import ops +import yaml from charms.contextual_status import WaitingStatus, on_error from charms.grafana_agent.v0.cos_agent import COSAgentProvider from charms.interface_kube_dns import KubeDnsRequires from charms.k8s.v0.k8sd_api_manager import ( + BootstrapConfig, + CreateClusterRequest, DNSConfig, InvalidResponseError, K8sdAPIManager, @@ -210,15 +213,28 @@ def _check_k8sd_ready(self): K8sdConnectionError, ) def _bootstrap_k8s_snap(self): - """Bootstrap k8s if it's not already bootstrapped.""" - if not self.api_manager.is_cluster_bootstrapped(): - status.add(ops.MaintenanceStatus("Bootstrapping Cluster")) - log.info("Bootstrapping Cluster") - binding = self.model.get_binding("juju-info") - address = binding and binding.network.ingress_address - node_name = self.get_node_name() - # TODO: Make port (and address) configurable. - self.api_manager.bootstrap_k8s_snap(node_name, f"{address}:{K8SD_PORT}") + """Bootstrap the k8s snap package.""" + if self.api_manager.is_cluster_bootstrapped(): + log.info("K8s cluster already bootstrapped") + return + + bootstrap_config = BootstrapConfig() + + status.add(ops.MaintenanceStatus("Bootstrapping Cluster")) + + binding = self.model.get_binding("juju-info") + address = binding and binding.network.ingress_address + node_name = self.get_node_name() + config_str = { + "bootstrapConfig": yaml.dump(bootstrap_config.dict(by_alias=True, exclude_none=True)) + } + + payload = CreateClusterRequest( + name=node_name, address=f"{address}:{K8SD_PORT}", config=config_str + ) + + # TODO: Make port (and address) configurable. 
+        self.api_manager.bootstrap_k8s_snap(payload)
 
     @status.on_error(
         ops.WaitingStatus("Configuring COS Integration"),
diff --git a/charms/worker/k8s/tests/unit/test_k8sd_api_manager.py b/charms/worker/k8s/tests/unit/test_k8sd_api_manager.py
index ae6aa445..ae3df4a1 100644
--- a/charms/worker/k8s/tests/unit/test_k8sd_api_manager.py
+++ b/charms/worker/k8s/tests/unit/test_k8sd_api_manager.py
@@ -12,6 +12,7 @@
 from lib.charms.k8s.v0.k8sd_api_manager import (
     AuthTokenResponse,
     BaseRequestModel,
+    CreateClusterRequest,
     CreateJoinTokenResponse,
     DNSConfig,
     EmptyResponse,
@@ -113,16 +114,23 @@ def setUp(self):
     @patch("lib.charms.k8s.v0.k8sd_api_manager.K8sdAPIManager._send_request")
     def test_bootstrap_k8s_snap(self, mock_send_request):
-        mock_send_request.return_value = EmptyResponse(
-            status_code=200, type="test", error_code=0, metadata="foo"
-        )
+        mock_send_request.return_value = EmptyResponse(status_code=200, type="test", error_code=0)
 
-        self.api_manager.bootstrap_k8s_snap("test-node", "127.0.0.1:6400")
+        self.api_manager.bootstrap_k8s_snap(
+            CreateClusterRequest(
+                name="test-node", address="127.0.0.1:6400", config={"bootstrapConfig": "foobar"}
+            )
+        )
 
         mock_send_request.assert_called_once_with(
             "/cluster/control",
             "POST",
             EmptyResponse,
-            {"bootstrap": True, "name": "test-node", "address": "127.0.0.1:6400"},
+            {
+                "bootstrap": True,
+                "name": "test-node",
+                "address": "127.0.0.1:6400",
+                "config": {"bootstrapConfig": "foobar"},
+            },
         )
 
     def test_create_join_token_invalid_response(self):
diff --git a/charms/worker/lxd-profile.yaml b/charms/worker/lxd-profile.yaml
deleted file mode 100644
index fcb130dd..00000000
--- a/charms/worker/lxd-profile.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2024 Canonical Ltd.
-# See LICENSE file for licensing details.
-description: "LXD profile for Canonical Kubernetes"
-config:
-  boot.autostart: "true"
-  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
-  raw.lxc: |
-    lxc.apparmor.profile=unconfined
-    lxc.mount.auto=proc:rw sys:rw cgroup:rw
-    lxc.cgroup.devices.allow=a
-    lxc.cap.drop=
-  security.nesting: "true"
-  security.privileged: "true"
-devices:
-  aadisable:
-    path: /sys/module/nf_conntrack/parameters/hashsize
-    source: /sys/module/nf_conntrack/parameters/hashsize
-    type: disk
-  aadisable2:
-    path: /dev/kmsg
-    source: /dev/kmsg
-    type: unix-char
-  aadisable3:
-    path: /sys/fs/bpf
-    source: /sys/fs/bpf
-    type: disk
-  aadisable4:
-    path: /proc/sys/net/netfilter/nf_conntrack_max
-    source: /proc/sys/net/netfilter/nf_conntrack_max
-    type: disk
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 64bf6018..30989b1b 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -169,6 +169,17 @@ def switch(self, name: str, path: Path):
         app["channel"] = None
 
 
+async def cloud_profile(ops_test: OpsTest):
+    """Apply the k8s LXD profile to the model if the Juju cloud is LXD."""
+    controller = await ops_test.model.get_controller()
+    cloud = await controller.cloud()
+    if cloud.cloud.type_ == "lxd":
+        lxd = LXDSubstrate(None, None)
+        profile_name = f"juju-{ops_test.model.name}"
+        lxd.remove_profile(profile_name)
+        lxd.apply_profile("k8s.profile", profile_name)
+
+
 @contextlib.asynccontextmanager
 async def deploy_model(
     request: pytest.FixtureRequest,
@@ -199,6 +210,7 @@
         config=config,
     )
     with ops_test.model_context(model_name) as the_model:
+        await cloud_profile(ops_test)
         async with ops_test.fast_forward("60s"):
             await the_model.deploy(bundle.render)
             await the_model.wait_for_idle(
diff --git a/tests/integration/cos_substrate.py b/tests/integration/cos_substrate.py
index ac6cecf2..d4135220 100644
--- a/tests/integration/cos_substrate.py
+++ b/tests/integration/cos_substrate.py
@@ -51,7 +51,8 @@ def apply_profile(
             Defaults to 'cos-profile'.
         """
         profile_path = Path("tests/integration/data") / profile_name
-        with open(profile_path) as file:
+
+        with profile_path.open() as file:
             try:
                 raw_profile = yaml.safe_load(file)
                 config = raw_profile.get("config", {})
@@ -59,7 +60,7 @@
                 self.client.profiles.create(target_profile_name, config=config, devices=devices)
                 log.info(f"Profile {target_profile_name} applied successfully.")
             except (yaml.YAMLError, Exception) as e:
-                log.error(f"Failed to read or apply LXD profile: {e}")
+                log.exception("Failed to read or apply LXD profile: %s", e)
 
     def create_container(self, name: str):
         """Create a container.
diff --git a/charms/worker/k8s/lxd-profile.yaml b/tests/integration/data/k8s.profile
similarity index 55%
rename from charms/worker/k8s/lxd-profile.yaml
rename to tests/integration/data/k8s.profile
index fcb130dd..861f529e 100644
--- a/charms/worker/k8s/lxd-profile.yaml
+++ b/tests/integration/data/k8s.profile
@@ -3,7 +3,7 @@
 description: "LXD profile for Canonical Kubernetes"
 config:
   boot.autostart: "true"
-  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
+  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,iptable_raw,netlink_diag,nf_nat,overlay,br_netfilter,xt_socket
   raw.lxc: |
     lxc.apparmor.profile=unconfined
     lxc.mount.auto=proc:rw sys:rw cgroup:rw
@@ -13,18 +13,6 @@ config:
   security.privileged: "true"
 devices:
   aadisable:
-    path: /sys/module/nf_conntrack/parameters/hashsize
-    source: /sys/module/nf_conntrack/parameters/hashsize
-    type: disk
-  aadisable2:
     path: /dev/kmsg
     source: /dev/kmsg
     type: unix-char
-  aadisable3:
-    path: /sys/fs/bpf
-    source: /sys/fs/bpf
-    type: disk
-  aadisable4:
-    path: /proc/sys/net/netfilter/nf_conntrack_max
-    source: /proc/sys/net/netfilter/nf_conntrack_max
-    type: disk
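
Below is a minimal usage sketch, not part of the patch, of how the two new models compose into the body that `bootstrap_k8s_snap` POSTs to `/cluster/control`. It assumes pydantic v1 (matching the `.dict()` calls in the library) and PyYAML; the node name and address are illustrative values.

```python
# Minimal sketch: build the same request body the charm sends when bootstrapping.
import yaml

from charms.k8s.v0.k8sd_api_manager import BootstrapConfig, CreateClusterRequest

# by_alias=True emits the wire names ("cluster-cidr", "enable-rbac", ...);
# exclude_none=True drops the unset datastore_* fields.
bootstrap_config = BootstrapConfig()
config = {"bootstrapConfig": yaml.dump(bootstrap_config.dict(by_alias=True, exclude_none=True))}

request = CreateClusterRequest(
    name="worker-0",              # illustrative node name
    address="10.246.154.1:6400",  # illustrative k8sd bind address
    config=config,
)

# The body POSTed to /cluster/control, matching the dict asserted in
# test_bootstrap_k8s_snap.
body = request.dict(exclude_none=True, by_alias=True)
assert body["bootstrap"] is True
assert "bootstrapConfig" in body["config"]
```

The design keeps `BootstrapConfig` separate from `CreateClusterRequest` and tunnels it as a YAML string under the `bootstrapConfig` key, so the request's `config` field stays a flat `Dict[str, str]` on the wire.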