diff --git a/CHANGELOG.md b/CHANGELOG.md index 799aeaf5..a46a6785 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,13 @@ All notable changes to this project will be documented in this file. ### Changed -- `operator-rs` `0.36.0` -> `0.39.0` ([#326], [#337]). - [Breaking] Moved top level config option to `clusterConfig` ([#326]). +- [BREAKING] Support specifying Service type. + This enables us to later switch in a non-breaking way to using `ListenerClasses` for the exposure of Services. + This change is breaking, because - for security reasons - we default to the `cluster-internal` `ListenerClass`. + If you need your cluster to be accessible from outside of Kubernetes, you need to set `clusterConfig.listenerClass` + to `external-unstable` ([#XXX]). + - `operator-rs` `0.36.0` -> `0.39.0` ([#326], [#337]). ### Removed diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 4e862821..3934a061 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -90,6 +90,39 @@ pub struct HdfsClusterConfig { pub vector_aggregator_config_map_name: Option<String>, /// Name of the ZooKeeper discovery config map. pub zookeeper_config_map_name: String, + /// In the future this setting will control which ListenerClass + /// will be used to expose the service. 
+ /// Currently only a subset of the ListenerClasses are supported by choosing the type of the created Services + /// by looking at the ListenerClass name specified. + /// In a future release support for custom ListenerClasses will be introduced without a breaking change: + /// + /// * cluster-internal: Use a ClusterIP service + /// + /// * external-unstable: Use a NodePort service + #[serde(default)] + pub listener_class: CurrentlySupportedListenerClasses, +} + +// TODO: Temporary solution until listener-operator is finished +#[derive( + Clone, Debug, Default, Display, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize, +)] +#[serde(rename_all = "PascalCase")] +pub enum CurrentlySupportedListenerClasses { + #[default] + #[serde(rename = "cluster-internal")] + ClusterInternal, + #[serde(rename = "external-unstable")] + ExternalUnstable, +} + +impl CurrentlySupportedListenerClasses { + pub fn k8s_service_type(&self) -> String { + match self { + CurrentlySupportedListenerClasses::ClusterInternal => "ClusterIP".to_string(), + CurrentlySupportedListenerClasses::ExternalUnstable => "NodePort".to_string(), + } + } } /// This is a shared trait for all role/role-group config structs to avoid duplication @@ -433,9 +466,15 @@ impl HdfsCluster { ); group_labels.insert(String::from("role"), rolegroup_ref.role.clone()); group_labels.insert(String::from("group"), rolegroup_ref.role_group.clone()); - // TODO: in a production environment, probably not all roles need to be exposed with one NodePort per Pod but it's - // useful for development purposes. - group_labels.insert(LABEL_ENABLE.to_string(), "true".to_string()); + + if self.spec.cluster_config.listener_class + == CurrentlySupportedListenerClasses::ExternalUnstable + { + // TODO: in a production environment, probably not all roles need to be exposed with one NodePort per Pod but it's + // useful for development purposes. 
+ + group_labels.insert(LABEL_ENABLE.to_string(), "true".to_string()); + } group_labels } diff --git a/rust/operator/src/hdfs_controller.rs b/rust/operator/src/hdfs_controller.rs index 45cdb9e5..d453700b 100644 --- a/rust/operator/src/hdfs_controller.rs +++ b/rust/operator/src/hdfs_controller.rs @@ -338,6 +338,8 @@ fn rolegroup_service( .with_label("prometheus.io/scrape", "true") .build(), spec: Some(ServiceSpec { + // Internal communication does not need to be exposed + type_: Some("ClusterIP".to_string()), cluster_ip: Some("None".to_string()), ports: Some( role.ports() diff --git a/tests/templates/kuttl/smoke/01-install-zk.yaml.j2 b/tests/templates/kuttl/smoke/01-install-zk.yaml.j2 index bee8ef22..b624436f 100644 --- a/tests/templates/kuttl/smoke/01-install-zk.yaml.j2 +++ b/tests/templates/kuttl/smoke/01-install-zk.yaml.j2 @@ -7,8 +7,9 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper'].split('-stackable')[1] }}" -{% if lookup('env', 'VECTOR_AGGREGATOR') %} clusterConfig: + listenerClass: {{ test_scenario['values']['listener-class'] }} +{% if lookup('env', 'VECTOR_AGGREGATOR') %} logging: vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 b/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 index 4a494f15..f52e9e88 100644 --- a/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 @@ -16,6 +16,7 @@ spec: clusterConfig: dfsReplication: 1 zookeeperConfigMapName: hdfs-zk + listenerClass: {{ test_scenario['values']['listener-class'] }} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} diff --git a/tests/templates/kuttl/smoke/04-assert.yaml b/tests/templates/kuttl/smoke/04-assert.yaml index 309d479b..1c4860b9 100644 --- 
a/tests/templates/kuttl/smoke/04-assert.yaml +++ b/tests/templates/kuttl/smoke/04-assert.yaml @@ -2,4 +2,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert commands: - - script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py ls + - script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py $NAMESPACE ls diff --git a/tests/templates/kuttl/smoke/04-create-file.yaml b/tests/templates/kuttl/smoke/04-create-file.yaml index fe7d4732..d72fb348 100644 --- a/tests/templates/kuttl/smoke/04-create-file.yaml +++ b/tests/templates/kuttl/smoke/04-create-file.yaml @@ -4,4 +4,4 @@ kind: TestStep commands: - script: kubectl cp -n $NAMESPACE ./webhdfs.py webhdfs-0:/tmp - script: kubectl cp -n $NAMESPACE ./testdata.txt webhdfs-0:/tmp - - script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py create + - script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py $NAMESPACE create diff --git a/tests/templates/kuttl/smoke/webhdfs.py b/tests/templates/kuttl/smoke/webhdfs.py index 4c77470e..d7bb4c3f 100755 --- a/tests/templates/kuttl/smoke/webhdfs.py +++ b/tests/templates/kuttl/smoke/webhdfs.py @@ -5,7 +5,8 @@ def main() -> int: result = 0 - command = sys.argv[1] + namespace = sys.argv[1] + command = sys.argv[2] log_level = "DEBUG" logging.basicConfig( @@ -16,7 +17,7 @@ def main() -> int: if command == "ls": http_code = requests.get( - "http://hdfs-namenode-default-0:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS" + f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS" ).status_code if http_code != 200: result = 1 @@ -30,7 +31,7 @@ def main() -> int: ) } http_code = requests.put( - "http://hdfs-namenode-default-0:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE", + f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE", 
files=files, allow_redirects=True, ).status_code diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index a8819ce0..3ee9466a 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -22,6 +22,11 @@ dimensions: values: - "default" - "2hdd-1ssd" + # Used for both zookeeper and hdfs + - name: listener-class + values: + - "cluster-internal" + - "external-unstable" tests: - name: smoke dimensions: - hadoop - zookeeper - number-of-datanodes - datanode-pvcs + - listener-class - name: orphaned-resources dimensions: - hadoop-latest