Skip to content

Commit

Permalink
Support specifying Service type
Browse files Browse the repository at this point in the history
  • Loading branch information
sbernauer committed Apr 5, 2023
1 parent 8a88110 commit 6430219
Show file tree
Hide file tree
Showing 9 changed files with 65 additions and 10 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,13 @@ All notable changes to this project will be documented in this file.

### Changed

- `operator-rs` `0.36.0` -> `0.39.0` ([#326], [#337]).
- [Breaking] Moved top level config option to `clusterConfig` ([#326]).
- [BREAKING] Support specifying Service type.
  This enables us to later switch, in a non-breaking way, to using `ListenerClasses` for the exposure of Services.
This change is breaking, because - for security reasons - we default to the `cluster-internal` `ListenerClass`.
  If you need your cluster to be accessible from outside of Kubernetes, you need to set `clusterConfig.listenerClass`
to `external-unstable` ([#XXX]).
- `operator-rs` `0.36.0` -> `0.39.0` ([#326], [#337]).

### Removed

Expand Down
45 changes: 42 additions & 3 deletions rust/crd/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,39 @@ pub struct HdfsClusterConfig {
pub vector_aggregator_config_map_name: Option<String>,
/// Name of the ZooKeeper discovery config map.
pub zookeeper_config_map_name: String,
/// In the future this setting will control which ListenerClass <https://docs.stackable.tech/home/stable/listener-operator/listenerclass.html>
/// will be used to expose the service.
/// Currently only a subset of the ListenerClasses are supported by choosing the type of the created Services
/// by looking at the ListenerClass name specified.
/// In a future release support for custom ListenerClasses will be introduced without a breaking change:
///
/// * cluster-internal: Use a ClusterIP service
///
/// * external-unstable: Use a NodePort service
#[serde(default)]
pub listener_class: CurrentlySupportedListenerClasses,
}

// TODO: Temporary solution until listener-operator is finished
/// The subset of ListenerClass names currently supported for exposing services.
///
/// Each variant is serialized under its kebab-case ListenerClass name (via the
/// per-variant `serde(rename)` attributes) and maps to a concrete Kubernetes
/// Service type through [`CurrentlySupportedListenerClasses::k8s_service_type`].
#[derive(
    Clone, Debug, Default, Display, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize,
)]
#[serde(rename_all = "PascalCase")]
pub enum CurrentlySupportedListenerClasses {
    /// Expose only inside the Kubernetes cluster (ClusterIP Service).
    /// This is the default for security reasons.
    #[default]
    #[serde(rename = "cluster-internal")]
    ClusterInternal,
    /// Expose outside the cluster using a NodePort Service.
    #[serde(rename = "external-unstable")]
    ExternalUnstable,
}

impl CurrentlySupportedListenerClasses {
pub fn k8s_service_type(&self) -> String {
match self {
CurrentlySupportedListenerClasses::ClusterInternal => "ClusterIP".to_string(),
CurrentlySupportedListenerClasses::ExternalUnstable => "NodePort".to_string(),
}
}
}

/// This is a shared trait for all role/role-group config structs to avoid duplication
Expand Down Expand Up @@ -433,9 +466,15 @@ impl HdfsCluster {
);
group_labels.insert(String::from("role"), rolegroup_ref.role.clone());
group_labels.insert(String::from("group"), rolegroup_ref.role_group.clone());
// TODO: in a production environment, probably not all roles need to be exposed with one NodePort per Pod but it's
// useful for development purposes.
group_labels.insert(LABEL_ENABLE.to_string(), "true".to_string());

if self.spec.cluster_config.listener_class
== CurrentlySupportedListenerClasses::ExternalUnstable
{
// TODO: in a production environment, probably not all roles need to be exposed with one NodePort per Pod but it's
// useful for development purposes.

group_labels.insert(LABEL_ENABLE.to_string(), "true".to_string());
}

group_labels
}
Expand Down
2 changes: 2 additions & 0 deletions rust/operator/src/hdfs_controller.rs
Original file line number Diff line number Diff line change
Expand Up @@ -338,6 +338,8 @@ fn rolegroup_service(
.with_label("prometheus.io/scrape", "true")
.build(),
spec: Some(ServiceSpec {
// Internal communication does not need to be exposed
type_: Some("ClusterIP".to_string()),
cluster_ip: Some("None".to_string()),
ports: Some(
role.ports()
Expand Down
3 changes: 2 additions & 1 deletion tests/templates/kuttl/smoke/01-install-zk.yaml.j2
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,9 @@ spec:
image:
productVersion: "{{ test_scenario['values']['zookeeper'].split('-stackable')[0] }}"
stackableVersion: "{{ test_scenario['values']['zookeeper'].split('-stackable')[1] }}"
{% if lookup('env', 'VECTOR_AGGREGATOR') %}
clusterConfig:
listenerClass: {{ test_scenario['values']['listener-class'] }}
{% if lookup('env', 'VECTOR_AGGREGATOR') %}
logging:
vectorAggregatorConfigMapName: vector-aggregator-discovery
{% endif %}
Expand Down
1 change: 1 addition & 0 deletions tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ spec:
clusterConfig:
dfsReplication: 1
zookeeperConfigMapName: hdfs-zk
listenerClass: {{ test_scenario['values']['listener-class'] }}
{% if lookup('env', 'VECTOR_AGGREGATOR') %}
vectorAggregatorConfigMapName: vector-aggregator-discovery
{% endif %}
Expand Down
2 changes: 1 addition & 1 deletion tests/templates/kuttl/smoke/04-assert.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestAssert
commands:
- script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py ls
- script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py $NAMESPACE ls
2 changes: 1 addition & 1 deletion tests/templates/kuttl/smoke/04-create-file.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@ kind: TestStep
commands:
- script: kubectl cp -n $NAMESPACE ./webhdfs.py webhdfs-0:/tmp
- script: kubectl cp -n $NAMESPACE ./testdata.txt webhdfs-0:/tmp
- script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py create
- script: kubectl exec -n $NAMESPACE webhdfs-0 -- python /tmp/webhdfs.py $NAMESPACE create
7 changes: 4 additions & 3 deletions tests/templates/kuttl/smoke/webhdfs.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@

def main() -> int:
result = 0
command = sys.argv[1]
namespace = sys.argv[1]
command = sys.argv[2]

log_level = "DEBUG"
logging.basicConfig(
Expand All @@ -16,7 +17,7 @@ def main() -> int:

if command == "ls":
http_code = requests.get(
"http://hdfs-namenode-default-0:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS"
f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS"
).status_code
if http_code != 200:
result = 1
Expand All @@ -30,7 +31,7 @@ def main() -> int:
)
}
http_code = requests.put(
"http://hdfs-namenode-default-0:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE",
f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE",
files=files,
allow_redirects=True,
).status_code
Expand Down
6 changes: 6 additions & 0 deletions tests/test-definition.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,19 @@ dimensions:
values:
- "default"
- "2hdd-1ssd"
  # Used for both zookeeper and hdfs
- name: listener-class
values:
- "cluster-internal"
- "external-unstable"
tests:
- name: smoke
dimensions:
- hadoop
- zookeeper
- number-of-datanodes
- datanode-pvcs
- listener-class
- name: orphaned-resources
dimensions:
- hadoop-latest
Expand Down

0 comments on commit 6430219

Please sign in to comment.