forked from kube-hetzner/terraform-hcloud-kube-hetzner
main.tf
resource "random_password" "k3s_token" {
length = 48
special = false
}
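# The 48-character alphanumeric password above is presumably used as the shared
# K3s cluster token that servers and agents present when joining the cluster.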
resource "hcloud_ssh_key" "k3s" {
count = var.hcloud_ssh_key_id == null ? 1 : 0
name = var.cluster_name
public_key = var.ssh_public_key
labels = local.labels
}
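# The SSH key above is only created when no pre-existing Hetzner SSH key is
# supplied (var.hcloud_ssh_key_id == null); otherwise the module is presumably
# expected to reference the existing key by its ID instead.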
resource "hcloud_network" "k3s" {
name = var.cluster_name
ip_range = local.network_ipv4_cidr
labels = local.labels
}
# We start from the end of the subnets CIDR array, as we would have fewer
# control plane nodepools than agent ones.
resource "hcloud_network_subnet" "control_plane" {
  count        = length(var.control_plane_nodepools)
  network_id   = hcloud_network.k3s.id
  type         = "cloud"
  network_zone = var.network_region
  ip_range     = local.network_ipv4_subnets[255 - count.index]
}
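# With, say, three control plane nodepools, the subnets above take indices 255,
# 254 and 253 of local.network_ipv4_subnets (255 - count.index counts downward).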
# Here we start at the beginning of the subnets CIDR array
resource "hcloud_network_subnet" "agent" {
  count        = length(var.agent_nodepools)
  network_id   = hcloud_network.k3s.id
  type         = "cloud"
  network_zone = var.network_region
  ip_range     = local.network_ipv4_subnets[count.index]
}
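# Agent subnets count upward from index 0 while control plane subnets count
# downward from 255, so the two ranges can only collide if the combined number
# of nodepools exceeds the length of local.network_ipv4_subnets (presumably 256
# entries, given the highest index used is 255).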
resource "hcloud_firewall" "k3s" {
name = var.cluster_name
labels = local.labels
dynamic "rule" {
for_each = local.firewall_rules_list
content {
direction = rule.value.direction
protocol = rule.value.protocol
port = lookup(rule.value, "port", null)
destination_ips = lookup(rule.value, "destination_ips", [])
source_ips = lookup(rule.value, "source_ips", [])
}
}
}
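# Judging by the lookup() calls above, each entry of local.firewall_rules_list
# is expected to be an object with "direction" and "protocol" keys, plus
# optional "port", "source_ips" and "destination_ips", e.g. (illustrative
# values only, not taken from this module):
#   { direction = "in", protocol = "tcp", port = "6443", source_ips = ["0.0.0.0/0", "::/0"] }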
resource "hcloud_placement_group" "control_plane" {
count = ceil(local.control_plane_count / 10)
name = "${var.cluster_name}-control-plane-${count.index + 1}"
labels = local.labels
type = "spread"
}
resource "hcloud_placement_group" "agent" {
count = ceil(local.agent_count / 10)
name = "${var.cluster_name}-agent-${count.index + 1}"
labels = local.labels
type = "spread"
}
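# The ceil(count / 10) in both placement group resources above matches the
# Hetzner Cloud limit of at most 10 servers per spread placement group: one
# group is created per batch of up to 10 control plane or agent nodes.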
resource "null_resource" "destroy_cluster_loadbalancer" {
# this only gets triggered before total destruction of the cluster, but when the necessary elements to run the commands are still available
triggers = {
kustomization_id = null_resource.kustomization.id
cluster_name = var.cluster_name
}
# Important when issuing terraform destroy, otherwise the LB will not let the network get deleted
provisioner "local-exec" {
when = destroy
command = "kubectl -n kube-system delete service traefik --kubeconfig kubeconfig.yaml"
on_failure = continue
}
depends_on = [
null_resource.control_planes[0],
hcloud_network_subnet.control_plane,
hcloud_network_subnet.agent,
hcloud_placement_group.control_plane,
hcloud_placement_group.agent,
hcloud_network.k3s,
hcloud_firewall.k3s,
hcloud_ssh_key.k3s
]
}
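# Deleting the traefik Service above lets the load balancer that was
# provisioned for it (presumably by the Hetzner cloud controller manager) be
# removed, which would otherwise block deletion of the network. With
# on_failure = continue, destroy still proceeds if the Service or the cluster
# is already gone. The command assumes a kubeconfig.yaml file exists in the
# working directory, presumably written there by another part of this module.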