diff --git a/README.md b/README.md
index d4add358..2fcbd227 100644
--- a/README.md
+++ b/README.md
@@ -993,6 +993,30 @@ enable_delete_protection = {
```
+
+
+## Use only private IPs in your cluster
+
+To use only private IPs in your cluster, your project needs:
+1. A network already configured.
+2. A machine with a public IP, with NAT configured (see [Hetzner guide](https://community.hetzner.com/tutorials/how-to-set-up-nat-for-cloud-networks)).
+3. Access to your network (you can use WireGuard, see [Hetzner guide](https://docs.hetzner.com/cloud/apps/list/wireguard/)).
+4. A route in your network with destination `0.0.0.0/0` via the private IP of your NAT machine.
+5. Make sure the connection to your VPN is established before running Terraform.
+
+Recommended values:
+- Network range: `10.0.0.0/8`
+- Subnet for your wireguard and NAT machine: `10.128.0.0/16`
+
+If you use these values, please set the following in your kube.tf:
+- `existing_network_id = [YOURID]` (with the brackets)
+- `network_ipv4_cidr = "10.0.0.0/9"`
+- Add `disable_ipv4 = true` and `disable_ipv6 = true` in all machines in all nodepools (control planes + agents).
+
+This setup is compatible with a load balancer for your control planes; however, you should consider setting
+`control_plane_lb_enable_public_interface = false` to keep its IP private as well.
+
+
## Debugging
diff --git a/agents.tf b/agents.tf
index 98451333..16d46ce1 100644
--- a/agents.tf
+++ b/agents.tf
@@ -29,6 +29,9 @@ module "agents" {
swap_size = each.value.swap_size
zram_size = each.value.zram_size
keep_disk_size = var.keep_disk_agents
+ disable_ipv4 = each.value.disable_ipv4
+ disable_ipv6 = each.value.disable_ipv6
+ network_id = length(var.existing_network_id) > 0 ? var.existing_network_id[0] : 0
private_ipv4 = cidrhost(hcloud_network_subnet.agent[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0]].ip_range, each.value.index + 101)
@@ -57,6 +60,14 @@ locals {
var.agent_nodes_custom_config,
(v.selinux == true ? { selinux = true } : {})
) }
+
+ agent_ips = {
+ for k, v in module.agents : k => coalesce(
+ v.ipv4_address,
+ v.ipv6_address,
+ v.private_ipv4_address
+ )
+ }
}
resource "null_resource" "agent_config" {
@@ -71,7 +82,7 @@ resource "null_resource" "agent_config" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.agents[each.key].ipv4_address
+ host = local.agent_ips[each.key]
port = var.ssh_port
}
@@ -97,7 +108,7 @@ resource "null_resource" "agents" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.agents[each.key].ipv4_address
+ host = local.agent_ips[each.key]
port = var.ssh_port
}
@@ -166,7 +177,7 @@ resource "null_resource" "configure_longhorn_volume" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.agents[each.key].ipv4_address
+ host = local.agent_ips[each.key]
port = var.ssh_port
}
@@ -227,7 +238,7 @@ resource "null_resource" "configure_floating_ip" {
NM_CONNECTION=$(nmcli -g GENERAL.CONNECTION device show eth0)
nmcli connection modify "$NM_CONNECTION" \
ipv4.method manual \
- ipv4.addresses ${hcloud_floating_ip.agents[each.key].ip_address}/32,${module.agents[each.key].ipv4_address}/32 gw4 172.31.1.1 \
+ ipv4.addresses ${hcloud_floating_ip.agents[each.key].ip_address}/32,${local.agent_ips[each.key]}/32 gw4 172.31.1.1 \
ipv4.route-metric 100 \
&& nmcli connection up "$NM_CONNECTION"
EOT
@@ -238,7 +249,7 @@ resource "null_resource" "configure_floating_ip" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.agents[each.key].ipv4_address
+ host = local.agent_ips[each.key]
port = var.ssh_port
}
diff --git a/autoscaler-agents.tf b/autoscaler-agents.tf
index c81c2104..84bbe78d 100644
--- a/autoscaler-agents.tf
+++ b/autoscaler-agents.tf
@@ -62,7 +62,7 @@ resource "null_resource" "configure_autoscaler" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
+ host = local.first_control_plane_ip
port = var.ssh_port
}
@@ -165,7 +165,7 @@ resource "null_resource" "autoscaled_nodes_registries" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = each.value.ipv4_address
+ host = coalesce(each.value.ipv4_address, each.value.ipv6_address, try(one(each.value.network).ip, null))
port = var.ssh_port
}
diff --git a/control_planes.tf b/control_planes.tf
index 4c5bdd29..1223f0c6 100644
--- a/control_planes.tf
+++ b/control_planes.tf
@@ -29,6 +29,9 @@ module "control_planes" {
swap_size = each.value.swap_size
zram_size = each.value.zram_size
keep_disk_size = var.keep_disk_cp
+ disable_ipv4 = each.value.disable_ipv4
+ disable_ipv6 = each.value.disable_ipv6
+ network_id = length(var.existing_network_id) > 0 ? var.existing_network_id[0] : 0
# We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
# It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
@@ -83,6 +86,14 @@ resource "hcloud_load_balancer_service" "control_plane" {
}
locals {
+ control_plane_ips = {
+ for k, v in module.control_planes : k => coalesce(
+ v.ipv4_address,
+ v.ipv6_address,
+ v.private_ipv4_address
+ )
+ }
+
k3s-config = { for k, v in local.control_plane_nodes : k => merge(
{
node-name = module.control_planes[k].name
@@ -134,7 +145,7 @@ resource "null_resource" "control_plane_config" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[each.key].ipv4_address
+ host = local.control_plane_ips[each.key]
port = var.ssh_port
}
@@ -167,7 +178,7 @@ resource "null_resource" "authentication_config" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[each.key].ipv4_address
+ host = local.control_plane_ips[each.key]
port = var.ssh_port
}
@@ -197,7 +208,7 @@ resource "null_resource" "control_planes" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[each.key].ipv4_address
+ host = local.control_plane_ips[each.key]
port = var.ssh_port
}
diff --git a/init.tf b/init.tf
index d63c4b3d..aebe4584 100644
--- a/init.tf
+++ b/init.tf
@@ -19,13 +19,37 @@ resource "hcloud_load_balancer" "cluster" {
}
}
+resource "hcloud_load_balancer_network" "cluster" {
+ count = local.has_external_load_balancer ? 0 : 1
+
+ load_balancer_id = hcloud_load_balancer.cluster.*.id[0]
+ subnet_id = hcloud_network_subnet.agent.*.id[0]
+}
+
+resource "hcloud_load_balancer_target" "cluster" {
+ count = local.has_external_load_balancer ? 0 : 1
+
+ depends_on = [hcloud_load_balancer_network.cluster]
+ type = "label_selector"
+ load_balancer_id = hcloud_load_balancer.cluster.*.id[0]
+ label_selector = join(",", [for k, v in merge(local.labels, local.labels_control_plane_node, local.labels_agent_node) : "${k}=${v}"])
+ use_private_ip = true
+}
+
+locals {
+ first_control_plane_ip = coalesce(
+ module.control_planes[keys(module.control_planes)[0]].ipv4_address,
+ module.control_planes[keys(module.control_planes)[0]].ipv6_address,
+ module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
+ )
+}
resource "null_resource" "first_control_plane" {
connection {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
+ host = local.first_control_plane_ip
port = var.ssh_port
}
@@ -55,7 +79,7 @@ resource "null_resource" "first_control_plane" {
var.use_control_plane_lb ? {
tls-san = concat([hcloud_load_balancer.control_plane.*.ipv4[0], hcloud_load_balancer_network.control_plane.*.ip[0]], var.additional_tls_sans)
} : {
- tls-san = concat([module.control_planes[keys(module.control_planes)[0]].ipv4_address], var.additional_tls_sans)
+ tls-san = concat([local.first_control_plane_ip], var.additional_tls_sans)
},
local.etcd_s3_snapshots,
var.control_planes_custom_config,
@@ -149,7 +173,7 @@ resource "null_resource" "kustomization" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
+ host = local.first_control_plane_ip
port = var.ssh_port
}
diff --git a/kube.tf.example b/kube.tf.example
index 2d1c5774..306de64a 100644
--- a/kube.tf.example
+++ b/kube.tf.example
@@ -136,6 +136,10 @@ module "kube-hetzner" {
# Enable automatic backups via Hetzner (default: false)
# backups = true
+
+ # To disable public ips (default: false)
+ # disable_ipv4 = true
+ # disable_ipv6 = true
},
{
name = "control-plane-nbg1",
@@ -150,6 +154,10 @@ module "kube-hetzner" {
# Enable automatic backups via Hetzner (default: false)
# backups = true
+
+ # To disable public ips (default: false)
+ # disable_ipv4 = true
+ # disable_ipv6 = true
},
{
name = "control-plane-hel1",
@@ -164,6 +172,10 @@ module "kube-hetzner" {
# Enable automatic backups via Hetzner (default: false)
# backups = true
+
+ # To disable public ips (default: false)
+ # disable_ipv4 = true
+ # disable_ipv6 = true
}
]
diff --git a/kubeconfig.tf b/kubeconfig.tf
index 228f98a6..d84ad219 100644
--- a/kubeconfig.tf
+++ b/kubeconfig.tf
@@ -1,6 +1,6 @@
data "remote_file" "kubeconfig" {
conn {
- host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
+ host = local.first_control_plane_ip
port = var.ssh_port
user = "root"
private_key = var.ssh_private_key
@@ -12,9 +12,15 @@ data "remote_file" "kubeconfig" {
}
locals {
- kubeconfig_server_address = var.kubeconfig_server_address != "" ? var.kubeconfig_server_address : (var.use_control_plane_lb ? hcloud_load_balancer.control_plane.*.ipv4[0] : (
- can(module.control_planes[keys(module.control_planes)[0]].ipv4_address) ? module.control_planes[keys(module.control_planes)[0]].ipv4_address : "unknown"
- ))
+ kubeconfig_server_address = var.kubeconfig_server_address != "" ? var.kubeconfig_server_address : (var.use_control_plane_lb ?
+ (
+ var.control_plane_lb_enable_public_interface ?
+ hcloud_load_balancer.control_plane.*.ipv4[0]
+ : hcloud_load_balancer.control_plane.*.network_ip[0]
+ )
+ :
+ (can(local.first_control_plane_ip) ? local.first_control_plane_ip : "unknown")
+ )
kubeconfig_external = replace(replace(data.remote_file.kubeconfig.content, "127.0.0.1", local.kubeconfig_server_address), "default", var.cluster_name)
kubeconfig_parsed = yamldecode(local.kubeconfig_external)
kubeconfig_data = {
diff --git a/kustomization_user.tf b/kustomization_user.tf
index 9fa13943..42d5ef2b 100644
--- a/kustomization_user.tf
+++ b/kustomization_user.tf
@@ -9,7 +9,7 @@ resource "null_resource" "kustomization_user" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
+ host = local.first_control_plane_ip
port = var.ssh_port
}
@@ -40,7 +40,7 @@ resource "null_resource" "kustomization_user_deploy" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
+ host = local.first_control_plane_ip
port = var.ssh_port
}
diff --git a/locals.tf b/locals.tf
index e808eae1..41101e5e 100644
--- a/locals.tf
+++ b/locals.tf
@@ -143,7 +143,10 @@ locals {
index : node_index
selinux : nodepool_obj.selinux
placement_group_compat_idx : nodepool_obj.placement_group_compat_idx,
- placement_group : nodepool_obj.placement_group
+ placement_group : nodepool_obj.placement_group,
+ disable_ipv4 : nodepool_obj.disable_ipv4,
+ disable_ipv6 : nodepool_obj.disable_ipv6,
+ network_id : nodepool_obj.network_id,
}
}
]...)
@@ -168,7 +171,10 @@ locals {
index : node_index
selinux : nodepool_obj.selinux
placement_group_compat_idx : nodepool_obj.placement_group_compat_idx,
- placement_group : nodepool_obj.placement_group
+ placement_group : nodepool_obj.placement_group,
+ disable_ipv4 : nodepool_obj.disable_ipv4,
+ disable_ipv6 : nodepool_obj.disable_ipv6,
+ network_id : nodepool_obj.network_id,
}
}
]...)
@@ -195,6 +201,9 @@ locals {
placement_group_compat_idx : nodepool_obj.placement_group_compat_idx,
placement_group : nodepool_obj.placement_group,
index : floor(tonumber(node_key)),
+ disable_ipv4 : nodepool_obj.disable_ipv4,
+ disable_ipv6 : nodepool_obj.disable_ipv6,
+ network_id : nodepool_obj.network_id,
},
{ for key, value in node_obj : key => value if value != null },
{
@@ -840,8 +849,9 @@ cloudinit_write_files_common = < /etc/udev/rules.d/70-persistent-net.rules
@@ -851,18 +861,26 @@ cloudinit_write_files_common = </dev/null; then
+ eth0_connection=$(nmcli -g GENERAL.CONNECTION device show eth0)
+ nmcli connection modify "$eth0_connection" \
+ con-name eth0 \
+ connection.interface-name eth0
+ fi
myrepeat () {
# Current time + 300 seconds (5 minutes)
local END_SECONDS=$((SECONDS + 300))
while true; do
>&2 echo "loop"
- if (( "$SECONDS" > "$END_SECONDS" )); then
+ if (( SECONDS > END_SECONDS )); then
>&2 echo "timeout reached"
exit 1
fi
- # run command and check return code
- if $@ ; then
+ # Run command and check return code
+ if "$@"; then
>&2 echo "break"
break
else
@@ -873,11 +891,12 @@ cloudinit_write_files_common = < 0 ? [""] : []
+ content {
+ network_id = var.network_id
+ ip = var.private_ipv4
+ alias_ips = []
+ }
+ }
labels = var.labels
@@ -48,7 +70,7 @@ resource "hcloud_server" "server" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = self.ipv4_address
+ host = coalesce(self.ipv4_address, self.ipv6_address, try(one(self.network).ip, null))
port = var.ssh_port
}
@@ -63,13 +85,11 @@ resource "hcloud_server" "server" {
# Wait for MicroOS to reboot and be ready.
provisioner "local-exec" {
command = <<-EOT
- timeout 600 bash < /dev/null
- do
- echo "Waiting for MicroOS to become available..."
- sleep 3
- done
- EOF
+ until ssh ${local.ssh_args} -i /tmp/${random_string.identity_file.id} -o ConnectTimeout=2 -p ${var.ssh_port} root@${coalesce(self.ipv4_address, self.ipv6_address, try(one(self.network).ip, null))} true 2> /dev/null
+ do
+ echo "Waiting for MicroOS to become available..."
+ sleep 3
+ done
EOT
}
@@ -105,7 +125,7 @@ resource "null_resource" "registries" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = hcloud_server.server.ipv4_address
+ host = coalesce(hcloud_server.server.ipv4_address, hcloud_server.server.ipv6_address, try(one(hcloud_server.server.network).ip, null))
port = var.ssh_port
}
@@ -125,11 +145,12 @@ resource "hcloud_rdns" "server" {
count = var.base_domain != "" ? 1 : 0
server_id = hcloud_server.server.id
- ip_address = hcloud_server.server.ipv4_address
+ ip_address = coalesce(hcloud_server.server.ipv4_address, hcloud_server.server.ipv6_address, try(one(hcloud_server.server.network).ip, null))
dns_ptr = format("%s.%s", local.name, var.base_domain)
}
resource "hcloud_server_network" "server" {
+ count = var.network_id > 0 ? 0 : 1
ip = var.private_ipv4
server_id = hcloud_server.server.id
subnet_id = var.ipv4_subnet_id
@@ -165,7 +186,7 @@ resource "null_resource" "zram" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
- host = hcloud_server.server.ipv4_address
+ host = coalesce(hcloud_server.server.ipv4_address, hcloud_server.server.ipv6_address, try(one(hcloud_server.server.network).ip, null))
port = var.ssh_port
}
diff --git a/modules/host/out.tf b/modules/host/out.tf
index e65867a8..651fbb11 100644
--- a/modules/host/out.tf
+++ b/modules/host/out.tf
@@ -7,7 +7,7 @@ output "ipv6_address" {
}
output "private_ipv4_address" {
- value = hcloud_server_network.server.ip
+ value = try(one(hcloud_server.server.network).ip, hcloud_server_network.server[0].ip)
}
output "name" {
diff --git a/modules/host/variables.tf b/modules/host/variables.tf
index 45e10e1a..d867c3c0 100644
--- a/modules/host/variables.tf
+++ b/modules/host/variables.tf
@@ -144,3 +144,21 @@ variable "keep_disk_size" {
default = false
description = "Whether to keep OS disks of nodes the same size when upgrading a node"
}
+
+variable "disable_ipv4" {
+ type = bool
+ default = false
+  description = "Whether to disable ipv4 on the server. If you disable both ipv4 and ipv6, make sure you have access to your private network."
+}
+
+variable "disable_ipv6" {
+ type = bool
+ default = false
+  description = "Whether to disable ipv6 on the server. If you disable both ipv4 and ipv6, make sure you have access to your private network."
+}
+
+variable "network_id" {
+ type = number
+ default = null
+ description = "The network id to attach the server to."
+}
diff --git a/output.tf b/output.tf
index 9b56cba0..6e1b8b02 100644
--- a/output.tf
+++ b/output.tf
@@ -43,7 +43,7 @@ output "agents_public_ipv6" {
output "ingress_public_ipv4" {
description = "The public IPv4 address of the Hetzner load balancer (with fallback to first control plane node)"
- value = local.has_external_load_balancer ? module.control_planes[keys(module.control_planes)[0]].ipv4_address : hcloud_load_balancer.cluster[0].ipv4
+ value = local.has_external_load_balancer ? local.first_control_plane_ip : hcloud_load_balancer.cluster[0].ipv4
}
output "ingress_public_ipv6" {
@@ -86,13 +86,13 @@ output "agent_nodes" {
# Keeping for backward compatibility
output "kubeconfig_file" {
value = local.kubeconfig_external
- description = "Kubeconfig file content with external IP address"
+ description = "Kubeconfig file content with external IP address, or internal IP address if only private ips are available"
sensitive = true
}
output "kubeconfig" {
value = local.kubeconfig_external
- description = "Kubeconfig file content with external IP address"
+ description = "Kubeconfig file content with external IP address, or internal IP address if only private ips are available"
sensitive = true
}
diff --git a/variables.tf b/variables.tf
index b00e8396..2365cf38 100644
--- a/variables.tf
+++ b/variables.tf
@@ -184,6 +184,9 @@ variable "control_plane_nodepools" {
selinux = optional(bool, true)
placement_group_compat_idx = optional(number, 0)
placement_group = optional(string, null)
+ disable_ipv4 = optional(bool, false)
+ disable_ipv6 = optional(bool, false)
+ network_id = optional(number, 0)
}))
default = []
validation {
@@ -217,6 +220,9 @@ variable "agent_nodepools" {
placement_group_compat_idx = optional(number, 0)
placement_group = optional(string, null)
count = optional(number, null)
+ disable_ipv4 = optional(bool, false)
+ disable_ipv6 = optional(bool, false)
+ network_id = optional(number, 0)
nodes = optional(map(object({
server_type = optional(string)
location = optional(string)