Skip to content

Commit

Permalink
Merge pull request #292 from Dakror/kube-ovn-v1.12
Browse files Browse the repository at this point in the history
Upgrade kube-ovn to v1.12
  • Loading branch information
eaudetcobello authored Nov 27, 2024
2 parents a0af6fb + 9202307 commit 3385552
Show file tree
Hide file tree
Showing 9 changed files with 5,150 additions and 2,914 deletions.
1,160 changes: 0 additions & 1,160 deletions addons/kube-ovn/crd.yaml

This file was deleted.

194 changes: 147 additions & 47 deletions addons/kube-ovn/disable
Original file line number Diff line number Diff line change
@@ -1,26 +1,86 @@
#!/bin/bash

# Sourced from: https://github.com/kubeovn/kube-ovn/blob/master/dist/images/cleanup.sh
# Sourced from: https://github.com/kubeovn/kube-ovn/blob/v1.12.21/dist/images/cleanup.sh
# Changelog:
# - use microk8s.$KUBECTL instead of $KUBECTL

set -eu

KUBECTL="$SNAP/microk8s-kubectl.wrapper"

$KUBECTL delete --ignore-not-found ds kube-ovn-pinger -n kube-system
$KUBECTL delete --ignore-not-found -n kube-system ds kube-ovn-pinger
# ensure kube-ovn-pinger has been deleted
while :; do
if [ $($KUBECTL get pod --no-headers -n kube-system -l app=kube-ovn-pinger | wc -l) -eq 0 ]; then
if [ $($KUBECTL get pod -n kube-system -l app=kube-ovn-pinger -o name | wc -l) -eq 0 ]; then
break
fi
sleep 5
sleep 1
done

for gw in $($KUBECTL get vpc-nat-gw -o name); do
$KUBECTL delete --ignore-not-found $gw
done

for vd in $($KUBECTL get vpc-dns -o name); do
$KUBECTL delete --ignore-not-found $vd
done

for vip in $($KUBECTL get vip -o name); do
$KUBECTL delete --ignore-not-found $vip
done

for snat in $($KUBECTL get snat -o name); do
$KUBECTL delete --ignore-not-found $snat
done

for dnat in $($KUBECTL get dnat -o name); do
$KUBECTL delete --ignore-not-found $dnat
done

for fip in $($KUBECTL get fip -o name); do
$KUBECTL delete --ignore-not-found $fip
done

for eip in $($KUBECTL get eip -o name); do
$KUBECTL delete --ignore-not-found $eip
done

for odnat in $($KUBECTL get odnat -o name); do
$KUBECTL delete --ignore-not-found $odnat
done

for osnat in $($KUBECTL get osnat -o name); do
$KUBECTL delete --ignore-not-found $osnat
done

for ofip in $($KUBECTL get ofip -o name); do
$KUBECTL delete --ignore-not-found $ofip
done

for oeip in $($KUBECTL get oeip -o name); do
$KUBECTL delete --ignore-not-found $oeip
done

for slr in $($KUBECTL get switch-lb-rule -o name); do
$KUBECTL delete --ignore-not-found $slr
done

for ippool in $($KUBECTL get ippool -o name); do
$KUBECTL delete --ignore-not-found $ippool
done

set +e
for subnet in $($KUBECTL get subnet -o name); do
$KUBECTL patch "$subnet" --type='json' -p '[{"op": "replace", "path": "/metadata/finalizers", "value": []}]'
$KUBECTL delete --ignore-not-found "$subnet"
done
# subnet join will recreate, so delete subnet crd right now
$KUBECTL delete --ignore-not-found crd subnets.kubeovn.io
set -e

for vpc in $($KUBECTL get vpc -o name); do
$KUBECTL delete --ignore-not-found $vpc
done

for vlan in $($KUBECTL get vlan -o name); do
$KUBECTL delete --ignore-not-found $vlan
Expand All @@ -30,56 +90,85 @@ for pn in $($KUBECTL get provider-network -o name); do
$KUBECTL delete --ignore-not-found $pn
done

sleep 5

# Delete Kube-OVN components
$KUBECTL delete --ignore-not-found deploy kube-ovn-monitor -n kube-system
$KUBECTL delete --ignore-not-found cm ovn-config ovn-ic-config ovn-external-gw-config -n kube-system
$KUBECTL delete --ignore-not-found svc kube-ovn-pinger kube-ovn-controller kube-ovn-cni kube-ovn-monitor -n kube-system
$KUBECTL delete --ignore-not-found ds kube-ovn-cni -n kube-system
$KUBECTL delete --ignore-not-found deploy kube-ovn-controller -n kube-system
$KUBECTL delete --ignore-not-found -n kube-system deploy kube-ovn-monitor
$KUBECTL delete --ignore-not-found -n kube-system cm ovn-config ovn-ic-config ovn-external-gw-config
$KUBECTL delete --ignore-not-found -n kube-system svc kube-ovn-pinger kube-ovn-controller kube-ovn-cni kube-ovn-monitor
$KUBECTL delete --ignore-not-found -n kube-system deploy kube-ovn-controller
$KUBECTL delete --ignore-not-found -n kube-system deploy ovn-ic-controller
$KUBECTL delete --ignore-not-found -n kube-system deploy ovn-ic-server

# wait for provider-networks to be deleted before deleting kube-ovn-cni
sleep 5
$KUBECTL delete --ignore-not-found -n kube-system ds kube-ovn-cni

# ensure kube-ovn-cni has been deleted
while :; do
if [ $($KUBECTL get pod --no-headers -n kube-system -l app=kube-ovn-cni | wc -l) -eq 0 ]; then
if [ $($KUBECTL get pod -n kube-system -l app=kube-ovn-cni -o name | wc -l) -eq 0 ]; then
break
fi
sleep 5
sleep 1
done

for pod in $($KUBECTL get pod -n kube-system -l app=ovs -o 'jsonpath={.items[?(@.status.phase=="Running")].metadata.name}'); do
node=$($KUBECTL get pod -n kube-system $pod -o 'jsonpath={.spec.nodeName}')
nodeIPs=$($KUBECTL get node $node -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}' | sed 's/ /,/')
$KUBECTL exec -n kube-system "$pod" -- bash /kube-ovn/uninstall.sh "$nodeIPs"
$KUBECTL exec -n kube-system "$pod" -- bash /kube-ovn/uninstall.sh
done

$KUBECTL delete --ignore-not-found svc ovn-nb ovn-sb ovn-northd -n kube-system
$KUBECTL delete --ignore-not-found deploy ovn-central -n kube-system
$KUBECTL delete --ignore-not-found ds ovs-ovn -n kube-system
$KUBECTL delete --ignore-not-found ds ovs-ovn-dpdk -n kube-system
$KUBECTL delete --ignore-not-found secret kube-ovn-tls -n kube-system
$KUBECTL delete --ignore-not-found sa ovn -n kube-system
$KUBECTL delete --ignore-not-found clusterrole system:ovn
$KUBECTL delete --ignore-not-found clusterrolebinding ovn

# delete vpc-dns content
$KUBECTL delete --ignore-not-found cm vpc-dns-config -n kube-system
$KUBECTL delete --ignore-not-found clusterrole system:vpc-dns
$KUBECTL delete --ignore-not-found clusterrolebinding vpc-dns
$KUBECTL delete --ignore-not-found sa vpc-dns -n kube-system

# delete CRD
$KUBECTL delete --ignore-not-found crd htbqoses.kubeovn.io security-groups.kubeovn.io ips.kubeovn.io subnets.kubeovn.io \
vpc-nat-gateways.kubeovn.io vpcs.kubeovn.io vlans.kubeovn.io provider-networks.kubeovn.io \
iptables-dnat-rules.kubeovn.io iptables-eips.kubeovn.io iptables-fip-rules.kubeovn.io \
iptables-snat-rules.kubeovn.io vips.kubeovn.io
$KUBECTL delete --ignore-not-found crd \
htbqoses.kubeovn.io \
security-groups.kubeovn.io \
ippools.kubeovn.io \
vpc-nat-gateways.kubeovn.io \
vpcs.kubeovn.io \
vlans.kubeovn.io \
provider-networks.kubeovn.io \
iptables-dnat-rules.kubeovn.io \
iptables-snat-rules.kubeovn.io \
iptables-fip-rules.kubeovn.io \
iptables-eips.kubeovn.io \
vips.kubeovn.io \
switch-lb-rules.kubeovn.io \
vpc-dnses.kubeovn.io \
ovn-dnat-rules.kubeovn.io \
ovn-snat-rules.kubeovn.io \
ovn-fips.kubeovn.io \
ovn-eips.kubeovn.io \
qos-policies.kubeovn.io

# in case of ip not delete
set +e
for ip in $($KUBECTL get ip -o name); do
$KUBECTL patch "$ip" --type='json' -p '[{"op": "replace", "path": "/metadata/finalizers", "value": []}]'
$KUBECTL delete --ignore-not-found "$ip"
done
$KUBECTL delete --ignore-not-found crd ips.kubeovn.io
set -e

# Remove annotations/labels in namespaces and nodes
$KUBECTL annotate no --all ovn.kubernetes.io/cidr-
$KUBECTL annotate no --all ovn.kubernetes.io/gateway-
$KUBECTL annotate no --all ovn.kubernetes.io/ip_address-
$KUBECTL annotate no --all ovn.kubernetes.io/logical_switch-
$KUBECTL annotate no --all ovn.kubernetes.io/mac_address-
$KUBECTL annotate no --all ovn.kubernetes.io/port_name-
$KUBECTL annotate no --all ovn.kubernetes.io/allocated-
$KUBECTL annotate no --all ovn.kubernetes.io/chassis-
$KUBECTL annotate node --all ovn.kubernetes.io/cidr-
$KUBECTL annotate node --all ovn.kubernetes.io/gateway-
$KUBECTL annotate node --all ovn.kubernetes.io/ip_address-
$KUBECTL annotate node --all ovn.kubernetes.io/logical_switch-
$KUBECTL annotate node --all ovn.kubernetes.io/mac_address-
$KUBECTL annotate node --all ovn.kubernetes.io/port_name-
$KUBECTL annotate node --all ovn.kubernetes.io/allocated-
$KUBECTL annotate node --all ovn.kubernetes.io/chassis-
$KUBECTL label node --all kube-ovn/role-

$KUBECTL get no -o name | while read node; do
$KUBECTL get node -o name | while read node; do
$KUBECTL get "$node" -o 'go-template={{ range $k, $v := .metadata.labels }}{{ $k }}{{"\n"}}{{ end }}' | while read label; do
if echo "$label" | grep -qE '^(.+\.provider-network\.kubernetes\.io/(ready|mtu|interface|exclude))$'; then
$KUBECTL label "$node" "$label-"
Expand All @@ -97,24 +186,35 @@ $KUBECTL annotate ns --all ovn.kubernetes.io/allocated-

# ensure kube-ovn components have been deleted
while :; do
sleep 5
if [ $($KUBECTL get pod --no-headers -n kube-system -l component=network | wc -l) -eq 0 ]; then
sleep 10
if [ $($KUBECTL get pod -n kube-system -l component=network -o name | wc -l) -eq 0 ]; then
break
fi
for pod in $($KUBECTL -n kube-system get pod -l component=network -o name); do
echo "$pod logs:"
$KUBECTL -n kube-system logs $pod --timestamps --tail 50
done
done

# wait for all pods to be deleted before deleting serviceaccount/clusterrole/clusterrolebinding
$KUBECTL delete --ignore-not-found sa ovn ovn-ovs kube-ovn-cni kube-ovn-app -n kube-system
$KUBECTL delete --ignore-not-found clusterrole system:ovn system:ovn-ovs system:kube-ovn-cni system:kube-ovn-app
$KUBECTL delete --ignore-not-found clusterrolebinding ovn ovn ovn-ovs kube-ovn-cni kube-ovn-app

$KUBECTL delete --ignore-not-found -n kube-system lease kube-ovn-controller

# Remove annotations in all pods of all namespaces
for ns in $($KUBECTL get ns -o name |cut -c 11-); do
echo "annotating pods in ns:$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/cidr- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/gateway- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/ip_address- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/logical_switch- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/mac_address- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/port_name- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/allocated- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/routed- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/vlan_id- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/network_type- -n "$ns"
$KUBECTL annotate pod --all ovn.kubernetes.io/provider_network- -n "$ns"
for ns in $($KUBECTL get ns -o name | awk -F/ '{print $2}'); do
echo "annotating pods in namespace $ns"
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/cidr-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/gateway-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/ip_address-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/logical_switch-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/mac_address-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/port_name-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/allocated-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/routed-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/vlan_id-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/network_type-
$KUBECTL annotate pod --all -n $ns ovn.kubernetes.io/provider_network-
done
86 changes: 30 additions & 56 deletions addons/kube-ovn/enable
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import socket
import subprocess
import sys
import re
import time
from pathlib import Path
import shutil

Expand Down Expand Up @@ -54,16 +55,13 @@ Error: kube-ovn requires ha-cluster to be enabled. Please enable with:
sys.exit(1)

hosts = hostname.split(",")
node_ips = []

# 1. label nodes that will run ovn-db
for host in hosts:
stdout = subprocess.check_output(
[KUBECTL, "get", "node", host, "-o", JSONPATH_INTERNAL_IP]
)
node_ip = stdout.decode().strip()
if node_ip:
node_ips.append(node_ip)

click.echo("Label node {} ({})".format(host, node_ip))
subprocess.check_call(
Expand All @@ -76,43 +74,35 @@ Error: kube-ovn requires ha-cluster to be enabled. Please enable with:
click.echo("Remove Calico CNI")
subprocess.run([KUBECTL, "delete", "-f", cni_yaml])
shutil.move(cni_yaml, SNAP_DATA / "args" / "cni-network" / "cni.yaml.backup")

# 3. apply ovn CRDs
click.echo("Deploy kube-ovn CRDs")
subprocess.check_call([KUBECTL, "apply", "-f", DIR / "crd.yaml"])

has_avx_flag = cpu_has_avx_flag()

# 4. generate manifest and deploy ovn components
if not has_avx_flag:
# Update kube-ovn-template.yaml and write it to kube-ovn.yaml
kube_ovn_yaml = DIR / "kube-ovn.yaml"
kube_ovn_template = DIR / "kube-ovn-template.yaml"
with open(kube_ovn_template, "r") as tpl:
kube_ovn_tpl = tpl.read()

kube_ovn_tpl = kube_ovn_tpl.replace("__AVXTAG__", NO_AVX_CPU_TAG)
with open(kube_ovn_yaml, "w") as ko:
ko.write(kube_ovn_tpl)

click.echo("Deploy ovn components")
# Update ovn-template.yaml and write it to ovn.yaml
with open(DIR / "ovn-template.yaml") as fin:
ovn_template = fin.read()

ovn_yaml = SNAP_DATA / "args" / "cni-network" / "ovn.yaml"
ovn_template = ovn_template.replace("__REPLICAS__", str(len(node_ips)))
ovn_template = ovn_template.replace("__NODE_IPS__", ",".join(node_ips))
if not has_avx_flag:
ovn_template = ovn_template.replace("__AVXTAG__", NO_AVX_CPU_TAG)
with open(ovn_yaml, "w") as fout:
fout.write(ovn_template)

subprocess.check_call([KUBECTL, "apply", "-f", ovn_yaml])

# 5. install kube-ovn plugins
click.echo("Deploy kube-ovn CNI")
subprocess.check_call([KUBECTL, "apply", "-f", DIR / "kube-ovn.yaml"])
print("Cleaning up Calico resources...")
timeout, elapsed = 120, 0
while True:
result = subprocess.run(
[KUBECTL, "get", "all", "-A", "--no-headers"],
stdout=subprocess.PIPE,
text=True,
)
resources = result.stdout.splitlines()
calico_resources = [r for r in resources if "calico" in r]

if not calico_resources:
print("All Calico resource(s) have been cleaned up.")
break

print(
f"Still cleaning up {len(calico_resources)} Calico resources(s)..."
)
if elapsed >= timeout:
print(
f"Failed to cleanup {len(calico_resources)} remaining resource(s) in time, exiting."
)
return
time.sleep(5)
elapsed += 5

# Run kube-ovn install script
click.echo("Run kube-ovn install script")
subprocess.check_call(["/bin/bash", DIR / "install.sh"])

# 6. install microk8s hooks
for hook in ["reconcile"]:
Expand All @@ -123,21 +113,5 @@ Error: kube-ovn requires ha-cluster to be enabled. Please enable with:
click.echo("Failed to install {} hook: {}".format(hook, e), err=True)


def cpu_has_avx_flag() -> bool:
"""
Check if the CPU has the AVX2 or AVX512 flag.
Returns:
bool: True if the CPU has the AVX2 or AVX512 flag, False otherwise.
"""
with open("/proc/cpuinfo", "r") as f:
l = f.read()
# Take any string after the specified field name and colon.
if re.search(r"^.*(avx[0-9]{1,3}).*$", l, re.MULTILINE):
return True

return False


if __name__ == "__main__":
enable()
16 changes: 7 additions & 9 deletions addons/kube-ovn/hooks/reconcile
Original file line number Diff line number Diff line change
Expand Up @@ -6,16 +6,14 @@ KUBECTL="${SNAP}/microk8s-kubectl.wrapper"

# Kube-OVN reload deployment
if ! [ -e "${SNAP_DATA}/var/lock/no-cni-reload" ] &&
[ -e "${SNAP_DATA}/var/lock/no-flanneld" ] &&
[ -e "${SNAP_DATA}/var/lock/cni-needs-reload" ]
then
[ -e "${SNAP_DATA}/var/lock/no-flanneld" ] &&
[ -e "${SNAP_DATA}/var/lock/cni-needs-reload" ]; then
echo "Reloading kube-ovn"
if (is_apiserver_ready) &&
"${KUBECTL}" --request-timeout 2m describe -n kube-system daemonset.apps/kube-ovn-cni &&
"${KUBECTL}" --request-timeout 2m describe -n kube-system daemonset.apps/ovs-ovn &&
"${KUBECTL}" --request-timeout 2m rollout restart -n kube-system daemonset.apps/kube-ovn-cni &&
"${KUBECTL}" --request-timeout 2m rollout restart -n kube-system daemonset.apps/ovs-ovn
then
if (is_apiserver_ready) &&
"${KUBECTL}" --request-timeout 2m describe -n kube-system daemonset.apps/kube-ovn-cni &&
"${KUBECTL}" --request-timeout 2m describe -n kube-system daemonset.apps/ovs-ovn &&
"${KUBECTL}" --request-timeout 2m rollout restart -n kube-system daemonset.apps/kube-ovn-cni &&
"${KUBECTL}" --request-timeout 2m rollout restart -n kube-system daemonset.apps/ovs-ovn; then
rm "${SNAP_DATA}/var/lock/cni-needs-reload"
fi
fi
Loading

0 comments on commit 3385552

Please sign in to comment.