Skip to content

Commit

Permalink
Merge pull request #182 from thebsdbox/arp_fixes
Browse files Browse the repository at this point in the history
Fixes to arp/controller and manifests
  • Loading branch information
thebsdbox authored Mar 8, 2021
2 parents 4c6fa47 + 6d1c60d commit 0a52356
Show file tree
Hide file tree
Showing 12 changed files with 152 additions and 56 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ TARGET := kube-vip
.DEFAULT_GOAL: $(TARGET)

# These will be provided to the target
VERSION := 0.3.2
VERSION := 0.3.3
BUILD := `git rev-parse HEAD`

# Operating System Default (LINUX)
Expand Down
5 changes: 3 additions & 2 deletions docs/manifests/controller.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ rules:
verbs: ["*"]
- apiGroups: [""]
resources: ["nodes", "services"]
verbs: ["list","get","watch"]
verbs: ["list","get","watch","update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
Expand Down Expand Up @@ -54,7 +54,8 @@ spec:
containers:
- command:
- /plndr-cloud-provider
image: plndr/plndr-cloud-provider:0.1.4
- --leader-elect-resource-name=plndr-cloud-controller
image: plndr/plndr-cloud-provider:0.1.5
name: plndr-cloud-provider
imagePullPolicy: Always
resources: {}
Expand Down
3 changes: 3 additions & 0 deletions docs/manifests/rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@ rules:
- apiGroups: [""]
resources: ["services", "services/status", "nodes"]
verbs: ["list","get","watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
Expand Down
5 changes: 3 additions & 2 deletions pkg/kubevip/config_generator.go
Original file line number Diff line number Diff line change
Expand Up @@ -821,8 +821,9 @@ func GenerateDeamonsetManifestFromConfig(c *Config, imageVersion string, inClust
if taint {
newManifest.Spec.Template.Spec.Tolerations = []corev1.Toleration{
{
Key: "node-role.kubernetes.io/master",
Effect: corev1.TaintEffectNoSchedule,
Key: "node-role.kubernetes.io/master",
Effect: corev1.TaintEffectNoSchedule,
Operator: corev1.TolerationOpExists,
},
}
newManifest.Spec.Template.Spec.NodeSelector = map[string]string{
Expand Down
41 changes: 0 additions & 41 deletions testing/create.sh

This file was deleted.

43 changes: 43 additions & 0 deletions testing/k3s/create.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
# Stand up a 3-node k3s cluster with kube-vip deployed as a DaemonSet
# (manifests dropped into k3s's auto-deploy directory on the first node).
#
# Params:
#   $1  kube-vip version (image tag)
#   $2  kube-vip mode: "controlplane" / "services" / "hybrid"
#   $3  VIP address
#   $4  k3s url — documented in usage but not referenced below

# Require the first three parameters. (The original test used && between
# the -z checks, so usage only printed when *every* argument was missing.)
if [[ -z $1 || -z $2 || -z $3 ]]; then
    echo "Usage:"
    echo "  Param 1: Kube-Vip Version"
    echo "  Param 2: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]"
    echo "  Param 3: Vip address"
    echo "  Param 4: k3s url (https://github.com/k3s-io/k3s/releases/download/v1.20.4%2Bk3s1/k3s)"
    echo ""
    echo "  ./create.sh 0.3.3 hybrid 192.168.0.40"
    exit 1
fi

# Map the mode argument to the kube-vip manifest flags.
case "$2" in

"controlplane") echo "Creating control plane only cluster"
    mode="--controlplane"
    ;;
"services") echo "Creating services only cluster"
    mode="--services"
    ;;
"hybrid") echo "Creating hybrid cluster"
    mode="--controlplane --services"
    ;;
*) echo "Unknown kube-vip mode [$2]"
    exit 1
    ;;
esac

# Node hostnames (NODE01..NODE05).
source ./testing/nodes

echo "Creating First node!"

# Drop the kube-vip DaemonSet manifest and its RBAC into the k3s
# auto-deploy directory before the server starts, so the VIP comes up
# with the cluster.
ssh $NODE01 "sudo mkdir -p /var/lib/rancher/k3s/server/manifests/"
ssh $NODE01 "sudo docker run --network host --rm plndr/kube-vip:$1 manifest daemonset $mode --interface ens160 --vip $3 --arp --leaderElection --inCluster --taint | sudo tee /var/lib/rancher/k3s/server/manifests/vip.yaml"
ssh $NODE01 "sudo curl https://kube-vip.io/manifests/rbac.yaml | sudo tee /var/lib/rancher/k3s/server/manifests/rbac.yaml"
ssh $NODE01 "sudo screen -dmSL k3s k3s server --cluster-init --tls-san $3 --no-deploy servicelb --disable-cloud-controller --token=test"
echo "Started first node, sleeping for 60 seconds"
sleep 60
echo "Adding additional nodes"
# Remaining servers join through the VIP rather than NODE01 directly.
ssh $NODE02 "sudo screen -dmSL k3s k3s server --server https://$3:6443 --token=test"
ssh $NODE03 "sudo screen -dmSL k3s k3s server --server https://$3:6443 --token=test"
sleep 20
ssh $NODE01 "sudo k3s kubectl get node -o wide"
13 changes: 13 additions & 0 deletions testing/k3s/teardown.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/bash
# Reset every k3s test node: kill k3s, wipe its state, and reboot.

source ./testing/nodes

echo "Wiping Nodes in reverse order, and rebooting"
# Same command on each node, highest-numbered first.
for node in $NODE05 $NODE04 $NODE03 $NODE02 $NODE01; do
    ssh $node "sudo pkill k3s; sudo rm -rf /var/lib/rancher /etc/rancher; sudo reboot"
done
echo
echo "All Control Plane Nodes have been reset"
echo "Consider removing kube-vip images if changing version"
61 changes: 61 additions & 0 deletions testing/kubeadm/create.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
#!/bin/bash
# Stand up a 5-node kubeadm cluster (3 control-plane + 2 workers) with
# kube-vip running as a static pod on each control-plane node.
#
# Params:
#   $1  Kubernetes version
#   $2  kube-vip version (image tag)
#   $3  kube-vip mode: "controlplane" / "services" / "hybrid"
#   $4  VIP address

# Require all four parameters. (The original test used && between the
# -z checks, so usage only printed when *every* argument was missing.)
if [[ -z $1 || -z $2 || -z $3 || -z $4 ]]; then
    echo "Usage:"
    echo "  Param 1: Kubernetes Version"
    echo "  Param 2: Kube-Vip Version"
    echo "  Param 3: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]"
    echo "  Param 4: Vip address"
    echo ""
    echo "  ./create.sh 1.18.5 0.3.3 hybrid 192.168.0.40"
    exit 1
fi

# Map the mode argument to the kube-vip manifest flags. (The original
# messages here — "Sending SIGHUP signal" etc. — were copy-pasted from
# an unrelated signal-handling script.)
case "$3" in

"controlplane") echo "Creating control plane only cluster"
    mode="--controlplane"
    ;;
"services") echo "Creating services only cluster"
    mode="--services"
    ;;
"hybrid") echo "Creating hybrid cluster"
    mode="--controlplane --services"
    ;;
*) echo "Unknown kube-vip mode [$3]"
    exit 1
    ;;
esac

# Node hostnames (NODE01..NODE05).
source ./testing/nodes

echo "Creating First node!"

# Generate the kube-vip static-pod manifest before kubeadm init so the
# VIP is advertised as soon as the first control plane starts.
ssh $NODE01 "sudo docker run --network host --rm plndr/kube-vip:$2 manifest pod $mode --interface ens160 --vip $4 --arp --leaderElection | sudo tee /etc/kubernetes/manifests/vip.yaml"
CONTROLPLANE_CMD=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $1 --control-plane-endpoint $4 --upload-certs --pod-network-cidr=10.0.0.0/16 | grep certificate-key")
ssh $NODE01 "sudo rm -rf ~/.kube/"
ssh $NODE01 "mkdir -p .kube"
ssh $NODE01 "sudo cp -i /etc/kubernetes/admin.conf .kube/config"
# NOTE(review): hard-coded user "dan" — only valid on this home lab.
ssh $NODE01 "sudo chown dan:dan .kube/config"
echo "Enabling strict ARP on kube-proxy"
ssh $NODE01 "kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e \"s/strictARP: false/strictARP: true/\" | kubectl apply -f - -n kube-system"
ssh $NODE01 "kubectl describe configmap -n kube-system kube-proxy | grep strictARP"
ssh $NODE01 "kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml"
JOIN_CMD=$(ssh $NODE01 " sudo kubeadm token create --print-join-command 2> /dev/null")

# Control-plane nodes join with the certificate key, then get their own
# kube-vip static pod.
ssh $NODE02 "sudo $JOIN_CMD $CONTROLPLANE_CMD"
sleep 3
ssh $NODE02 "sudo docker run --network host --rm plndr/kube-vip:$2 manifest pod --interface ens160 --vip $4 --arp --leaderElection $mode | sudo tee /etc/kubernetes/manifests/vip.yaml"

ssh $NODE03 "sudo $JOIN_CMD $CONTROLPLANE_CMD"
sleep 3
ssh $NODE03 "sudo docker run --network host --rm plndr/kube-vip:$2 manifest pod --interface ens160 --vip $4 --arp --leaderElection $mode | sudo tee /etc/kubernetes/manifests/vip.yaml"
# Worker nodes join without the control-plane certificate key.
ssh $NODE04 "sudo $JOIN_CMD"
ssh $NODE05 "sudo $JOIN_CMD"
echo
echo " Nodes should be deployed at this point, waiting 5 secs and querying the deployment"
sleep 5
ssh $NODE01 "kubectl get nodes"
ssh $NODE01 "kubectl get pods -A"
echo
echo "Kubernetes: $1, Kube-vip $2, Advertising VIP: $4"
13 changes: 13 additions & 0 deletions testing/kubeadm/teardown.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/bin/bash
# Tear down the kubeadm test cluster: reset each node and reboot it.

source ./testing/nodes

echo "Wiping Nodes in reverse order, and rebooting"
# Identical reset on every node except the first, highest-numbered first.
for node in $NODE05 $NODE04 $NODE03 $NODE02; do
    ssh $node "sudo kubeadm reset -f; sudo rm -rf /etc/cni/net.d; sudo reboot"
done
# NODE01 is reset last with extra phases skipped.
ssh $NODE01 "sudo kubeadm reset -f --skip-phases preflight update-cluster-status remove-etcd-member; sudo rm -rf /etc/cni/net.d; sudo reboot"
echo
echo "All Control Plane Nodes have been reset"
echo "Consider removing kube-vip images if changing version"
6 changes: 6 additions & 0 deletions testing/nodes
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Home lab (for testing)
# Hostnames of the five test nodes. This file is sourced
# (`source ./testing/nodes`) by the create/teardown scripts under
# testing/; NODE01 is treated as the first control-plane node.
NODE01=k8s01.fnnrn.me
NODE02=k8s02.fnnrn.me
NODE03=k8s03.fnnrn.me
NODE04=k8s04.fnnrn.me
NODE05=k8s05.fnnrn.me
10 changes: 0 additions & 10 deletions testing/teardown.sh

This file was deleted.

6 changes: 6 additions & 0 deletions testing/testing.sh
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,12 @@ docker run --network host --rm plndr/kube-vip:action manifest pod --interface et
echo "==> ARP w/controlplane (using --address)"
docker run --network host --rm plndr/kube-vip:action manifest pod --interface enx001e063262b1 --address k8s-api-vip.lan --arp --leaderElection --controlplane

# Heading fixed: the original echoed "ARP w/controlplane (using
# --address)", copy-pasted from the previous test case, which does not
# describe this daemonset invocation.
echo "==> ARP daemonset w/controlplane, services, inCluster and taint"
docker run --network host --rm plndr/kube-vip:action manifest daemonset --interface eth0 --vip 192.168.0.1 --controlplane \
--services \
--inCluster \
--taint

trap : 0

echo >&2 '
Expand Down

0 comments on commit 0a52356

Please sign in to comment.