Add test for LoadBalancer IP advertisement
caseydavenport committed Jan 8, 2021
1 parent 822a130 commit 624f0f9
Showing 4 changed files with 343 additions and 0 deletions.
5 changes: 5 additions & 0 deletions tests/k8st/create_kind_cluster.sh
@@ -138,6 +138,11 @@ time ${kubectl} wait pod -l k8s-app=kube-dns --for=condition=Ready -n kube-system
echo "Calico is running."
echo

echo "Install MetalLB controller for allocating LoadBalancer IPs"
${kubectl} create ns metallb-system
${kubectl} apply -f $TEST_DIR/infra/metallb.yaml
${kubectl} apply -f $TEST_DIR/infra/metallb-config.yaml

# Create and monitor a test webserver service for dual stack.
echo "Create test-webserver deployment..."
${kubectl} apply -f tests/k8st/infra/test-webserver.yaml
12 changes: 12 additions & 0 deletions tests/k8st/infra/metallb-config.yaml
@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: bgp
      addresses:
      - 80.15.0.0/24
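
With this pool in place, any Service of type LoadBalancer created in the cluster is assigned an address from 80.15.0.0/24. A minimal sketch for reference only (the Service name and selector below are hypothetical and are not part of this commit):

apiVersion: v1
kind: Service
metadata:
  name: example-lb        # hypothetical name, for illustration only
spec:
  type: LoadBalancer      # MetalLB allocates an IP from the 80.15.0.0/24 pool above
  selector:
    app: example          # hypothetical selector
  ports:
  - port: 80
    targetPort: 80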
200 changes: 200 additions & 0 deletions tests/k8st/infra/metallb.yaml
@@ -0,0 +1,200 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
spec:
  allowPrivilegeEscalation: false
  allowedCapabilities: []
  allowedHostPaths: []
  defaultAddCapabilities: []
  defaultAllowPrivilegeEscalation: false
  fsGroup:
    ranges:
    - max: 65535
      min: 1
    rule: MustRunAs
  hostIPC: false
  hostNetwork: false
  hostPID: false
  privileged: false
  readOnlyRootFilesystem: true
  requiredDropCapabilities:
  - ALL
  runAsUser:
    ranges:
    - max: 65535
      min: 1
    rule: MustRunAs
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    ranges:
    - max: 65535
      min: 1
    rule: MustRunAs
  volumes:
  - configMap
  - secret
  - emptyDir
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
rules:
- apiGroups:
  - ''
  resources:
  - services
  verbs:
  - get
  - list
  - watch
  - update
- apiGroups:
  - ''
  resources:
  - services/status
  verbs:
  - update
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - policy
  resourceNames:
  - controller
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: pod-lister
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - pods
  verbs:
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
subjects:
- kind: ServiceAccount
  name: controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: metallb
    component: controller
  name: controller
  namespace: metallb-system
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: controller
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        image: metallb/controller:v0.9.5
        imagePullPolicy: Always
        name: controller
        ports:
        - containerPort: 7472
          name: monitoring
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0
126 changes: 126 additions & 0 deletions tests/k8st/tests/test_bgp_advert.py
@@ -15,6 +15,7 @@
import subprocess
import json
import sys
import time

from tests.k8st.test_base import TestBase
from tests.k8st.utils.utils import start_external_node_with_bgp, \
@@ -175,6 +176,15 @@ def get_svc_cluster_ip(self, svc, ns):
        return kubectl("get svc %s -n %s -o json | jq -r .spec.clusterIP" %
                       (svc, ns)).strip()

    def get_svc_loadbalancer_ip(self, svc, ns):
        for i in range(10):
            lb_ip = kubectl("get svc %s -n %s -o json | jq -r .status.loadBalancer.ingress[0].ip" %
                            (svc, ns)).strip()
            if lb_ip != "null":
                return lb_ip
            time.sleep(1)
        raise Exception("No LoadBalancer IP found for service: %s/%s" % (ns, svc))

    def assert_ecmp_routes(self, dst, via):
        matchStr = dst + " proto bird "
        # sort ips and construct match string for ECMP routes.
@@ -410,6 +420,122 @@ def test_external_ip_advertisement(self):
            # Assert that external IP is no longer an advertised route.
            retry_until_success(lambda: self.assertNotIn(local_svc_externalips_route, self.get_routes()))

    def test_loadbalancer_ip_advertisement(self):
        """
        Runs the tests for service LoadBalancer IP advertisement
        """
        with DiagsCollector():

            # Whitelist IP ranges for the LB IPs we'll test with
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceLoadBalancerIPs:
  - cidr: 80.15.0.0/24
EOF
""")

            # Create a dummy service first to occupy the first LB IP. This is
            # a hack to make sure the chosen IP we use in the tests below
            # isn't the same as the zero address in the range.
            self.create_service("dummy-service", "dummy-service", self.ns, 80, svc_type="LoadBalancer")

            # Create both a Local and a Cluster type LoadBalancer service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("nginx:1.7.9", cluster_svc, self.ns, 80, traffic_policy="Cluster", svc_type="LoadBalancer")
            self.deploy("nginx:1.7.9", local_svc, self.ns, 80, svc_type="LoadBalancer")
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get the allocated LB IPs.
            local_lb_ip = self.get_svc_loadbalancer_ip(local_svc, self.ns)
            cluster_lb_ip = self.get_svc_loadbalancer_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Get host IPs for the nginx pods.
            local_svc_host_ip = self.get_svc_host_ip(local_svc, self.ns)
            cluster_svc_host_ip = self.get_svc_host_ip(cluster_svc, self.ns)

            # Verify that LB IP for local service is advertised but not the cluster service.
            local_svc_lb_route = "%s via %s" % (local_lb_ip, local_svc_host_ip)
            cluster_svc_lb_route = "%s via %s" % (cluster_lb_ip, cluster_svc_host_ip)
            retry_until_success(lambda: self.assertIn(local_svc_lb_route, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(cluster_svc_lb_route, self.get_routes()))

            # The full range should be advertised from each node.
            lb_cidr = "80.15.0.0/24"
            retry_until_success(lambda: self.assert_ecmp_routes(lb_cidr, [self.ips[0], self.ips[1], self.ips[2], self.ips[3]]))

            # Scale the local_svc to 4 replicas.
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)

            # Verify that we have ECMP routes for the LB IP of the local service from nodes running it.
            retry_until_success(lambda: self.assert_ecmp_routes(local_lb_ip, [self.ips[1], self.ips[2], self.ips[3]]))

            # Apply a modified BGP config that no longer enables advertisement
            # for LoadBalancer IPs.
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec: {}
EOF
""")
            # Assert routes are withdrawn.
            retry_until_success(lambda: self.assertNotIn(local_lb_ip, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(lb_cidr, self.get_routes()))

            # Apply a modified BGP config that has a mismatched CIDR specified.
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceLoadBalancerIPs:
  - cidr: 90.15.0.0/24
EOF
""")
            # Assert routes are still withdrawn.
            retry_until_success(lambda: self.assertNotIn(local_lb_ip, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(lb_cidr, self.get_routes()))

            # Reapply the correct configuration; we should see routes come back.
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceLoadBalancerIPs:
  - cidr: 80.15.0.0/24
EOF
""")
            # Verify that we have ECMP routes for the LB IP of the local service from nodes running it.
            retry_until_success(lambda: self.assert_ecmp_routes(local_lb_ip, [self.ips[1], self.ips[2], self.ips[3]]))
            retry_until_success(lambda: self.assertIn(lb_cidr, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(cluster_svc_lb_route, self.get_routes()))

            # Services should be reachable from the external node.
            retry_until_success(curl, function_args=[local_lb_ip])
            retry_until_success(curl, function_args=[cluster_lb_ip])

            # Delete both services; assert that only the CIDR route is still advertised.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that LB IP is no longer an advertised route.
            retry_until_success(lambda: self.assertNotIn(local_lb_ip, self.get_routes()))

    def test_many_services(self):
        """
        Creates a lot of services quickly
