fix: host DNS access with firewall enabled
Explicitly enable access to host DNS from pod/service IPs.

Also fix the Kubernetes health checks to assert that the number of ready pods
matches the expectation; otherwise the check might skip a pod (e.g. a
`kube-proxy` one) which is not ready, allowing the test to proceed too
early.

Update DNS test to print more logs on error.

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
smira committed Aug 27, 2024
1 parent 4834a61 commit a9551b7
Showing 5 changed files with 83 additions and 15 deletions.
@@ -152,6 +152,37 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
},
)

if cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
if cfg.Config().Machine().Features().HostDNS().ForwardKubeDNSToHost() {
hostDNSIP := netip.MustParseAddr(constants.HostDNSAddress)

// allow traffic to host DNS
for _, protocol := range []nethelpers.Protocol{nethelpers.ProtocolUDP, nethelpers.ProtocolTCP} {
spec.Rules = append(spec.Rules,
network.NfTablesRule{
MatchSourceAddress: &network.NfTablesAddressMatch{
IncludeSubnets: xslices.Map(
append(slices.Clone(cfg.Config().Cluster().Network().PodCIDRs()), cfg.Config().Cluster().Network().ServiceCIDRs()...),
netip.MustParsePrefix,
),
},
MatchDestinationAddress: &network.NfTablesAddressMatch{
IncludeSubnets: []netip.Prefix{netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())},
},
MatchLayer4: &network.NfTablesLayer4Match{
Protocol: protocol,
MatchDestinationPort: &network.NfTablesPortMatch{
Ranges: []network.PortRange{{Lo: 53, Hi: 53}},
},
},
AnonCounter: true,
Verdict: pointer.To(nethelpers.VerdictAccept),
},
)
}
}
}

if cfg.Config().Cluster() != nil {
spec.Rules = append(spec.Rules,
// allow Kubernetes pod/service traffic
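The destination match above is built as netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen()), which turns the host DNS address into a single-host prefix, so the accept rule covers only port 53 traffic to that exact address from the pod/service CIDRs. A minimal standalone sketch of that construction (the address literal is for illustration only and is not necessarily the value of constants.HostDNSAddress):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Stand-in for constants.HostDNSAddress; the literal is illustrative only.
	hostDNSIP := netip.MustParseAddr("169.254.116.108")

	// BitLen() is 32 for IPv4 (128 for IPv6), so PrefixFrom yields a
	// single-host prefix: the nftables rule matches exactly this address.
	hostOnly := netip.PrefixFrom(hostDNSIP, hostDNSIP.BitLen())

	fmt.Println(hostOnly) // prints 169.254.116.108/32
}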
6 changes: 6 additions & 0 deletions internal/integration/api/common.go
@@ -174,6 +174,12 @@ func (suite *CommonSuite) TestDNSResolver() {
suite.Assert().Contains(stderr, "'index.html' saved")

if suite.T().Failed() {
suite.LogPodLogsByLabel(suite.ctx, "kube-system", "k8s-app", "kube-dns")

for _, node := range suite.DiscoverNodeInternalIPs(suite.ctx) {
suite.DumpLogs(suite.ctx, node, "dns-resolve-cache", "google")
}

suite.T().FailNow()
}
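The block above relies on suite.Assert() being non-fatal: a failed assertion marks the test as failed but lets it keep running, so the CoreDNS and dns-resolve-cache logs can be collected before FailNow aborts the test. A minimal standalone sketch of the same pattern using the plain testing package (the helper and its arguments are hypothetical):

package example

import "testing"

// checkThenDump illustrates the assert-then-dump pattern: record the failure
// with a non-fatal error, gather diagnostics while the test is still running,
// then abort.
func checkThenDump(t *testing.T, lookupOK bool, dumpLogs func()) {
	if !lookupOK {
		t.Error("DNS lookup failed") // non-fatal: marks the test as failed
	}

	if t.Failed() {
		dumpLogs()  // collect extra logs for debugging
		t.FailNow() // stop the test only after the logs are captured
	}
}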

15 changes: 15 additions & 0 deletions internal/integration/base/k8s.go
@@ -231,6 +231,21 @@ func (k8sSuite *K8sSuite) WaitForPodToBeRunning(ctx context.Context, timeout tim
}
}

// LogPodLogsByLabel logs the logs of all pods in the given namespace matching the given label.
func (k8sSuite *K8sSuite) LogPodLogsByLabel(ctx context.Context, namespace, label, value string) {
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()

podList, err := k8sSuite.Clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", label, value),
})
k8sSuite.Require().NoError(err)

for _, pod := range podList.Items {
k8sSuite.LogPodLogs(ctx, namespace, pod.Name)
}
}

// LogPodLogs logs the logs of the pod with the given namespace and name.
func (k8sSuite *K8sSuite) LogPodLogs(ctx context.Context, namespace, podName string) {
ctx, cancel := context.WithTimeout(ctx, time.Minute)
12 changes: 6 additions & 6 deletions pkg/cluster/check/default.go
@@ -29,7 +29,7 @@ func DefaultClusterChecks() []ClusterCheck {
// wait for kube-proxy to report ready
func(cluster ClusterInfo) conditions.Condition {
return conditions.PollingCondition("kube-proxy to report ready", func(ctx context.Context) error {
present, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
present, replicas, err := DaemonSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
if err != nil {
return err
}
@@ -38,14 +38,14 @@ func DefaultClusterChecks() []ClusterCheck {
return conditions.ErrSkipAssertion
}

return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
}, 3*time.Minute, 5*time.Second)
return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=kube-proxy")
}, 5*time.Minute, 5*time.Second)
},

// wait for coredns to report ready
func(cluster ClusterInfo) conditions.Condition {
return conditions.PollingCondition("coredns to report ready", func(ctx context.Context) error {
present, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
present, replicas, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
if err != nil {
return err
}
@@ -54,8 +54,8 @@ func DefaultClusterChecks() []ClusterCheck {
return conditions.ErrSkipAssertion
}

return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-dns")
}, 3*time.Minute, 5*time.Second)
return K8sPodReadyAssertion(ctx, cluster, replicas, "kube-system", "k8s-app=kube-dns")
}, 5*time.Minute, 5*time.Second)
},

// wait for all the nodes to be schedulable
34 changes: 25 additions & 9 deletions pkg/cluster/check/kubernetes.go
@@ -274,7 +274,7 @@ func K8sAllNodesSchedulableAssertion(ctx context.Context, cluster cluster.K8sPro
// K8sPodReadyAssertion checks whether all the pods matching the label selector are Ready, and their number matches the expected number of replicas.
//
//nolint:gocyclo
func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) error {
func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, replicas int, namespace, labelSelector string) error {
clientset, err := cluster.K8sClient(ctx)
if err != nil {
return err
@@ -345,44 +345,60 @@ func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, name
}

if len(notReadyPods) == 0 {
if len(readyPods) != replicas {
return fmt.Errorf("expected %d ready pods, got %d", replicas, len(readyPods))
}

return nil
}

return fmt.Errorf("some pods are not ready: %v", notReadyPods)
}

// DaemonSetPresent returns true if there is at least one DaemonSet matching the given label selector, along with the desired number of scheduled pods of the first match.
func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, error) {
//
//nolint:dupl
func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
clientset, err := cluster.K8sClient(ctx)
if err != nil {
return false, err
return false, 0, err
}

dss, err := clientset.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labelSelector,
})
if err != nil {
return false, err
return false, 0, err
}

if len(dss.Items) == 0 {
return false, 0, nil
}

return len(dss.Items) > 0, nil
return true, int(dss.Items[0].Status.DesiredNumberScheduled), nil
}

// ReplicaSetPresent returns true if there is at least one ReplicaSet matching the given label selector, along with the number of replicas of the first match.
func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, error) {
//
//nolint:dupl
func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
clientset, err := cluster.K8sClient(ctx)
if err != nil {
return false, err
return false, 0, err
}

rss, err := clientset.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labelSelector,
})
if err != nil {
return false, err
return false, 0, err
}

if len(rss.Items) == 0 {
return false, 0, nil
}

return len(rss.Items) > 0, nil
return true, int(rss.Items[0].Status.Replicas), nil
}

// K8sControlPlaneStaticPods checks whether all the controlplane nodes are running required Kubernetes static pods.
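To see why the checks now need the expected count, consider a hypothetical cluster where the kube-proxy DaemonSet wants 4 pods but one node has not started its pod yet: all listed pods are Ready, so the old assertion passed, while the new count comparison keeps the health check waiting. A simplified, self-contained sketch of the tightened logic (pod names and numbers are made up):

package main

import "fmt"

// assertReady mirrors the tightened K8sPodReadyAssertion in simplified form:
// every listed pod must be ready and the ready count must match the
// expectation taken from the DaemonSet/ReplicaSet status.
func assertReady(readyPods, notReadyPods []string, expected int) error {
	if len(notReadyPods) > 0 {
		return fmt.Errorf("some pods are not ready: %v", notReadyPods)
	}

	if len(readyPods) != expected {
		return fmt.Errorf("expected %d ready pods, got %d", expected, len(readyPods))
	}

	return nil
}

func main() {
	ready := []string{"kube-proxy-a", "kube-proxy-b", "kube-proxy-c"}

	// DesiredNumberScheduled is 4, but only 3 pods exist and are ready:
	// the old check (no count comparison) would have passed here.
	fmt.Println(assertReady(ready, nil, 4)) // expected 4 ready pods, got 3
}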
