# refactor: remove use of k8s info and nodes (#2551)
## Description

Removes use of the k8s `info` and `nodes` helper functions, replacing them with direct client-go `Clientset` calls; `src/pkg/k8s/info.go` and `src/pkg/k8s/nodes.go` are deleted.
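
For reviewers unfamiliar with the new call sites, here is a minimal sketch of the direct client-go style this change standardizes on (the kubeconfig loading and `main` wrapper are illustrative, not part of the diff):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset the way any client-go consumer would.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Replaces the deleted GetServerVersion helper.
	version, err := clientset.Discovery().ServerVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println("server version:", version.String())

	// Replaces the deleted GetNodes helper.
	nodes, err := clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("node count:", len(nodes.Items))
}
```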

## Related Issue

Relates to #2507

## Checklist before merging

- [ ] Test, docs, adr added or updated as needed
- [ ] [Contributor Guide
Steps](https://github.com/defenseunicorns/zarf/blob/main/.github/CONTRIBUTING.md#developer-workflow)
followed
phillebaba authored May 28, 2024 · 1 parent ab83d37 · commit c09cac5
Showing 10 changed files with 50 additions and 114 deletions.
4 changes: 2 additions & 2 deletions src/internal/packager/helm/chart.go
```diff
@@ -370,12 +370,12 @@ func (h *Helm) loadChartData() (*chart.Chart, chartutil.Values, error) {

 func (h *Helm) migrateDeprecatedAPIs(latestRelease *release.Release) error {
 	// Get the Kubernetes version from the current cluster
-	kubeVersion, err := h.cluster.GetServerVersion()
+	kubeVersion, err := h.cluster.Clientset.Discovery().ServerVersion()
 	if err != nil {
 		return err
 	}

-	kubeGitVersion, err := semver.NewVersion(kubeVersion)
+	kubeGitVersion, err := semver.NewVersion(kubeVersion.String())
 	if err != nil {
 		return err
 	}
```
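`Discovery().ServerVersion()` returns a `*version.Info` rather than a string, hence the added `.String()` call: `Info.String()` yields the GitVersion, which `semver.NewVersion` accepts. A small sketch (the version literal is made up):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
	"k8s.io/apimachinery/pkg/version"
)

func main() {
	// Stand-in for the value returned by clientset.Discovery().ServerVersion().
	info := &version.Info{GitVersion: "v1.28.3"}

	// Info.String() returns the GitVersion, which parses as semver.
	v, err := semver.NewVersion(info.String())
	if err != nil {
		panic(err)
	}
	fmt.Printf("major=%d minor=%d\n", v.Major(), v.Minor())
}
```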
2 changes: 1 addition & 1 deletion src/pkg/cluster/common.go
```diff
@@ -56,7 +56,7 @@ func NewCluster() (*Cluster, error) {
 	}

 	// Dogsled the version output. We just want to ensure no errors were returned to validate cluster connection.
-	_, err = c.GetServerVersion()
+	_, err = c.Clientset.Discovery().ServerVersion()
 	if err != nil {
 		return nil, err
 	}
```
2 changes: 1 addition & 1 deletion src/pkg/cluster/injector.go
```diff
@@ -514,7 +514,7 @@ func (c *Cluster) getImagesAndNodesForInjection(ctx context.Context) (imageNodeM
 	for _, pod := range pods.Items {
 		nodeName := pod.Spec.NodeName

-		nodeDetails, err := c.GetNode(ctx, nodeName)
+		nodeDetails, err := c.Clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			return nil, fmt.Errorf("unable to get the node %q: %w", nodeName, err)
 		}
```
5 changes: 4 additions & 1 deletion src/pkg/cluster/tunnel.go
```diff
@@ -218,7 +218,10 @@ func (c *Cluster) checkForZarfConnectLabel(ctx context.Context, name string) (Tu
 	zt.remotePort = svc.Spec.Ports[0].TargetPort.IntValue()
 	// if targetPort == 0, look for Port (which is required)
 	if zt.remotePort == 0 {
-		zt.remotePort = c.FindPodContainerPort(ctx, svc)
+		zt.remotePort, err = c.FindPodContainerPort(ctx, svc)
+		if err != nil {
+			return TunnelInfo{}, err
+		}
 	}

 	// Add the url suffix too.
```
21 changes: 8 additions & 13 deletions src/pkg/k8s/common.go
```diff
@@ -9,11 +9,11 @@ import (
 	"fmt"
 	"time"

-	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-
 	"github.com/go-logr/logr/funcr"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"

 	// Include the cloud auth plugins
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -48,12 +48,6 @@ func New(logger Log) (*K8s, error) {

 // WaitForHealthyCluster checks for an available K8s cluster every second until timeout.
 func (k *K8s) WaitForHealthyCluster(ctx context.Context) error {
-	var (
-		err   error
-		nodes *v1.NodeList
-		pods  *v1.PodList
-	)
-
 	const waitDuration = 1 * time.Second

 	timer := time.NewTimer(0)
@@ -77,23 +71,24 @@ func (k *K8s) WaitForHealthyCluster(ctx context.Context) error {
 		}

 		// Make sure there is at least one running Node
-		nodes, err = k.GetNodes(ctx)
-		if err != nil || len(nodes.Items) < 1 {
+		nodeList, err := k.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+		if err != nil || len(nodeList.Items) < 1 {
 			k.Log("No nodes reporting healthy yet: %v\n", err)
 			timer.Reset(waitDuration)
 			continue
 		}

 		// Get the cluster pod list
-		if pods, err = k.GetAllPods(ctx); err != nil {
+		pods, err := k.GetAllPods(ctx)
+		if err != nil {
 			k.Log("Could not get the pod list: %w", err)
 			timer.Reset(waitDuration)
 			continue
 		}

 		// Check that at least one pod is in the 'succeeded' or 'running' state
 		for _, pod := range pods.Items {
-			if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodRunning {
+			if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodRunning {
 				return nil
 			}
 		}
```
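With the shared `var` block gone, each probe now scopes its own variables. A condensed sketch of the timer-reset polling idiom the function uses (package and function names are illustrative; the real loop also checks pod phases):

```go
package healthcheck

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForNode polls until the cluster reports at least one node or ctx is done.
func waitForNode(ctx context.Context, clientset kubernetes.Interface) error {
	const waitDuration = 1 * time.Second
	timer := time.NewTimer(0) // fires immediately on the first iteration
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
			nodeList, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
			if err != nil || len(nodeList.Items) < 1 {
				timer.Reset(waitDuration) // re-arm and retry
				continue
			}
			return nil
		}
	}
}
```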
57 changes: 0 additions & 57 deletions src/pkg/k8s/info.go

This file was deleted.

23 changes: 0 additions & 23 deletions src/pkg/k8s/nodes.go

This file was deleted.

13 changes: 8 additions & 5 deletions src/pkg/k8s/pods.go
```diff
@@ -178,13 +178,16 @@ func (k *K8s) WaitForPodsAndContainers(ctx context.Context, target PodLookup, in
 // FindPodContainerPort will find a pod's container port from a service and return it.
 //
 // Returns 0 if no port is found.
-func (k *K8s) FindPodContainerPort(ctx context.Context, svc corev1.Service) int {
-	selectorLabelsOfPods := MakeLabels(svc.Spec.Selector)
+func (k *K8s) FindPodContainerPort(ctx context.Context, svc corev1.Service) (int, error) {
+	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: svc.Spec.Selector})
+	if err != nil {
+		return 0, err
+	}
 	pods := k.WaitForPodsAndContainers(
 		ctx,
 		PodLookup{
 			Namespace: svc.Namespace,
-			Selector:  selectorLabelsOfPods,
+			Selector:  selector.String(),
 		},
 		nil,
 	)
@@ -194,11 +197,11 @@ func (k *K8s) FindPodContainerPort(ctx context.Context, svc corev1.Service) int
 		for _, container := range pod.Spec.Containers {
 			for _, port := range container.Ports {
 				if port.Name == svc.Spec.Ports[0].TargetPort.String() {
-					return int(port.ContainerPort)
+					return int(port.ContainerPort), nil
 				}
 			}
 		}
 	}

-	return 0
+	return 0, nil
 }
```
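The `MakeLabels` helper is replaced at these call sites by apimachinery's `metav1.LabelSelectorAsSelector`, which both validates the service's selector map and renders the canonical string form expected by list options. A quick sketch (the label values are made up):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A Service's spec.selector is a plain map[string]string.
	svcSelector := map[string]string{"app": "podinfo", "tier": "web"}

	// Validate it and render the canonical selector string.
	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: svcSelector})
	if err != nil {
		panic(err)
	}
	fmt.Println(selector.String()) // app=podinfo,tier=web
}
```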
7 changes: 5 additions & 2 deletions src/pkg/k8s/tunnel.go
```diff
@@ -254,13 +254,16 @@ func (tunnel *Tunnel) getAttachablePodForService(ctx context.Context) (string, e
 	if err != nil {
 		return "", fmt.Errorf("unable to find the service: %w", err)
 	}
-	selectorLabelsOfPods := MakeLabels(service.Spec.Selector)
+	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: service.Spec.Selector})
+	if err != nil {
+		return "", err
+	}

 	servicePods := tunnel.kube.WaitForPodsAndContainers(
 		ctx,
 		PodLookup{
 			Namespace: tunnel.namespace,
-			Selector:  selectorLabelsOfPods,
+			Selector:  selector.String(),
 		},
 		nil,
 	)
```
30 changes: 21 additions & 9 deletions src/pkg/packager/common.go
```diff
@@ -9,23 +9,23 @@ import (
 	"errors"
 	"fmt"
 	"os"
-	"strings"

 	"slices"
+	"strings"

 	"github.com/Masterminds/semver/v3"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+	"github.com/defenseunicorns/zarf/src/config"
 	"github.com/defenseunicorns/zarf/src/config/lang"
 	"github.com/defenseunicorns/zarf/src/internal/packager/template"
 	"github.com/defenseunicorns/zarf/src/pkg/cluster"
-	"github.com/defenseunicorns/zarf/src/pkg/variables"
-	"github.com/defenseunicorns/zarf/src/types"
-
-	"github.com/defenseunicorns/zarf/src/config"
 	"github.com/defenseunicorns/zarf/src/pkg/layout"
 	"github.com/defenseunicorns/zarf/src/pkg/message"
 	"github.com/defenseunicorns/zarf/src/pkg/packager/deprecated"
 	"github.com/defenseunicorns/zarf/src/pkg/packager/sources"
 	"github.com/defenseunicorns/zarf/src/pkg/utils"
+	"github.com/defenseunicorns/zarf/src/pkg/variables"
+	"github.com/defenseunicorns/zarf/src/types"
 )
@@ -227,14 +227,26 @@ func (p *Packager) validatePackageArchitecture(ctx context.Context) error {
 		return nil
 	}

-	clusterArchitectures, err := p.cluster.GetArchitectures(ctx)
+	// Get node architectures
+	nodeList, err := p.cluster.Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return lang.ErrUnableToCheckArch
 	}
+	if len(nodeList.Items) == 0 {
+		return lang.ErrUnableToCheckArch
+	}
+	archMap := map[string]bool{}
+	for _, node := range nodeList.Items {
+		archMap[node.Status.NodeInfo.Architecture] = true
+	}
+	architectures := []string{}
+	for arch := range archMap {
+		architectures = append(architectures, arch)
+	}

 	// Check if the package architecture and the cluster architecture are the same.
-	if !slices.Contains(clusterArchitectures, p.cfg.Pkg.Metadata.Architecture) {
-		return fmt.Errorf(lang.CmdPackageDeployValidateArchitectureErr, p.cfg.Pkg.Metadata.Architecture, strings.Join(clusterArchitectures, ", "))
+	if !slices.Contains(architectures, p.cfg.Pkg.Metadata.Architecture) {
+		return fmt.Errorf(lang.CmdPackageDeployValidateArchitectureErr, p.cfg.Pkg.Metadata.Architecture, strings.Join(architectures, ", "))
 	}

 	return nil
```
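Because the architecture check now goes through the standard client-go surface, the same listing logic can be exercised with client-go's fake clientset (which satisfies `kubernetes.Interface`), no live cluster needed. A sketch (node name and architecture are made up):

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed a fake clientset with a single arm64 node.
	clientset := fake.NewSimpleClientset(&corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
		Status: corev1.NodeStatus{
			NodeInfo: corev1.NodeSystemInfo{Architecture: "arm64"},
		},
	})

	// Same listing logic as the diff above.
	nodeList, err := clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	archMap := map[string]bool{}
	for _, node := range nodeList.Items {
		archMap[node.Status.NodeInfo.Architecture] = true
	}
	fmt.Println(archMap) // map[arm64:true]
}
```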
