Switch to per-image tunnels, CRC on tags, and progressbar for image pushes (#1590)

## Description

This PR creates a tunnel per image push (making it easier to implement concurrency, which may be done in this PR if we can confirm that the known issues are mitigated), moves the CRC from the image name to the tag, and changes the UI to use a progress bar instead of a spinner for better user feedback.
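
For context, a rough sketch of what putting the CRC on the tag (rather than in the image name) could look like. This is an illustration only: the `crcTag` helper, the `-zarf-` separator, and the reference layout are assumptions, not the exact scheme Zarf uses.

```go
// Illustration only: moving a CRC32 checksum from the image name to the tag.
package main

import (
	"fmt"
	"hash/crc32"
)

// crcTag keeps the repository path untouched and appends a checksum of the
// original reference to the tag instead.
func crcTag(registry, repo, tag, originalRef string) string {
	checksum := crc32.ChecksumIEEE([]byte(originalRef))
	return fmt.Sprintf("%s/%s:%s-zarf-%d", registry, repo, tag, checksum)
}

func main() {
	ref := "ghcr.io/defenseunicorns/zarf/agent:v0.25.0"
	// Prints something like 127.0.0.1:31999/defenseunicorns/zarf/agent:v0.25.0-zarf-<checksum>
	fmt.Println(crcTag("127.0.0.1:31999", "defenseunicorns/zarf/agent", "v0.25.0", ref))
}
```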

## Related Issue

Relates to #1568, #1433, #1218, #1364

This will also make #1594 slightly easier.

(See aws/containers-roadmap#853)

Fixes: #1541

## Type of change

- [X] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Other (security config, docs update, etc)

## Checklist before merging

- [X] Tests, docs, ADRs added or updated as needed
- [X] [Contributor Guide Steps](https://github.com/defenseunicorns/zarf/blob/main/CONTRIBUTING.md#developer-workflow) followed
Racer159 authored Apr 18, 2023
1 parent 501e111 commit 5666ae2
Showing 27 changed files with 368 additions and 137 deletions.
4 changes: 2 additions & 2 deletions packages/distros/k3s/common/zarf.yaml
@@ -12,8 +12,8 @@ components:
only:
localOS: linux
description: >
*** REQUIRES ROOT ***
Install K3s, certified Kubernetes distribution built for IoT & Edge computing.
*** REQUIRES ROOT (not sudo) ***
Install K3s, a certified Kubernetes distribution built for IoT & Edge computing.
K3s provides the cluster need for Zarf running in Appliance Mode as well as can
host a low-resource Gitops Service if not using an existing Kubernetes platform.
actions:
6 changes: 5 additions & 1 deletion packages/zarf-registry/chart/templates/hpa.yaml
@@ -26,14 +26,18 @@ spec:
scaleDown:
# Use 60 second stabilization window because zarf will freeze scale down during deploys
stabilizationWindowSeconds: 60
# Initially disable scale down - this gets set to Min later by Zarf (src/test/e2e/20_zarf_init_test.go)
selectPolicy: Disabled
# Scale down one pod per minute
policies:
- type: Pods
value: 1
periodSeconds: 60
periodSeconds: 60
scaleUp:
# Delay initial checks by 30 seconds
stabilizationWindowSeconds: 30
# Scale up as much as is needed
selectPolicy: Max
# Scale up one pod per minute
policies:
- type: Pods
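
The comment above notes that Zarf later switches `selectPolicy` back to `Min` (exercised by `src/test/e2e/20_zarf_init_test.go`). A rough client-go sketch of such a flip is below; the namespace, HPA name, and the assumption that the `behavior` block is already populated are illustrative, not taken from Zarf's source.

```go
package example

import (
	"context"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// enableRegistryScaleDown flips the registry HPA's scale-down policy from
// Disabled to Min so the registry can shrink again after a deploy finishes.
// The namespace and HPA name here are assumptions for illustration.
func enableRegistryScaleDown(clientset kubernetes.Interface) error {
	hpas := clientset.AutoscalingV2().HorizontalPodAutoscalers("zarf")

	hpa, err := hpas.Get(context.TODO(), "zarf-docker-registry", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Assumes spec.behavior.scaleDown is defined by the chart (as shown above).
	minPolicy := autoscalingv2.MinChangePolicySelect
	hpa.Spec.Behavior.ScaleDown.SelectPolicy = &minPolicy

	_, err = hpas.Update(context.TODO(), hpa, metav1.UpdateOptions{})
	return err
}
```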
4 changes: 4 additions & 0 deletions src/cmd/connect.go
@@ -29,6 +29,7 @@ var (
if len(args) > 0 {
target = args[0]
}
spinner := message.NewProgressSpinner("Preparing a tunnel to connect to %s", target)

tunnel, err := cluster.NewTunnel(connectNamespace, connectResourceType, connectResourceName, connectLocalPort, connectRemotePort)
if err != nil {
@@ -38,7 +39,10 @@ var (
if !cliOnly {
tunnel.EnableAutoOpen()
}

tunnel.AddSpinner(spinner)
tunnel.Connect(target, true)
spinner.Success()
},
}

2 changes: 1 addition & 1 deletion src/cmd/destroy.go
@@ -30,7 +30,7 @@ var destroyCmd = &cobra.Command{
Short: lang.CmdDestroyShort,
Long: lang.CmdDestroyLong,
Run: func(cmd *cobra.Command, args []string) {
c, err := cluster.NewClusterWithWait(30 * time.Second)
c, err := cluster.NewClusterWithWait(30*time.Second, true)
if err != nil {
message.Fatalf(err, lang.ErrNoClusterConnection)
}
2 changes: 2 additions & 0 deletions src/cmd/package.go
@@ -89,6 +89,8 @@ var packageDeployCmd = &cobra.Command{
pkgClient := packager.NewOrDie(&pkgConfig)
defer pkgClient.ClearTempPaths()

pterm.Println()

// Deploy the package
if err := pkgClient.Deploy(); err != nil {
message.Fatalf(err, "Failed to deploy package: %s", err.Error())
1 change: 1 addition & 0 deletions src/config/config.go
@@ -134,6 +134,7 @@ func GetCraneOptions(insecure bool, archs ...string) []crane.Option {
OS: "linux",
Architecture: GetArch(archs...),
}),
crane.WithUserAgent("zarf"),
)

return options
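
As a hedged illustration of where these options end up, the sketch below pulls an image through `crane` with `GetCraneOptions`, so every registry request now carries the `zarf` user agent. Only `crane.Pull` and `GetCraneOptions` come from the codebase; the image reference and `main` wrapper are illustrative.

```go
package main

import (
	"fmt"

	"github.com/defenseunicorns/zarf/src/config"
	"github.com/google/go-containerregistry/pkg/crane"
)

func main() {
	// Requests made through these options identify themselves with the "zarf" user agent.
	img, err := crane.Pull("ghcr.io/defenseunicorns/zarf/agent:v0.25.0", config.GetCraneOptions(false)...)
	if err != nil {
		panic(err)
	}

	digest, _ := img.Digest()
	fmt.Println("pulled image with digest", digest)
}
```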
3 changes: 2 additions & 1 deletion src/internal/api/cluster/cluster.go
@@ -26,8 +26,9 @@ func Summary(w http.ResponseWriter, _ *http.Request) {
var hasZarf bool
var k8sRevision string

c, err := cluster.NewClusterWithWait(5 * time.Second)
c, err := cluster.NewClusterWithWait(5*time.Second, false)
rawConfig, _ := clientcmd.NewDefaultClientConfigLoadingRules().GetStartingConfig()

reachable = err == nil
if reachable {
distro, _ = c.Kube.DetectDistro()
28 changes: 23 additions & 5 deletions src/internal/cluster/common.go
@@ -28,7 +28,7 @@ var labels = k8s.Labels{

// NewClusterOrDie creates a new cluster instance and waits up to 30 seconds for the cluster to be ready or throws a fatal error.
func NewClusterOrDie() *Cluster {
c, err := NewClusterWithWait(defaultTimeout)
c, err := NewClusterWithWait(defaultTimeout, true)
if err != nil {
message.Fatalf(err, "Failed to connect to cluster")
}
@@ -37,19 +37,37 @@ func NewClusterOrDie() *Cluster {
}

// NewClusterWithWait creates a new cluster instance and waits for the given timeout for the cluster to be ready.
func NewClusterWithWait(timeout time.Duration) (*Cluster, error) {
func NewClusterWithWait(timeout time.Duration, withSpinner bool) (*Cluster, error) {
var spinner *message.Spinner
if withSpinner {
spinner = message.NewProgressSpinner("Waiting for cluster connection (%s timeout)", timeout.String())
defer spinner.Stop()
}

c := &Cluster{}
var err error

c.Kube, err = k8s.New(message.Debugf, labels)
if err != nil {
return c, err
}
return c, c.Kube.WaitForHealthyCluster(timeout)

err = c.Kube.WaitForHealthyCluster(timeout)
if err != nil {
return c, err
}

if spinner != nil {
spinner.Success()
}

return c, nil
}

// NewCluster creates a new cluster instance without waiting for the cluster to be ready.
func NewCluster() (*Cluster, error) {
var err error
c := &Cluster{}
c.Kube, _ = k8s.New(message.Debugf, labels)
return c, nil
c.Kube, err = k8s.New(message.Debugf, labels)
return c, err
}
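
A small usage sketch of the new `withSpinner` flag, mirroring the two call forms elsewhere in this diff (CLI commands pass `true`, the internal API passes `false`); the wrapper function itself is hypothetical.

```go
package example

import (
	"time"

	"github.com/defenseunicorns/zarf/src/internal/cluster"
)

// connectToCluster is a hypothetical helper showing both forms of the new
// withSpinner flag: interactive commands show the waiting spinner, while
// non-interactive callers wait silently.
func connectToCluster(interactive bool) (*cluster.Cluster, error) {
	if interactive {
		return cluster.NewClusterWithWait(30*time.Second, true)
	}
	return cluster.NewClusterWithWait(5*time.Second, false)
}
```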
9 changes: 5 additions & 4 deletions src/internal/cluster/state.go
@@ -22,10 +22,11 @@ import (

// Zarf Cluster Constants.
const (
ZarfNamespace = "zarf"
ZarfStateSecretName = "zarf-state"
ZarfStateDataKey = "state"
ZarfPackageInfoLabel = "package-deploy-info"
ZarfNamespace = "zarf"
ZarfStateSecretName = "zarf-state"
ZarfStateDataKey = "state"
ZarfPackageInfoLabel = "package-deploy-info"
ZarfInitPackageInfoName = "zarf-package-init"
)

// InitZarfState initializes the Zarf state with the given temporary directory and init configs.
14 changes: 5 additions & 9 deletions src/internal/cluster/tunnel.go
@@ -368,7 +368,6 @@ func (tunnel *Tunnel) establish() (string, error) {
message.Debug("tunnel.Establish()")

var err error
var spinner *message.Spinner

// Track this locally as we may need to retry if the tunnel fails.
localPort := tunnel.localPort
@@ -390,6 +389,7 @@ func (tunnel *Tunnel) establish() (string, error) {
defer globalMutex.Unlock()
}

var spinner *message.Spinner
spinnerMessage := fmt.Sprintf("Opening tunnel %d -> %d for %s/%s in namespace %s",
localPort,
tunnel.remotePort,
@@ -402,8 +402,7 @@ func (tunnel *Tunnel) establish() (string, error) {
spinner = tunnel.spinner
spinner.Updatef(spinnerMessage)
} else {
spinner = message.NewProgressSpinner(spinnerMessage)
defer spinner.Stop()
message.Debug(spinnerMessage)
}

kube, err := k8s.NewWithWait(message.Debugf, labels, defaultTimeout)
@@ -455,19 +454,16 @@ func (tunnel *Tunnel) establish() (string, error) {
// Wait for an error or the tunnel to be ready.
select {
case err = <-errChan:
if tunnel.spinner == nil {
spinner.Stop()
}
return "", fmt.Errorf("unable to start the tunnel: %w", err)
case <-portforwarder.Ready:
// Store for endpoint output
tunnel.localPort = localPort
url := fmt.Sprintf("http://%s:%d%s", config.IPV4Localhost, localPort, tunnel.urlSuffix)
msg := fmt.Sprintf("Creating port forwarding tunnel at %s", url)
if tunnel.spinner == nil {
spinner.Successf(msg)
if tunnel.spinner != nil {
spinner.Updatef("%s", msg)
} else {
spinner.Updatef(msg)
message.Debug(msg)
}
return url, nil
}
15 changes: 15 additions & 0 deletions src/internal/cluster/zarf.go
@@ -43,6 +43,21 @@ func (c *Cluster) GetDeployedZarfPackages() ([]types.DeployedPackage, error) {
return deployedPackages, nil
}

// GetDeployedPackage gets the metadata information about the package name provided (if it exists in the cluster).
// We determine what packages have been deployed to the cluster by looking for specific secrets in the Zarf namespace.
func (c *Cluster) GetDeployedPackage(packageName string) (types.DeployedPackage, error) {
var deployedPackage = types.DeployedPackage{}

// Get the secret that describes the deployed init package
secret, err := c.Kube.GetSecret(ZarfNamespace, config.ZarfPackagePrefix+packageName)
if err != nil {
return deployedPackage, err
}

err = json.Unmarshal(secret.Data["data"], &deployedPackage)
return deployedPackage, err
}

// StripZarfLabelsAndSecretsFromNamespaces removes metadata and secrets from existing namespaces no longer managed by Zarf.
func (c *Cluster) StripZarfLabelsAndSecretsFromNamespaces() {
spinner := message.NewProgressSpinner("Removing zarf metadata & secrets from existing namespaces not managed by Zarf")
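
A hedged usage sketch for the new `GetDeployedPackage` helper; the package name and the fields read from `types.DeployedPackage` are assumptions for illustration.

```go
package example

import (
	"fmt"

	"github.com/defenseunicorns/zarf/src/internal/cluster"
)

// printInitVersion looks up the deployed init package's secret and prints the
// version recorded in its metadata (field layout assumed for this sketch).
func printInitVersion(c *cluster.Cluster) error {
	deployed, err := c.GetDeployedPackage("init")
	if err != nil {
		return err
	}
	fmt.Println("init package version:", deployed.Data.Metadata.Version)
	return nil
}
```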
7 changes: 2 additions & 5 deletions src/internal/packager/images/pull.go
@@ -48,11 +48,8 @@ func (i *ImgConfig) PullAll() error {
spinner := message.NewProgressSpinner("Loading metadata for %d images. %s", imgCount, longer)
defer spinner.Stop()

if message.GetLogLevel() >= message.DebugLevel {
spinner.EnablePreserveWrites()
logs.Warn.SetOutput(spinner)
logs.Progress.SetOutput(spinner)
}
logs.Warn.SetOutput(&message.DebugWriter{})
logs.Progress.SetOutput(&message.DebugWriter{})

for idx, src := range i.ImgList {
spinner.Updatef("Fetching image metadata (%d of %d): %s", idx+1, imgCount, src)
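
The change above stops wiring go-containerregistry's warn/progress logs into the spinner and instead routes them to a `message.DebugWriter`. A minimal sketch of what such a writer could look like, assuming it simply forwards everything to the debug logger (the real implementation lives in Zarf's `message` package and its import path is assumed here):

```go
package example

import (
	"github.com/defenseunicorns/zarf/src/pkg/message"
)

// DebugWriter sends anything written to it to the debug log, so library
// output only appears when debug logging is enabled.
type DebugWriter struct{}

func (DebugWriter) Write(p []byte) (int, error) {
	message.Debug(string(p))
	return len(p), nil
}
```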
