17 changes: 0 additions & 17 deletions pkg/compose/container.go
@@ -22,23 +22,6 @@ import (
moby "github.com/docker/docker/api/types"
)

const (
// ContainerCreated created status
ContainerCreated = "created"
// ContainerRestarting restarting status
ContainerRestarting = "restarting"
// ContainerRunning running status
ContainerRunning = "running"
// ContainerRemoving removing status
ContainerRemoving = "removing"
// ContainerPaused paused status
ContainerPaused = "paused"
// ContainerExited exited status
ContainerExited = "exited"
// ContainerDead dead status
ContainerDead = "dead"
)

var _ io.ReadCloser = ContainerStdout{}

// ContainerStdout implement ReadCloser for moby.HijackedResponse
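For reference, the status strings deleted above are replaced throughout this PR by the equivalent constants exported from github.com/docker/docker/api/types/container, as the hunks below show (container.StateCreated, container.StateRestarting, container.StateRunning, container.StateExited, container.StateDead). A minimal sketch of how call sites read after the switch, mirroring the isOrphaned change in containers.go; the helper name isStopped is illustrative and not part of the PR:

```go
package compose

import "github.com/docker/docker/api/types/container"

// isStopped reports whether a container has reached a terminal state, using the
// Engine API constants instead of the removed Compose-local strings.
// Illustrative only; the PR compares Summary.State the same way in isOrphaned.
func isStopped(ctr container.Summary) bool {
	return ctr.State == container.StateExited || ctr.State == container.StateDead
}
```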
2 changes: 1 addition & 1 deletion pkg/compose/containers.go
@@ -135,7 +135,7 @@ func isOrphaned(project *types.Project) containerPredicate {
// One-off container
v, ok := c.Labels[api.OneoffLabel]
if ok && v == "True" {
return c.State == ContainerExited || c.State == ContainerDead
return c.State == container.StateExited || c.State == container.StateDead
}
// Service that is not defined in the compose model
service := c.Labels[api.ServiceLabel]
96 changes: 48 additions & 48 deletions pkg/compose/convergence.go
@@ -30,7 +30,7 @@ import (

"github.com/compose-spec/compose-go/v2/types"
"github.com/containerd/platforms"
containerType "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/container"
mmount "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/versions"
specs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -152,19 +152,19 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
})

slices.Reverse(containers)
for i, container := range containers {
for i, ctr := range containers {
if i >= expected {
// Scale Down
// As we sorted containers, obsolete ones and/or highest number will be removed
container := container
traceOpts := append(tracing.ServiceOptions(service), tracing.ContainerOptions(container)...)
ctr := ctr
traceOpts := append(tracing.ServiceOptions(service), tracing.ContainerOptions(ctr)...)
eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "service/scale/down", traceOpts, func(ctx context.Context) error {
return c.service.stopAndRemoveContainer(ctx, container, &service, timeout, false)
return c.service.stopAndRemoveContainer(ctx, ctr, &service, timeout, false)
}))
continue
}

mustRecreate, err := c.mustRecreate(service, container, recreate)
mustRecreate, err := c.mustRecreate(service, ctr, recreate)
if err != nil {
return err
}
@@ -174,9 +174,9 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
return err
}

i, container := i, container
eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "container/recreate", tracing.ContainerOptions(container), func(ctx context.Context) error {
recreated, err := c.service.recreateContainer(ctx, project, service, container, inherit, timeout)
i, ctr := i, ctr
eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "container/recreate", tracing.ContainerOptions(ctr), func(ctx context.Context) error {
recreated, err := c.service.recreateContainer(ctx, project, service, ctr, inherit, timeout)
updated[i] = recreated
return err
}))
@@ -185,20 +185,20 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,

// Enforce non-diverged containers are running
w := progress.ContextWriter(ctx)
name := getContainerProgressName(container)
switch container.State {
case ContainerRunning:
name := getContainerProgressName(ctr)
switch ctr.State {
case container.StateRunning:
w.Event(progress.RunningEvent(name))
case ContainerCreated:
case ContainerRestarting:
case ContainerExited:
case container.StateCreated:
case container.StateRestarting:
case container.StateExited:
default:
container := container
eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/start", tracing.ContainerOptions(container), func(ctx context.Context) error {
return c.service.startContainer(ctx, container)
ctr := ctr
eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/start", tracing.ContainerOptions(ctr), func(ctx context.Context) error {
return c.service.startContainer(ctx, ctr)
}))
}
updated[i] = container
updated[i] = ctr
}

next := nextContainerNumber(containers)
@@ -214,8 +214,8 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
UseNetworkAliases: true,
Labels: mergeLabels(service.Labels, service.CustomLabels),
}
container, err := c.service.createContainer(ctx, project, service, name, number, opts)
updated[actual+i] = container
ctr, err := c.service.createContainer(ctx, project, service, name, number, opts)
updated[actual+i] = ctr
return err
}))
continue
@@ -245,7 +245,7 @@ func (c *convergence) stopDependentContainers(ctx context.Context, project *type
for _, name := range dependents {
dependentStates := c.getObservedState(name)
for i, dependent := range dependentStates {
dependent.State = ContainerExited
dependent.State = container.StateExited
dependentStates[i] = dependent
}
c.setObservedState(name, dependentStates)
@@ -328,7 +328,7 @@ func (c *convergence) resolveSharedNamespaces(service *types.ServiceConfig) erro
return nil
}

func (c *convergence) mustRecreate(expected types.ServiceConfig, actual containerType.Summary, policy string) (bool, error) {
func (c *convergence) mustRecreate(expected types.ServiceConfig, actual container.Summary, policy string) (bool, error) {
if policy == api.RecreateNever {
return false, nil
}
@@ -360,7 +360,7 @@ func (c *convergence) mustRecreate(expected types.ServiceConfig, actual containe
return false, nil
}

func checkExpectedNetworks(expected types.ServiceConfig, actual containerType.Summary, networks map[string]string) bool {
func checkExpectedNetworks(expected types.ServiceConfig, actual container.Summary, networks map[string]string) bool {
// check the networks container is connected to are the expected ones
for net := range expected.Networks {
id := networks[net]
@@ -383,7 +383,7 @@ func checkExpectedNetworks(expected types.ServiceConfig, actual containerType.Su
return false
}

func checkExpectedVolumes(expected types.ServiceConfig, actual containerType.Summary, volumes map[string]string) bool {
func checkExpectedVolumes(expected types.ServiceConfig, actual container.Summary, volumes map[string]string) bool {
// check container's volume mounts and search for the expected ones
for _, vol := range expected.Volumes {
if vol.Type != string(mmount.TypeVolume) {
@@ -423,7 +423,7 @@ func getDefaultContainerName(projectName, serviceName, index string) string {
return strings.Join([]string{projectName, serviceName, index}, api.Separator)
}

func getContainerProgressName(ctr containerType.Summary) string {
func getContainerProgressName(ctr container.Summary) string {
return "Container " + getCanonicalContainerName(ctr)
}

@@ -571,7 +571,7 @@ func shouldWaitForDependency(serviceName string, dependencyConfig types.ServiceD
return true, nil
}

func nextContainerNumber(containers []containerType.Summary) int {
func nextContainerNumber(containers []container.Summary) int {
maxNumber := 0
for _, c := range containers {
s, ok := c.Labels[api.ContainerNumberLabel]
@@ -592,7 +592,7 @@ func nextContainerNumber(containers []containerType.Summary) int {

func (s *composeService) createContainer(ctx context.Context, project *types.Project, service types.ServiceConfig,
name string, number int, opts createOptions,
) (ctr containerType.Summary, err error) {
) (ctr container.Summary, err error) {
w := progress.ContextWriter(ctx)
eventName := "Container " + name
w.Event(progress.CreatingEvent(eventName))
@@ -612,8 +612,8 @@ func (s *composeService) createContainer(ctx context.Context, project *types.Pro
}

func (s *composeService) recreateContainer(ctx context.Context, project *types.Project, service types.ServiceConfig,
replaced containerType.Summary, inherit bool, timeout *time.Duration,
) (created containerType.Summary, err error) {
replaced container.Summary, inherit bool, timeout *time.Duration,
) (created container.Summary, err error) {
w := progress.ContextWriter(ctx)
eventName := getContainerProgressName(replaced)
w.Event(progress.NewEvent(eventName, progress.Working, "Recreate"))
@@ -632,7 +632,7 @@ func (s *composeService) recreateContainer(ctx context.Context, project *types.P
return created, err
}

var inherited *containerType.Summary
var inherited *container.Summary
if inherit {
inherited = &replaced
}
@@ -655,12 +655,12 @@ func (s *composeService) recreateContainer(ctx context.Context, project *types.P
}

timeoutInSecond := utils.DurationSecondToInt(timeout)
err = s.apiClient().ContainerStop(ctx, replaced.ID, containerType.StopOptions{Timeout: timeoutInSecond})
err = s.apiClient().ContainerStop(ctx, replaced.ID, container.StopOptions{Timeout: timeoutInSecond})
if err != nil {
return created, err
}

err = s.apiClient().ContainerRemove(ctx, replaced.ID, containerType.RemoveOptions{})
err = s.apiClient().ContainerRemove(ctx, replaced.ID, container.RemoveOptions{})
if err != nil {
return created, err
}
@@ -677,12 +677,12 @@ func (s *composeService) recreateContainer(ctx context.Context, project *types.P
// force sequential calls to ContainerStart to prevent race condition in engine assigning ports from ranges
var startMx sync.Mutex

func (s *composeService) startContainer(ctx context.Context, ctr containerType.Summary) error {
func (s *composeService) startContainer(ctx context.Context, ctr container.Summary) error {
w := progress.ContextWriter(ctx)
w.Event(progress.NewEvent(getContainerProgressName(ctr), progress.Working, "Restart"))
startMx.Lock()
defer startMx.Unlock()
err := s.apiClient().ContainerStart(ctx, ctr.ID, containerType.StartOptions{})
err := s.apiClient().ContainerStart(ctx, ctr.ID, container.StartOptions{})
if err != nil {
return err
}
@@ -695,11 +695,11 @@ func (s *composeService) createMobyContainer(ctx context.Context,
service types.ServiceConfig,
name string,
number int,
inherit *containerType.Summary,
inherit *container.Summary,
opts createOptions,
w progress.Writer,
) (containerType.Summary, error) {
var created containerType.Summary
) (container.Summary, error) {
var created container.Summary
cfgs, err := s.getCreateConfigs(ctx, project, service, number, inherit, opts)
if err != nil {
return created, err
@@ -733,11 +733,11 @@ func (s *composeService) createMobyContainer(ctx context.Context,
if err != nil {
return created, err
}
created = containerType.Summary{
created = container.Summary{
ID: inspectedContainer.ID,
Labels: inspectedContainer.Config.Labels,
Names: []string{inspectedContainer.Name},
NetworkSettings: &containerType.NetworkSettingsSummary{
NetworkSettings: &container.NetworkSettingsSummary{
Networks: inspectedContainer.NetworkSettings.Networks,
},
}
@@ -834,24 +834,24 @@ func (s *composeService) isServiceHealthy(ctx context.Context, containers Contai
}
name := ctr.Name[1:]

if ctr.State.Status == containerType.StateExited {
if ctr.State.Status == container.StateExited {
return false, fmt.Errorf("container %s exited (%d)", name, ctr.State.ExitCode)
}

if ctr.Config.Healthcheck == nil && fallbackRunning {
// Container does not define a health check, but we can fall back to "running" state
return ctr.State != nil && ctr.State.Status == containerType.StateRunning, nil
return ctr.State != nil && ctr.State.Status == container.StateRunning, nil
}

if ctr.State == nil || ctr.State.Health == nil {
return false, fmt.Errorf("container %s has no healthcheck configured", name)
}
switch ctr.State.Health.Status {
case containerType.Healthy:
case container.Healthy:
// Continue by checking the next container.
case containerType.Unhealthy:
case container.Unhealthy:
return false, fmt.Errorf("container %s is unhealthy", name)
case containerType.Starting:
case container.Starting:
return false, nil
default:
return false, fmt.Errorf("container %s had unexpected health status %q", name, ctr.State.Health.Status)
@@ -866,7 +866,7 @@ func (s *composeService) isServiceCompleted(ctx context.Context, containers Cont
if err != nil {
return false, 0, err
}
if ctr.State != nil && ctr.State.Status == containerType.StateExited {
if ctr.State != nil && ctr.State.Status == container.StateExited {
return true, ctr.State.ExitCode, nil
}
}
@@ -896,7 +896,7 @@ func (s *composeService) startService(ctx context.Context,

w := progress.ContextWriter(ctx)
for _, ctr := range containers.filter(isService(service.Name)) {
if ctr.State == ContainerRunning {
if ctr.State == container.StateRunning {
continue
}

@@ -912,7 +912,7 @@ func (s *composeService) startService(ctx context.Context,

eventName := getContainerProgressName(ctr)
w.Event(progress.StartingEvent(eventName))
err = s.apiClient().ContainerStart(ctx, ctr.ID, containerType.StartOptions{})
err = s.apiClient().ContainerStart(ctx, ctr.ID, container.StartOptions{})
if err != nil {
return err
}
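Most of the remaining churn in convergence.go comes from importing the Engine API package under its canonical name (container) rather than the containerType alias. Because the loop variable in ensureService used to be named container, it would now shadow the package, so it is renamed to ctr. A minimal sketch of the pattern after the rename (illustrative only, not code from this PR):

```go
package compose

import "github.com/docker/docker/api/types/container"

func statesAfterRename(containers []container.Summary) {
	for _, ctr := range containers {
		// With the loop variable named ctr, the identifier "container" still
		// refers to the imported package, so the State* constants resolve.
		if ctr.State == container.StateRunning {
			_ = ctr.ID
		}
	}
}
```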
2 changes: 1 addition & 1 deletion pkg/compose/kill_test.go
@@ -111,7 +111,7 @@ func testContainer(service string, id string, oneOff bool) container.Summary {
ID: id,
Names: []string{name},
Labels: containerLabels(service, oneOff),
State: ContainerExited,
State: container.StateExited,
}
}
