state_check.go
package statecheck

import (
    "context"
    "errors"

    apiappsv1 "k8s.io/api/apps/v1"
    "k8s.io/cli-runtime/pkg/resource"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/kyma-project/lifecycle-manager/api/shared"
)

type ManagerStateCheck struct {
    statefulSetChecker     StatefulSetStateChecker
    deploymentStateChecker DeploymentStateChecker
}

type DeploymentStateChecker interface {
    GetState(deploy *apiappsv1.Deployment) (shared.State, error)
}

type StatefulSetStateChecker interface {
    GetState(ctx context.Context, clnt client.Client, statefulSet *apiappsv1.StatefulSet) (shared.State, error)
}

type ManagerKind string

const (
    DeploymentKind  ManagerKind = "Deployment"
    StatefulSetKind ManagerKind = "StatefulSet"
)

var (
    ErrNoManagerProvided = errors.New("failed to find manager in provided resources")
    ErrNoStateDetermined = errors.New("failed to determine state for manager")
)

type Manager struct {
    kind        ManagerKind
    deployment  *apiappsv1.Deployment
    statefulSet *apiappsv1.StatefulSet
}

func NewManagerStateCheck(statefulSetChecker StatefulSetStateChecker,
    deploymentChecker DeploymentStateChecker,
) *ManagerStateCheck {
    return &ManagerStateCheck{
        statefulSetChecker:     statefulSetChecker,
        deploymentStateChecker: deploymentChecker,
    }
}

// GetState determines the state based on the manager. The manager may be either a Deployment
// or a StatefulSet and must be included in the provided resources.
// Will be refactored with https://github.com/kyma-project/lifecycle-manager/issues/1831.
func (m *ManagerStateCheck) GetState(ctx context.Context,
    clnt client.Client,
    resources []*resource.Info,
) (shared.State, error) {
    mgr := findManager(clnt, resources)
    if mgr == nil {
        return shared.StateReady, nil
    }

    switch mgr.kind {
    case StatefulSetKind:
        return m.statefulSetChecker.GetState(ctx, clnt, mgr.statefulSet)
    case DeploymentKind:
        return m.deploymentStateChecker.GetState(mgr.deployment)
    }

    // fall-through that should not be reached
    return shared.StateReady, nil
}
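
// findManager scans the provided resources and returns the first object that can be
// converted into a Deployment or a StatefulSet, together with its detected kind;
// it returns nil when no manager workload is found among the resources.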
func findManager(clt client.Client, resources []*resource.Info) *Manager {
    deploy := &apiappsv1.Deployment{}
    statefulSet := &apiappsv1.StatefulSet{}

    for _, res := range resources {
        if err := clt.Scheme().Convert(res.Object, deploy, nil); err == nil {
            return &Manager{
                kind:       DeploymentKind,
                deployment: deploy,
            }
        }

        if err := clt.Scheme().Convert(res.Object, statefulSet, nil); err == nil {
            return &Manager{
                kind:        StatefulSetKind,
                statefulSet: statefulSet,
            }
        }
    }

    return nil
}
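
// Example usage (a minimal sketch; statefulSetChecker, deploymentChecker, k8sClient, and
// parsedResources are assumptions, not part of this file, and would be constructed elsewhere,
// e.g. from the concrete checker implementations in this package and a parsed manifest):
//
//	check := NewManagerStateCheck(statefulSetChecker, deploymentChecker)
//	state, err := check.GetState(ctx, k8sClient, parsedResources)
//	if err != nil {
//		// handle the error
//	}
//	if state == shared.StateReady {
//		// the module's manager workload is considered ready
//	}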