endpoints.go
package targetsync

import (
	"context"
	"os"
	"time"

	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)
// K8sEndpointsSource discovers targets from a Kubernetes Endpoints object
// and provides distributed locking via Kubernetes leader election.
type K8sEndpointsSource struct {
	clientset       *kubernetes.Clientset
	name, namespace string
	port            int
}
// NewK8sEndpointsSource builds a source from the given config, using either
// in-cluster credentials or the configured kubeconfig file.
func NewK8sEndpointsSource(cfg *K8sEndpointsConfig) (*K8sEndpointsSource, error) {
	var config *rest.Config
	var err error
	if cfg.K8sConfig.InCluster {
		config, err = rest.InClusterConfig()
	} else {
		config, err = clientcmd.BuildConfigFromFlags("", cfg.K8sConfig.KubeConfigPath)
	}
	if err != nil {
		return nil, err
	}
	c, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return &K8sEndpointsSource{
		clientset: c,
		name:      cfg.Name,
		namespace: cfg.Namespace,
		port:      cfg.Port,
	}, nil
}
// Subscribe polls the named Endpoints object and publishes a snapshot of its
// addresses on the returned channel until ctx is cancelled.
func (s *K8sEndpointsSource) Subscribe(ctx context.Context) (chan []*Target, error) {
	// TODO: configurable size?
	ch := make(chan []*Target, 100)
	go func(ch chan []*Target) {
		defer close(ch)
		for {
			ends, err := s.clientset.CoreV1().Endpoints(s.namespace).Get(s.name, metav1.GetOptions{})
			if err != nil {
				logrus.WithError(err).Warn("failed to fetch endpoints")
			} else {
				targets := []*Target{}
				for _, subset := range ends.Subsets {
					for _, addr := range subset.Addresses {
						targets = append(targets, &Target{
							IP:   addr.IP,
							Port: s.port,
						})
					}
				}
				select {
				case <-ctx.Done():
					return
				case ch <- targets:
				}
			}
			// Pause between polls so the API server is not queried in a
			// tight loop (5s is an assumed default).
			// TODO: make the poll interval configurable.
			select {
			case <-ctx.Done():
				return
			case <-time.After(5 * time.Second):
			}
		}
	}(ch)
	return ch, nil
}
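
// exampleSubscribe is a hypothetical usage sketch, not part of the original
// API: it shows how a caller might build a source and consume Subscribe. The
// config values ("my-service", "default", 8080, in-cluster) are assumptions
// for illustration only.
func exampleSubscribe(ctx context.Context) error {
	cfg := &K8sEndpointsConfig{
		Name:      "my-service",
		Namespace: "default",
		Port:      8080,
	}
	cfg.K8sConfig.InCluster = true
	src, err := NewK8sEndpointsSource(cfg)
	if err != nil {
		return err
	}
	targetsCh, err := src.Subscribe(ctx)
	if err != nil {
		return err
	}
	// Each message is a full snapshot of the current endpoint addresses,
	// not a diff, so the previous set can simply be replaced.
	for targets := range targetsCh {
		for _, t := range targets {
			logrus.Infof("target %s:%d", t.IP, t.Port)
		}
	}
	return nil
}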
// Lock acquires a distributed lock via Kubernetes leader election. The
// returned channel reports true when leadership is acquired and false when
// it is lost; it is closed once the election loop exits.
func (s *K8sEndpointsSource) Lock(ctx context.Context, opts *LockOptions) (<-chan bool, error) {
	// The lock identity must be unique per participant; the hostname (the
	// pod name when running in Kubernetes) is assumed to be unique here.
	id, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	// NOTE: ConfigMapLock is deprecated in newer client-go releases in
	// favor of LeaseLock.
	lock := &resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{
			Name:      opts.Key,
			Namespace: s.namespace,
		},
		Client: s.clientset.CoreV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: id,
		},
	}
	lockedCh := make(chan bool, 1)
	// Run the leader election loop in the background: RunOrDie blocks until
	// the context is cancelled, and callers only observe lock state through
	// the returned channel.
	go func() {
		defer close(lockedCh)
		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock:            lock,
			ReleaseOnCancel: true,
			LeaseDuration:   opts.TTL,
			// TODO: Make configurable
			RenewDeadline: 5 * time.Second,
			RetryPeriod:   2 * time.Second,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: func(ctx context.Context) {
					logrus.Infof("Lock acquired")
					lockedCh <- true
				},
				OnStoppedLeading: func() {
					logrus.Infof("Lock lost")
					lockedCh <- false
				},
				OnNewLeader: func(identity string) {},
			},
		})
	}()
	return lockedCh, nil
}
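
// exampleLock is a hypothetical usage sketch, not part of the original API:
// it waits until this instance holds the lock before doing leader-only work,
// and reacts when leadership is lost. The LockOptions values
// ("target-sync-lock", 15s) are assumptions for illustration only.
func exampleLock(ctx context.Context, src *K8sEndpointsSource) error {
	lockedCh, err := src.Lock(ctx, &LockOptions{
		Key: "target-sync-lock",
		TTL: 15 * time.Second,
	})
	if err != nil {
		return err
	}
	// The channel delivers leadership transitions; it closes when the
	// election loop exits (context cancelled).
	for locked := range lockedCh {
		if locked {
			logrus.Info("became leader; starting leader-only work")
		} else {
			logrus.Info("lost leadership; stopping leader-only work")
		}
	}
	return nil
}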