package framework
import (
"context"
"fmt"
"regexp"
"strings"
"testing"
"time"
"github.com/k8ssandra/k8ssandra-operator/test/kubectl"
reaperapi "github.com/k8ssandra/k8ssandra-operator/apis/reaper/v1alpha1"
promapi "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
logrusr "github.com/bombsimon/logrusr/v2"
"github.com/go-logr/logr"
terratestlogger "github.com/gruntwork-io/terratest/modules/logger"
terratesttesting "github.com/gruntwork-io/terratest/modules/testing"
cassdcapi "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
casstaskapi "github.com/k8ssandra/cass-operator/apis/control/v1alpha1"
configapi "github.com/k8ssandra/k8ssandra-operator/apis/config/v1beta1"
controlapi "github.com/k8ssandra/k8ssandra-operator/apis/control/v1alpha1"
k8taskapi "github.com/k8ssandra/k8ssandra-operator/apis/control/v1alpha1"
api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1"
medusaapi "github.com/k8ssandra/k8ssandra-operator/apis/medusa/v1alpha1"
replicationapi "github.com/k8ssandra/k8ssandra-operator/apis/replication/v1alpha1"
stargateapi "github.com/k8ssandra/k8ssandra-operator/apis/stargate/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
Client client.Client
)
// TODO Add a Framework type and make functions method on that type
// By making these functions methods we can pass the testing.T and namespace arguments just
// once in the constructor. We can also include defaults for the timeout and interval
// parameters that show up in multiple functions.
func Init(t *testing.T) {
var err error
err = api.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for k8ssandra-operator")
err = stargateapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for stargate")
err = reaperapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for reaper")
err = configapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for k8ssandra-operator configs")
err = replicationapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for k8ssandra-operator replication")
err = cassdcapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for cass-operator")
err = casstaskapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for cass-operator tasks")
err = promapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for prometheus")
err = medusaapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for medusa")
err = controlapi.AddToScheme(scheme.Scheme)
require.NoError(t, err, "failed to register scheme for control")
// cfg, err := ctrl.GetConfig()
// require.NoError(t, err, "failed to get *rest.Config")
//
// Client, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
// require.NoError(t, err, "failed to create controller-runtime client")
}
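// A typical integration test calls Init once before building a Framework, so that all of
// the CRDs used below are registered with the global scheme (a minimal sketch; the test
// name is illustrative):
//
//	func TestK8ssandraCluster(t *testing.T) {
//		framework.Init(t)
//		// ... construct clients and a Framework, then run subtests
//	}
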
// Framework provides methods for use in both integration and e2e tests.
type Framework struct {
// Client is the client for the control plane cluster, i.e., the cluster in which the
// K8ssandraCluster controller is deployed. Note that this may also be one of the
// data plane clients, if the control plane is also deployed as a data plane.
Client client.Client
// The control plane, that is, the Kubernetes context in which the K8ssandraCluster controller
// is running.
ControlPlaneContext string
// The data planes, that is, the Kubernetes contexts in which the K8ssandraCluster
// controller deploys datacenters. There must be at least one data plane defined.
DataPlaneContexts []string
// remoteClients is a mapping of Kubernetes context names to clients. It includes a client
// for the control plane context as well as clients for all data plane contexts.
remoteClients map[string]client.Client
logger logr.Logger
}
type ClusterKey struct {
types.NamespacedName
K8sContext string
}
func (k ClusterKey) String() string {
return k.K8sContext + string(types.Separator) + k.Namespace + string(types.Separator) + k.Name
}
func NewClusterKey(context, namespace, name string) ClusterKey {
return ClusterKey{
K8sContext: context,
NamespacedName: types.NamespacedName{
Namespace: namespace,
Name: name,
},
}
}
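// An illustrative key for a datacenter in a data plane context (the context and namespace
// names are hypothetical):
//
//	dcKey := framework.NewClusterKey("kind-k8ssandra-1", "test-ns", "dc1")
//	fmt.Println(dcKey) // prints: kind-k8ssandra-1/test-ns/dc1
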
func NewFramework(client client.Client, controlPlaneContext string, dataPlaneContexts []string, remoteClients map[string]client.Client) *Framework {
log := logrusr.New(logrus.New())
terratestlogger.Default = terratestlogger.New(&terratestLoggerBridge{logger: log})
return &Framework{
Client: client,
ControlPlaneContext: controlPlaneContext,
DataPlaneContexts: dataPlaneContexts,
remoteClients: remoteClients,
logger: log,
}
}
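// A minimal construction sketch, assuming clients were already created for each context
// (context names are illustrative):
//
//	remoteClients := map[string]client.Client{
//		"kind-k8ssandra-0": controlPlaneClient,
//		"kind-k8ssandra-1": dataPlaneClient,
//	}
//	f := framework.NewFramework(controlPlaneClient, "kind-k8ssandra-0",
//		[]string{"kind-k8ssandra-1"}, remoteClients)
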
func (f *Framework) getRemoteClient(k8sContext string) (client.Client, error) {
if len(k8sContext) == 0 {
return nil, fmt.Errorf("k8sContext must be specified")
}
remoteClient, found := f.remoteClients[k8sContext]
if !found {
return nil, f.k8sContextNotFound(k8sContext)
}
return remoteClient, nil
}
// Get fetches the object specified by key from the cluster specified by key. An error is
// returned if ClusterKey.K8sContext is not set or if there is no corresponding client.
func (f *Framework) Get(ctx context.Context, key ClusterKey, obj client.Object) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Get(ctx, key.NamespacedName, obj)
}
func (f *Framework) List(ctx context.Context, key ClusterKey, obj client.ObjectList, opts ...client.ListOption) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.List(ctx, obj, opts...)
}
func (f *Framework) Update(ctx context.Context, key ClusterKey, obj client.Object) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Update(ctx, obj)
}
func (f *Framework) Delete(ctx context.Context, key ClusterKey, obj client.Object) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Delete(ctx, obj)
}
func (f *Framework) DeleteAllOf(ctx context.Context, k8sContext string, obj client.Object, opts ...client.DeleteAllOfOption) error {
remoteClient, err := f.getRemoteClient(k8sContext)
if err != nil {
return err
}
return remoteClient.DeleteAllOf(ctx, obj, opts...)
}
func (f *Framework) UpdateStatus(ctx context.Context, key ClusterKey, obj client.Object) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Status().Update(ctx, obj)
}
func (f *Framework) Patch(ctx context.Context, obj client.Object, patch client.Patch, key ClusterKey, opts ...client.PatchOption) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Patch(ctx, obj, patch, opts...)
}
func (f *Framework) PatchStatus(ctx context.Context, obj client.Object, patch client.Patch, key ClusterKey, opts ...client.SubResourcePatchOption) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Status().Patch(ctx, obj, patch, opts...)
}
func (f *Framework) Create(ctx context.Context, key ClusterKey, obj client.Object) error {
remoteClient, err := f.getRemoteClient(key.K8sContext)
if err != nil {
return err
}
return remoteClient.Create(ctx, obj)
}
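// All of the CRUD wrappers above follow the same pattern: resolve the remote client for
// key.K8sContext, then delegate the call. An illustrative round-trip (the object and key
// are hypothetical):
//
//	key := framework.NewClusterKey("kind-k8ssandra-1", "test-ns", "my-config")
//	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name}}
//	require.NoError(t, f.Create(ctx, key, cm))
//	require.NoError(t, f.Get(ctx, key, &corev1.ConfigMap{}))
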
func (f *Framework) CreateNamespace(name string) error {
for k8sContext, remoteClient := range f.remoteClients {
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
f.logger.Info("creating namespace", "Namespace", name, "Context", k8sContext)
if err := remoteClient.Create(context.Background(), namespace); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
}
}
return nil
}
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
func CleanupForKubernetes(input string) string {
if len(validation.IsDNS1035Label(input)) > 0 {
r := regexp.MustCompile(dns1035LabelFmt)
// The input is not a valid DNS-1035 label, so Kubernetes would reject it. Transform it into a suitable string.
input = strings.ToLower(input)
input = strings.ReplaceAll(input, "_", "-")
validParts := r.FindAllString(input, -1)
return strings.Join(validParts, "")
}
return input
}
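// Illustrative inputs and outputs:
//
//	CleanupForKubernetes("My_Test Cluster") // "my-testcluster": lowercased, '_' replaced, invalid runs dropped
//	CleanupForKubernetes("dc1")             // "dc1": already a valid DNS-1035 label, returned unchanged
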
func (f *Framework) k8sContextNotFound(k8sContext string) error {
return fmt.Errorf("context %s not found", k8sContext)
}
// PatchK8ssandraCluster fetches the K8ssandraCluster specified by key in the control plane,
// applies changes via updateFn, and then performs a patch operation.
func (f *Framework) PatchK8ssandraCluster(ctx context.Context, key client.ObjectKey, updateFn func(kc *api.K8ssandraCluster)) error {
kc := &api.K8ssandraCluster{}
err := f.Client.Get(ctx, key, kc)
if err != nil {
return err
}
patch := client.MergeFrom(kc.DeepCopy())
updateFn(kc)
return f.Client.Patch(ctx, kc, patch)
}
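// An illustrative call that mutates one field of the spec (the key and field value are
// hypothetical):
//
//	err := f.PatchK8ssandraCluster(ctx, kcKey, func(kc *api.K8ssandraCluster) {
//		kc.Spec.Cassandra.ServerVersion = "4.0.3"
//	})
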
// SetDatacenterStatusReady fetches the CassandraDatacenter specified by key and persists
// a status update to make the CassandraDatacenter ready. It sets the DatacenterReady and
// DatacenterInitialized conditions to true.
func (f *Framework) SetDatacenterStatusReady(ctx context.Context, key ClusterKey) error {
now := metav1.Now()
return f.PatchDatacenterStatus(ctx, key, func(dc *cassdcapi.CassandraDatacenter) {
dc.Status.CassandraOperatorProgress = cassdcapi.ProgressReady
dc.SetCondition(cassdcapi.DatacenterCondition{
Type: cassdcapi.DatacenterReady,
Status: corev1.ConditionTrue,
LastTransitionTime: now,
})
dc.Status.SetCondition(cassdcapi.DatacenterCondition{
Type: cassdcapi.DatacenterInitialized,
Status: corev1.ConditionTrue,
LastTransitionTime: now,
})
dc.Status.ObservedGeneration = dc.Generation
})
}
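// Integration tests typically invoke this after the operator has created the
// CassandraDatacenter, to simulate cass-operator bringing the DC to a ready state
// (key values are illustrative):
//
//	dcKey := framework.NewClusterKey(f.DataPlaneContexts[0], namespace, "dc1")
//	require.NoError(t, f.SetDatacenterStatusReady(ctx, dcKey))
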
// SetMedusaDeplAvailable fetches the Deployment specified by key and persists a status
// update that marks it as available with one ready replica.
func (f *Framework) SetMedusaDeplAvailable(ctx context.Context, key ClusterKey) error {
return f.PatchDeploymentStatus(ctx, key, func(depl *appsv1.Deployment) {
// Add a condition to the deployment to indicate that it is available
now := metav1.Now()
depl.Status.Conditions = append(depl.Status.Conditions, appsv1.DeploymentCondition{
Type: appsv1.DeploymentAvailable,
Status: corev1.ConditionTrue,
LastTransitionTime: now,
})
depl.Status.ReadyReplicas = 1
depl.Status.Replicas = 1
})
}
func (f *Framework) PatchDeploymentStatus(ctx context.Context, key ClusterKey, updateFn func(depl *appsv1.Deployment)) error {
depl := &appsv1.Deployment{}
err := f.Get(ctx, key, depl)
if err != nil {
return err
}
patch := client.MergeFromWithOptions(depl.DeepCopy(), client.MergeFromWithOptimisticLock{})
updateFn(depl)
remoteClient := f.remoteClients[key.K8sContext]
return remoteClient.Status().Patch(ctx, depl, patch)
}
// UpdateDatacenterGeneration fetches the CassandraDatacenter specified by key and persists
// a status update to make the ObservedGeneration match the Generation. It returns false if
// .metadata.generation does not yet differ from .status.observedGeneration, so callers can
// poll it until the expected generation change has been observed and patched. This is done
// to simulate the behavior of the CassandraDatacenter controller.
func (f *Framework) UpdateDatacenterGeneration(ctx context.Context, t *testing.T, key ClusterKey) bool {
dc := &cassdcapi.CassandraDatacenter{}
err := f.Get(ctx, key, dc)
require.NoError(t, err, "failed to get CassandraDatacenter %s/%s", key.Namespace, key.Name)
if dc.Generation == dc.Status.ObservedGeneration {
// Expected generation change hasn't happened yet
return false
}
// Generation has changed, update the status accordingly
return f.PatchDatacenterStatus(ctx, key, func(dc *cassdcapi.CassandraDatacenter) {
dc.Status.ObservedGeneration = dc.Generation
}) == nil
}
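// Because it returns false until a generation change is observed and patched, it pairs
// naturally with a polling assertion (durations are illustrative):
//
//	require.Eventually(t, func() bool {
//		return f.UpdateDatacenterGeneration(ctx, t, dcKey)
//	}, time.Minute, time.Second, "observedGeneration never caught up")
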
// SetDatacenterStatusStopped fetches the CassandraDatacenter specified by key and persists
// a status update to make the CassandraDatacenter stopped. It sets the DatacenterStopped and
// DatacenterInitialized conditions to true.
func (f *Framework) SetDatacenterStatusStopped(ctx context.Context, key ClusterKey) error {
now := metav1.Now()
return f.PatchDatacenterStatus(ctx, key, func(dc *cassdcapi.CassandraDatacenter) {
dc.Status.CassandraOperatorProgress = cassdcapi.ProgressReady
dc.SetCondition(cassdcapi.DatacenterCondition{
Type: cassdcapi.DatacenterStopped,
Status: corev1.ConditionTrue,
LastTransitionTime: now,
})
dc.Status.SetCondition(cassdcapi.DatacenterCondition{
Type: cassdcapi.DatacenterInitialized,
Status: corev1.ConditionTrue,
LastTransitionTime: now,
})
dc.Status.ObservedGeneration = dc.Generation
})
}
// PatchDatacenterStatus fetches the datacenter specified by key, applies changes via
// updateFn, and then performs a patch operation. key.K8sContext must be set and must
// have a corresponding client.
func (f *Framework) PatchDatacenterStatus(ctx context.Context, key ClusterKey, updateFn func(dc *cassdcapi.CassandraDatacenter)) error {
dc := &cassdcapi.CassandraDatacenter{}
err := f.Get(ctx, key, dc)
if err != nil {
return err
}
patch := client.MergeFromWithOptions(dc.DeepCopy(), client.MergeFromWithOptimisticLock{})
updateFn(dc)
remoteClient := f.remoteClients[key.K8sContext]
return remoteClient.Status().Patch(ctx, dc, patch)
}
func (f *Framework) SetStargateStatusReady(ctx context.Context, key ClusterKey) error {
return f.PatchStargateStatus(ctx, key, func(sg *stargateapi.Stargate) {
now := metav1.Now()
sg.Status.Progress = stargateapi.StargateProgressRunning
sg.Status.AvailableReplicas = 1
sg.Status.Replicas = 1
sg.Status.ReadyReplicas = 1
sg.Status.UpdatedReplicas = 1
sg.Status.SetCondition(stargateapi.StargateCondition{
Type: stargateapi.StargateReady,
Status: corev1.ConditionTrue,
LastTransitionTime: &now,
})
})
}
func (f *Framework) PatchStargateStatus(ctx context.Context, key ClusterKey, updateFn func(sg *stargateapi.Stargate)) error {
sg := &stargateapi.Stargate{}
err := f.Get(ctx, key, sg)
if err != nil {
return err
}
patch := client.MergeFromWithOptions(sg.DeepCopy(), client.MergeFromWithOptimisticLock{})
updateFn(sg)
remoteClient := f.remoteClients[key.K8sContext]
return remoteClient.Status().Patch(ctx, sg, patch)
}
func (f *Framework) SetReaperStatusReady(ctx context.Context, key ClusterKey) error {
return f.PatchReaperStatus(ctx, key, func(r *reaperapi.Reaper) {
r.Status.Progress = reaperapi.ReaperProgressRunning
r.Status.SetReady()
})
}
func (f *Framework) PatchReaperStatus(ctx context.Context, key ClusterKey, updateFn func(r *reaperapi.Reaper)) error {
r := &reaperapi.Reaper{}
err := f.Get(ctx, key, r)
if err != nil {
return err
}
patch := client.MergeFromWithOptions(r.DeepCopy(), client.MergeFromWithOptimisticLock{})
updateFn(r)
remoteClient := f.remoteClients[key.K8sContext]
return remoteClient.Status().Patch(ctx, r, patch)
}
func (f *Framework) PatchCassandraTaskStatus(ctx context.Context, key ClusterKey, updateFn func(sg *casstaskapi.CassandraTask)) error {
task := &casstaskapi.CassandraTask{}
err := f.Get(ctx, key, task)
if err != nil {
return err
}
patch := client.MergeFromWithOptions(task.DeepCopy(), client.MergeFromWithOptimisticLock{})
updateFn(task)
remoteClient := f.remoteClients[key.K8sContext]
return remoteClient.Status().Patch(ctx, task, patch)
}
// WaitForDeploymentToBeReady blocks until the Deployment is ready. If
// ClusterKey.K8sContext is empty, this method blocks until the deployment is ready in all
// remote clusters.
func (f *Framework) WaitForDeploymentToBeReady(key ClusterKey, timeout, interval time.Duration) error {
if len(key.K8sContext) == 0 {
for k8sContext := range f.remoteClients {
opts := kubectl.Options{Namespace: key.Namespace, Context: k8sContext}
err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
if err := kubectl.RolloutStatus(ctx, opts, "Deployment", key.Name); err != nil {
return false, err
}
return true, nil
})
if err != nil {
return err
}
}
return nil
} else {
if _, found := f.remoteClients[key.K8sContext]; !found {
return f.k8sContextNotFound(key.K8sContext)
}
opts := kubectl.Options{Namespace: key.Namespace, Context: key.K8sContext}
return wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(ctx context.Context) (bool, error) {
if err := kubectl.RolloutStatus(ctx, opts, "Deployment", key.Name); err != nil {
return false, err
}
return true, nil
})
}
}
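// For example, waiting on a Stargate Deployment in a single context (the name and
// durations are illustrative):
//
//	key := framework.NewClusterKey("kind-k8ssandra-1", namespace, "cluster1-dc1-default-stargate-deployment")
//	err := f.WaitForDeploymentToBeReady(key, 5*time.Minute, 10*time.Second)
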
func (f *Framework) DeleteK8ssandraCluster(ctx context.Context, key client.ObjectKey, timeout time.Duration, interval time.Duration) error {
kc := &api.K8ssandraCluster{}
err := f.Client.Get(ctx, key, kc)
if err != nil {
return err
}
err = f.Client.Delete(ctx, kc)
if err != nil {
return err
}
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
err := f.Client.Get(ctx, key, kc)
return err != nil && errors.IsNotFound(err), nil
})
}
func (f *Framework) DeleteK8ssandraClusters(namespace string, timeout, interval time.Duration) error {
f.logger.Info("Deleting K8ssandraClusters", "Namespace", namespace)
k8ssandra := &api.K8ssandraCluster{}
// We set the context here so that it correctly fails the DeleteAllOf operation if the context has timed out
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
if err := f.Client.DeleteAllOf(ctx, k8ssandra, client.InNamespace(namespace)); err != nil {
f.logger.Error(err, "Failed to delete K8ssandraClusters")
return err
}
return wait.PollUntilContextCancel(ctx, interval, true, func(ctx context.Context) (bool, error) {
list := &api.K8ssandraClusterList{}
if err := context.Cause(ctx); err != nil {
f.logger.Error(err, "Failed to delete K8ssandraClusters since context is cancelled in loop")
}
if err := f.Client.List(ctx, list, client.InNamespace(namespace)); err != nil {
f.logger.Info("Waiting for k8ssandracluster deletion", "error", err)
return false, nil
}
f.logger.Info("Waiting for K8ssandraClusters to be deleted", "Namespace", namespace, "Count", len(list.Items))
return len(list.Items) == 0, nil
})
}
func (f *Framework) DeleteCassandraDatacenters(namespace string, timeout, interval time.Duration) error {
f.logger.Info("Deleting CassandraDatacenters", "Namespace", namespace)
dc := &cassdcapi.CassandraDatacenter{}
ctx := context.Background()
if err := f.Client.DeleteAllOf(ctx, dc, client.InNamespace(namespace)); err != nil {
f.logger.Error(err, "Failed to delete CassandraDatacenters")
}
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
list := &cassdcapi.CassandraDatacenterList{}
err := f.Client.List(ctx, list, client.InNamespace(namespace))
if err != nil {
f.logger.Info("Waiting for CassandraDatacenter deletion", "error", err)
return false, nil
}
f.logger.Info("Waiting for CassandraDatacenters to be deleted", "Namespace", namespace, "Count", len(list.Items))
return len(list.Items) == 0, nil
})
}
// NewWithDatacenter is a function generator for withDatacenter that is bound to ctx and key.
func (f *Framework) NewWithDatacenter(ctx context.Context, key ClusterKey) func(func(*cassdcapi.CassandraDatacenter) bool) func() bool {
return func(condition func(dc *cassdcapi.CassandraDatacenter) bool) func() bool {
return f.withDatacenter(ctx, key, condition)
}
}
// withDatacenter fetches the CassandraDatacenter specified by key and then calls condition.
func (f *Framework) withDatacenter(ctx context.Context, key ClusterKey, condition func(*cassdcapi.CassandraDatacenter) bool) func() bool {
return func() bool {
remoteClient, found := f.remoteClients[key.K8sContext]
if !found {
f.logger.Error(f.k8sContextNotFound(key.K8sContext), "cannot lookup CassandraDatacenter", "key", key)
return false
}
dc := &cassdcapi.CassandraDatacenter{}
if err := remoteClient.Get(ctx, key.NamespacedName, dc); err == nil {
return condition(dc)
} else {
// Don't log the error when it is NotFound: that is expected while waiting for the
// object to appear, and skipping it helps cut down on the verbosity of the test output.
if !errors.IsNotFound(err) {
f.logger.Error(err, "failed to get CassandraDatacenter", "key", key)
}
return false
}
}
}
func (f *Framework) DatacenterExists(ctx context.Context, key ClusterKey) func() bool {
withDc := f.NewWithDatacenter(ctx, key)
return withDc(func(dc *cassdcapi.CassandraDatacenter) bool {
return true
})
}
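// These condition generators are designed to plug into polling assertions. A typical
// usage sketch (durations are illustrative):
//
//	withDc := f.NewWithDatacenter(ctx, dcKey)
//	assert.Eventually(t, withDc(func(dc *cassdcapi.CassandraDatacenter) bool {
//		return dc.Status.CassandraOperatorProgress == cassdcapi.ProgressReady
//	}), time.Minute, time.Second, "timed out waiting for datacenter to become ready")
//
//	// Or, to merely check for existence:
//	assert.Eventually(t, f.DatacenterExists(ctx, dcKey), time.Minute, time.Second)
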
func (f *Framework) MedusaConfigExists(ctx context.Context, k8sContext string, medusaConfigKey ClusterKey) func() bool {
remoteClient, found := f.remoteClients[k8sContext]
if !found {
f.logger.Error(f.k8sContextNotFound(k8sContext), "cannot lookup CassandraDatacenter", "context", k8sContext)
return func() bool { return false }
}
medusaConfig := &medusaapi.MedusaConfiguration{}
if err := remoteClient.Get(ctx, medusaConfigKey.NamespacedName, medusaConfig); err != nil {
f.logger.Error(err, "failed to get MedusaConfiguration", "key", medusaConfigKey)
return func() bool { return false }
} else {
return func() bool { return true }
}
}
// NewWithCassTask is a function generator for withCassTask that is bound to ctx and key.
func (f *Framework) NewWithCassTask(ctx context.Context, key ClusterKey) func(func(*casstaskapi.CassandraTask) bool) func() bool {
return func(condition func(task *casstaskapi.CassandraTask) bool) func() bool {
return f.withCassTask(ctx, key, condition)
}
}
// withCassTask fetches the CassandraTask specified by key and then calls condition.
func (f *Framework) withCassTask(ctx context.Context, key ClusterKey, condition func(task *casstaskapi.CassandraTask) bool) func() bool {
return func() bool {
remoteClient, found := f.remoteClients[key.K8sContext]
if !found {
f.logger.Error(f.k8sContextNotFound(key.K8sContext), "cannot lookup CassandraDatacenter", "key", key)
return false
}
dc := &casstaskapi.CassandraTask{}
if err := remoteClient.Get(ctx, key.NamespacedName, dc); err == nil {
return condition(dc)
} else {
if !errors.IsNotFound(err) {
f.logger.Error(err, "failed to get CassandraTask", "key", key)
}
return false
}
}
}
func (f *Framework) CassTaskExists(ctx context.Context, key ClusterKey) func() bool {
withCassTask := f.NewWithCassTask(ctx, key)
return withCassTask(func(dc *casstaskapi.CassandraTask) bool {
return true
})
}
// NewWithK8ssandraTask is a function generator for withK8ssandraTask that is bound to ctx and key.
func (f *Framework) NewWithK8ssandraTask(ctx context.Context, key ClusterKey) func(func(*k8taskapi.K8ssandraTask) bool) func() bool {
return func(condition func(task *k8taskapi.K8ssandraTask) bool) func() bool {
return f.withK8ssandraTask(ctx, key, condition)
}
}
// withK8ssandraTask fetches the K8ssandraTask specified by key and then calls condition.
func (f *Framework) withK8ssandraTask(ctx context.Context, key ClusterKey, condition func(task *k8taskapi.K8ssandraTask) bool) func() bool {
return func() bool {
remoteClient, found := f.remoteClients[key.K8sContext]
if !found {
f.logger.Error(f.k8sContextNotFound(key.K8sContext), "cannot lookup CassandraDatacenter", "key", key)
return false
}
dc := &k8taskapi.K8ssandraTask{}
if err := remoteClient.Get(ctx, key.NamespacedName, dc); err == nil {
return condition(dc)
} else {
if !errors.IsNotFound(err) {
f.logger.Error(err, "failed to get CassandraTask", "key", key)
}
return false
}
}
}
func (f *Framework) K8ssandraTaskExists(ctx context.Context, key ClusterKey) func() bool {
withK8ssandraTask := f.NewWithK8ssandraTask(ctx, key)
return withK8ssandraTask(func(dc *k8taskapi.K8ssandraTask) bool {
return true
})
}
// NewWithStargate is a function generator for withStargate that is bound to ctx and key.
func (f *Framework) NewWithStargate(ctx context.Context, key ClusterKey) func(func(stargate *stargateapi.Stargate) bool) func() bool {
return func(condition func(*stargateapi.Stargate) bool) func() bool {
return f.withStargate(ctx, key, condition)
}
}
// withStargate fetches the Stargate specified by key and then calls condition.
func (f *Framework) withStargate(ctx context.Context, key ClusterKey, condition func(*stargateapi.Stargate) bool) func() bool {
return func() bool {
remoteClient, found := f.remoteClients[key.K8sContext]
if !found {
f.logger.Error(f.k8sContextNotFound(key.K8sContext), "cannot lookup Stargate", "key", key)
return false
}
stargate := &stargateapi.Stargate{}
if err := remoteClient.Get(ctx, key.NamespacedName, stargate); err == nil {
return condition(stargate)
} else {
f.logger.Error(err, "failed to get Stargate", "key", key)
return false
}
}
}
func (f *Framework) StargateExists(ctx context.Context, key ClusterKey) func() bool {
withStargate := f.NewWithStargate(ctx, key)
return withStargate(func(s *stargateapi.Stargate) bool {
return true
})
}
// NewWithReaper is a function generator for withReaper that is bound to ctx and key.
func (f *Framework) NewWithReaper(ctx context.Context, key ClusterKey) func(func(reaper *reaperapi.Reaper) bool) func() bool {
return func(condition func(*reaperapi.Reaper) bool) func() bool {
return f.withReaper(ctx, key, condition)
}
}
// withReaper fetches the Reaper specified by key and then calls condition.
func (f *Framework) withReaper(ctx context.Context, key ClusterKey, condition func(*reaperapi.Reaper) bool) func() bool {
return func() bool {
remoteClient, found := f.remoteClients[key.K8sContext]
if !found {
f.logger.Error(f.k8sContextNotFound(key.K8sContext), "cannot lookup Reaper", "key", key)
return false
}
reaper := &reaperapi.Reaper{}
if err := remoteClient.Get(ctx, key.NamespacedName, reaper); err == nil {
return condition(reaper)
} else {
return false
}
}
}
func (f *Framework) ReaperExists(ctx context.Context, key ClusterKey) func() bool {
withReaper := f.NewWithReaper(ctx, key)
return withReaper(func(r *reaperapi.Reaper) bool {
return true
})
}
type terratestLoggerBridge struct {
logger logr.Logger
}
func (c *terratestLoggerBridge) Logf(t terratesttesting.TestingT, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
c.logger.Info(msg)
}
func (f *Framework) ContainerHasVolumeMount(container corev1.Container, volumeName, volumePath string) bool {
for _, volume := range container.VolumeMounts {
if volume.Name == volumeName && volume.MountPath == volumePath {
return true
}
}
return false
}
func (f *Framework) ContainerHasEnvVar(container corev1.Container, envVarName, envVarValue string) bool {
for _, envVar := range container.Env {
if envVar.Name == envVarName && (envVar.Value == envVarValue || envVarValue == "") {
return true
}
}
return false
}
func (f *Framework) AssertObjectDoesNotExist(ctx context.Context, t *testing.T, key ClusterKey, obj client.Object, timeout, interval time.Duration) {
assert.Eventually(t, func() bool {
err := f.Get(ctx, key, obj)
return err != nil && errors.IsNotFound(err)
}, timeout, interval, fmt.Sprintf("failed to verify object (%+v) does not exist", key))
}
// NewAllPodsEndpoints simulates the *-all-pods-service Endpoints that lists the nodes of a DC (in real life, this is
// done by the CassandraDatacenter's StatefulSet).
func (f *Framework) NewAllPodsEndpoints(
kcKey ClusterKey, kc *api.K8ssandraCluster, dcKey ClusterKey, podIp string,
) *corev1.Endpoints {
return &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Namespace: dcKey.Namespace,
Name: fmt.Sprintf("%s-%s-all-pods-service", kc.SanitizedName(), dcKey.Name),
Labels: map[string]string{
api.K8ssandraClusterNamespaceLabel: kcKey.Namespace,
api.K8ssandraClusterNameLabel: kcKey.Name,
},
},
Subsets: []corev1.EndpointSubset{
{
Addresses: []corev1.EndpointAddress{{IP: podIp}},
Ports: []corev1.EndpointPort{{Name: "mock-port", Port: 9042, Protocol: corev1.ProtocolTCP}},
},
},
}
}
func (f *Framework) GetContactPointsService(
ctx context.Context, kcKey ClusterKey, kc *api.K8ssandraCluster, dcKey ClusterKey,
) (*corev1.Service, *corev1.Endpoints, error) {
serviceKey := types.NamespacedName{
Namespace: kcKey.Namespace,
Name: fmt.Sprintf("%s-%s-contact-points-service", kc.SanitizedName(), dcKey.Name),
}
service := &corev1.Service{}
err := f.Client.Get(ctx, serviceKey, service)
if err != nil {
return nil, nil, err
}
endpoints := &corev1.Endpoints{}
err = f.Client.Get(ctx, serviceKey, endpoints)
if err != nil {
return nil, nil, err
}
return service, endpoints, nil
}