Add datacenter to existing cluster [K8SSAND-615][K8SSAND-784] #262

Merged
11 commits merged on Jan 26, 2022
12 changes: 8 additions & 4 deletions .github/workflows/kind_e2e_tests.yaml
@@ -19,8 +19,12 @@ jobs:
outputs:
image: ${{ steps.set_image_var.outputs.image }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- uses: actions/checkout@v2
if: github.event_name == 'pull_request'
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/checkout@v2
if: github.event_name != 'pull_request'
- name: Set up Docker buildx
uses: docker/setup-buildx-action@v1
- name: Cache Docker layers
@@ -33,7 +37,7 @@ jobs:
- name: Set build tags
id: set_build_tags
run: |
image="k8ssandra-operator/k8ssandra-operator:latest"
image="k8ssandra/k8ssandra-operator:latest"
echo "build_tags=$image" >> $GITHUB_ENV
echo "image=$image" >> $GITHUB_ENV
- name: Update build tags
@@ -126,7 +130,7 @@ jobs:
run: |
docker load --input /tmp/k8ssandra-operator.tar
- name: Setup kind cluster
run: make IMG=${{ needs.build_image.outputs.image }} e2e-setup-single
run: make IMG=${{ needs.build_image.outputs.image }} create-kind-cluster kind-load-image
- name: Run e2e test ( ${{ matrix.e2e_test }} )
run: make E2E_TEST=TestOperator/${{ matrix.e2e_test }} e2e-test
- name: Archive k8s logs
5 changes: 3 additions & 2 deletions .github/workflows/kind_multicluster_e2e_tests.yaml
@@ -33,7 +33,7 @@ jobs:
- name: Set build tags
id: set_build_tags
run: |
image="k8ssandra-operator/k8ssandra-operator:latest"
image="k8ssandra/k8ssandra-operator:latest"
echo "build_tags=$image" >> $GITHUB_ENV
echo "image=$image" >> $GITHUB_ENV
- name: Update build tags
@@ -71,6 +71,7 @@ jobs:
matrix:
e2e_test:
- CreateMultiDatacenterCluster
- AddDcToCluster
- CheckStargateApisWithMultiDcCluster
- CreateMultiStargateAndDatacenter
- CreateMultiReaper
@@ -128,7 +129,7 @@ jobs:
run: |
docker load --input /tmp/k8ssandra-operator.tar
- name: Setup kind clusters
run: make IMG=${{ needs.build_image.outputs.image }} e2e-setup-multi
run: make IMG=${{ needs.build_image.outputs.image }} create-kind-multicluster kind-load-image-multi
- name: Run e2e test ( ${{ matrix.e2e_test }} )
run: make E2E_TEST=TestOperator/${{ matrix.e2e_test }} NODETOOL_STATUS_TIMEOUT=2m e2e-test
- name: Archive k8s logs
3 changes: 2 additions & 1 deletion .github/workflows/kuttl_tests.yaml
@@ -12,6 +12,7 @@ on:
paths-ignore:
- 'docs/**'
- 'CHANGELOG/**'

jobs:
build_image:
name: Build image
@@ -33,7 +34,7 @@ jobs:
- name: Set build tags
id: set_build_tags
run: |
image="k8ssandra-operator/k8ssandra-operator:latest"
image="k8ssandra/k8ssandra-operator:latest"
echo "build_tags=$image" >> $GITHUB_ENV
echo "image=$image" >> $GITHUB_ENV
- name: Update build tags
1 change: 1 addition & 0 deletions CHANGELOG/CHANGELOG-1.0.md
@@ -13,6 +13,7 @@ Changelog for the K8ssandra Operator, new PRs should update the `unreleased` sec
When cutting a new release, update the `unreleased` heading to the tag being generated and date, like `## vX.Y.Z - YYYY-MM-DD` and create a new placeholder section for `unreleased` entries.

## Unreleased
* [FEATURE] [#21](https://github.com/k8ssandra/k8ssandra-operator/issues/21) Add datacenter to existing cluster

## v1.0.0-alpha.3 2022-01-23

65 changes: 37 additions & 28 deletions Makefile
@@ -69,9 +69,15 @@ TEST_ARGS=

NS ?= k8ssandra-operator

CLUSTER_SCOPE = false
# DEPLOYMENT specifies a particular kustomization to use for configuring the operator
# in a particular way, cluster-scoped for example. See config/deployments/README.md for
# more info.
DEPLOYMENT =

# Indicates the number of kind clusters that are being used. Note that the clusters should
# be created with scripts/setup-kind-multicluster.sh.
NUM_CLUSTERS = 2

ifeq ($(DEPLOYMENT), )
DEPLOY_TARGET =
else
@@ -173,38 +179,36 @@ multi-up: cleanup build manifests kustomize docker-build create-kind-multicluste
kubectl config use-context kind-k8ssandra-0
$(KUSTOMIZE) build config/deployments/control-plane$(DEPLOY_TARGET) | kubectl apply --server-side --force-conflicts -f -
##install the data plane
kubectl config use-context kind-k8ssandra-1
$(KUSTOMIZE) build config/deployments/data-plane$(DEPLOY_TARGET) | kubectl apply --server-side --force-conflicts -f -
for ((i = 1; i < $(NUM_CLUSTERS); ++i)); do \
kubectl config use-context kind-k8ssandra-$$i; \
$(KUSTOMIZE) build config/deployments/data-plane$(DEPLOY_TARGET) | kubectl apply --server-side --force-conflicts -f -; \
done
## Create a client config
make create-client-config
make create-clientconfig
## Restart the control plane
kubectl config use-context kind-k8ssandra-0
kubectl -n $(NS) delete pod -l control-plane=k8ssandra-operator
kubectl -n $(NS) rollout status deployment k8ssandra-operator
ifeq ($(DEPLOYMENT), cass-operator-dev)
kubectl -n $(NS) delete pod -l name=cass-operator
kubectl -n $(NS) rollout status deployment cass-operator-controller-manager
endif

multi-reload: build manifests kustomize docker-build kind-load-image-multi cert-manager-multi
# Reload the operator on the control-plane
kubectl config use-context kind-k8ssandra-0
$(KUSTOMIZE) build config/deployments/control-plane$(DEPLOY_TARGET) | kubectl apply --server-side --force-conflicts -f -
kubectl -n $(NS) delete pod -l control-plane=k8ssandra-operator
kubectl -n $(NS) rollout status deployment k8ssandra-operator
ifeq ($(DEPLOYMENT), cass-operator-dev)
kubectl -n $(NS) delete pod -l name=cass-operator
kubectl -n $(NS) rollout status deployment cass-operator-controller-manager
endif
# Reload the operator on the data-plane
kubectl config use-context kind-k8ssandra-1
$(KUSTOMIZE) build config/deployments/data-plane$(DEPLOY_TARGET) | kubectl apply --server-side --force-conflicts -f -
kubectl -n $(NS) delete pod -l control-plane=k8ssandra-operator
kubectl -n $(NS) rollout status deployment k8ssandra-operator
ifeq ($(DEPLOYMENT), cass-operator-dev)
kubectl -n $(NS) delete pod -l name=cass-operator
kubectl -n $(NS) rollout status deployment cass-operator-controller-manager
endif
for ((i = 1; i < $(NUM_CLUSTERS); ++i)); do \
kubectl config use-context kind-k8ssandra-$$i; \
$(KUSTOMIZE) build config/deployments/data-plane$(DEPLOY_TARGET) | kubectl apply --server-side --force-conflicts -f -; \
kubectl -n $(NS) delete pod -l control-plane=k8ssandra-operator; \
kubectl -n $(NS) rollout status deployment k8ssandra-operator; \
kubectl -n $(NS) delete pod -l name=cass-operator; \
kubectl -n $(NS) rollout status deployment cass-operator-controller-manager; \
done

single-deploy:
kubectl config use-context kind-k8ssandra-0
@@ -215,18 +219,20 @@ multi-deploy:
kubectl -n $(NS) apply -f test/testdata/samples/k8ssandra-multi-kind.yaml

cleanup:
kind delete cluster --name k8ssandra-0
kind delete cluster --name k8ssandra-1
for ((i = 0; i < $(NUM_CLUSTERS); ++i)); do \
kind delete cluster --name k8ssandra-$$i; \
done

create-kind-cluster:
scripts/setup-kind-multicluster.sh --clusters 1 --kind-worker-nodes 4

create-kind-multicluster:
scripts/setup-kind-multicluster.sh --clusters 2 --kind-worker-nodes 4
scripts/setup-kind-multicluster.sh --clusters $(NUM_CLUSTERS) --kind-worker-nodes 4

kind-load-image-multi:
kind load docker-image --name k8ssandra-0 ${IMG}
kind load docker-image --name k8ssandra-1 ${IMG}
for ((i = 0; i < $(NUM_CLUSTERS); ++i)); do \
kind load docker-image --name k8ssandra-$$i ${IMG}; \
done

##@ Deployment

@@ -249,15 +255,18 @@ cert-manager: ## Install cert-manager to the cluster
kubectl rollout status deployment cert-manager-webhook -n cert-manager

cert-manager-multi: ## Install cert-manager to the clusters
kubectl config use-context kind-k8ssandra-0
make cert-manager
kubectl config use-context kind-k8ssandra-1
make cert-manager
for ((i = 0; i < $(NUM_CLUSTERS); ++i)); do \
kubectl config use-context kind-k8ssandra-$$i; \
make cert-manager; \
done

create-client-config:
create-clientconfig:
kubectl config use-context kind-k8ssandra-0
make install
scripts/create-clientconfig.sh --namespace $(NS) --src-kubeconfig build/kubeconfigs/k8ssandra-1.yaml --dest-kubeconfig build/kubeconfigs/k8ssandra-0.yaml --in-cluster-kubeconfig build/kubeconfigs/updated/k8ssandra-1.yaml --output-dir clientconfig
for ((i = 0; i < $(NUM_CLUSTERS); ++i)); do \
make install; \
scripts/create-clientconfig.sh --namespace $(NS) --src-kubeconfig build/kubeconfigs/k8ssandra-$$i.yaml --dest-kubeconfig build/kubeconfigs/k8ssandra-0.yaml --in-cluster-kubeconfig build/kubeconfigs/updated/k8ssandra-$$i.yaml --output-dir clientconfig; \
done


CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
29 changes: 27 additions & 2 deletions apis/k8ssandra/v1alpha1/constants.go
@@ -3,11 +3,32 @@ package v1alpha1
const (
ResourceHashAnnotation = "k8ssandra.io/resource-hash"

// SystemReplicationAnnotation provides the initial replication of system keyspaces
// InitialSystemReplicationAnnotation provides the initial replication of system keyspaces
// (system_auth, system_distributed, system_traces) encoded as JSON. This annotation
// is set on a K8ssandraCluster when it is first created. The value does not change
// regardless of whether the replication of the system keyspaces changes.
SystemReplicationAnnotation = "k8ssandra.io/system-replication"
InitialSystemReplicationAnnotation = "k8ssandra.io/initial-system-replication"

// DcReplicationAnnotation tells the operator the replication settings to apply to user
// keyspaces when adding a DC to an existing cluster. The value should be serialized
// JSON, e.g., {"dc2": {"ks1": 3, "ks2": 3}}. All user keyspaces must be specified;
// otherwise, reconciliation will fail with a validation error. If you do not want to
// replicate a particular keyspace, specify a value of 0. Replication settings can be
// specified for multiple DCs; however, existing DCs won't be modified, and only the DC
// currently being added will be updated. Specifying multiple DCs can be useful, though,
// if you add multiple DCs to the cluster at once (note that the CassandraDatacenters
// are still deployed serially).
DcReplicationAnnotation = "k8ssandra.io/dc-replication"

// RebuildSourceDcAnnotation tells the operator the DC from which to stream when
// rebuilding a DC. If not set, the operator will choose the first DC. The value for
// this annotation must specify the name of a CassandraDatacenter whose Ready
// condition is true.
RebuildSourceDcAnnotation = "k8ssandra.io/rebuild-src-dc"
Contributor: A quick question: we leave this (and the other annotation indicating replication factors) on the K8ssandraCluster. Should we remove it after processing has finished, to ensure these rules are reset if someone wants to add another DC?

Contributor (author): Good question. I have asked myself the same. Part of the reason I didn't implement behavior to remove them is that I thought users might want to see that state for auditing purposes. Now I also think it could be good to remove them, because the user might forget about them.

In the case of k8ssandra.io/dc-replication, it won't matter. It only applies to the DC being added, so if it is left in place and another DC is added later, the annotation is ignored and the result is the same as not specifying it at all.

For the k8ssandra.io/rebuild-src-dc annotation, I don't think it's a big deal to leave it. Doing so would basically only alter the default behavior.


RebuildDcAnnotation = "k8ssandra.io/rebuild-dc"

RebuildLabel = "k8ssandra.io/rebuild"

NameLabel = "app.kubernetes.io/name"
NameLabelValue = "k8ssandra-operator"
@@ -35,3 +56,7 @@ const (

DatacenterLabel = "k8ssandra.io/datacenter"
)

var (
SystemKeyspaces = []string{"system_traces", "system_distributed", "system_auth"}
)
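The k8ssandra.io/dc-replication value documented above is plain JSON carried in an annotation string. As a hedged illustration (not part of this PR), the Go snippet below builds a value matching the documented shape {"dc2": {"ks1": 3, "ks2": 3}}; the DC and keyspace names are made up:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Replication to apply to user keyspaces in the DC being added ("dc2" here).
	// A keyspace mapped to 0 is deliberately not replicated to the new DC.
	dcReplication := map[string]map[string]int{
		"dc2": {"ks1": 3, "ks2": 3, "ks_not_replicated": 0},
	}

	value, err := json.Marshal(dcReplication)
	if err != nil {
		panic(err)
	}

	// The serialized JSON is what goes into the K8ssandraCluster's
	// metadata.annotations under "k8ssandra.io/dc-replication".
	annotations := map[string]string{
		"k8ssandra.io/dc-replication": string(value),
	}
	fmt.Println(annotations)
}
```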
24 changes: 24 additions & 0 deletions apis/k8ssandra/v1alpha1/k8ssandracluster_types.go
@@ -156,6 +156,30 @@ func (in *K8ssandraCluster) HasReapers() bool {
return false
}

func (in *K8ssandraCluster) GetInitializedDatacenters() []CassandraDatacenterTemplate {
datacenters := make([]CassandraDatacenterTemplate, 0)
if in != nil && in.Spec.Cassandra != nil {
for _, dc := range in.Spec.Cassandra.Datacenters {
if status, found := in.Status.Datacenters[dc.Meta.Name]; found && status.Cassandra.GetConditionStatus(cassdcapi.DatacenterInitialized) == corev1.ConditionTrue {
datacenters = append(datacenters, dc)
}
}
}
return datacenters
}

func (in *K8ssandraCluster) GetReadyDatacenters() []CassandraDatacenterTemplate {
datacenters := make([]CassandraDatacenterTemplate, 0)
if in != nil && in.Spec.Cassandra != nil {
for _, dc := range in.Spec.Cassandra.Datacenters {
if status, found := in.Status.Datacenters[dc.Meta.Name]; found && status.Cassandra.GetConditionStatus(cassdcapi.DatacenterReady) == corev1.ConditionTrue {
datacenters = append(datacenters, dc)
}
}
}
return datacenters
}

// +kubebuilder:object:root=true

// K8ssandraClusterList contains a list of K8ssandraCluster
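GetInitializedDatacenters and GetReadyDatacenters are plain accessors; the controller code that consumes them lives elsewhere in this PR. Purely as a sketch of how they could pair with the k8ssandra.io/rebuild-src-dc annotation described earlier (the helper name, the fallback to the first ready DC, and the import path are assumptions, not code from this change):

```go
package example // hypothetical package, for illustration only

import (
	"errors"

	// Assumed module path for the API types touched by this PR.
	api "github.com/k8ssandra/k8ssandra-operator/apis/k8ssandra/v1alpha1"
)

// rebuildSourceDc returns the DC to stream from when rebuilding a newly added
// DC: the value of the rebuild-src-dc annotation if present, otherwise the
// first DC whose Ready condition is true.
func rebuildSourceDc(kc *api.K8ssandraCluster) (string, error) {
	if src, found := kc.Annotations[api.RebuildSourceDcAnnotation]; found {
		return src, nil
	}
	if ready := kc.GetReadyDatacenters(); len(ready) > 0 {
		return ready[0].Meta.Name, nil
	}
	return "", errors.New("no ready CassandraDatacenter to rebuild from")
}
```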
2 changes: 1 addition & 1 deletion config/deployments/control-plane/kustomization.yaml
@@ -9,4 +9,4 @@ resources:

images:
- name: k8ssandra/cass-operator
newTag: 9d1c58a5
newTag: 9d1c58a5
12 changes: 12 additions & 0 deletions config/rbac/role.yaml
@@ -59,6 +59,18 @@ rules:
- patch
- update
- watch
- apiGroups:
- control.k8ssandra.io
resources:
- cassandratasks
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources: