-
Notifications
You must be signed in to change notification settings - Fork 78
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
requeue after the last node has its node-state label set to Started during cluster creation #77
Changes from 4 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,71 @@ | ||
package scale_up_status_updates | ||
|
||
import ( | ||
"fmt" | ||
"testing" | ||
|
||
. "github.com/onsi/ginkgo" | ||
. "github.com/onsi/gomega" | ||
|
||
ginkgo_util "github.com/datastax/cass-operator/mage/ginkgo" | ||
"github.com/datastax/cass-operator/mage/kubectl" | ||
) | ||
|
||
var ( | ||
testName = "Scale up status updates" | ||
namespace = "test-scale-up-status-updates" | ||
dcName = "dc1" | ||
dcYaml = "../testdata/oss-two-rack-six-node-dc.yaml" | ||
operatorYaml = "../testdata/operator.yaml" | ||
dcResource = fmt.Sprintf("CassandraDatacenter/%s", dcName) | ||
dcLabel = fmt.Sprintf("cassandra.datastax.com/datacenter=%s", dcName) | ||
ns = ginkgo_util.NewWrapper(testName, namespace) | ||
) | ||
|
||
func TestLifecycle(t *testing.T) { | ||
AfterSuite(func() { | ||
logPath := fmt.Sprintf("%s/aftersuite", ns.LogDir) | ||
kubectl.DumpAllLogs(logPath).ExecV() | ||
fmt.Printf("\n\tPost-run logs dumped at: %s\n\n", logPath) | ||
ns.Terminate() | ||
}) | ||
|
||
RegisterFailHandler(Fail) | ||
RunSpecs(t, testName) | ||
} | ||
|
||
var _ = Describe(testName, func() { | ||
Context("when in a new cluster", func() { | ||
Specify("the operator can scale up a datacenter and does not upsert the super user until all nodes have been started", func() { | ||
By("creating a namespace") | ||
err := kubectl.CreateNamespace(namespace).ExecV() | ||
Expect(err).ToNot(HaveOccurred()) | ||
|
||
step := "setting up cass-operator resources via helm chart" | ||
ns.HelmInstall("../../charts/cass-operator-chart") | ||
|
||
ns.WaitForOperatorReady() | ||
|
||
step = "creating a datacenter resource with 2 racks/6 nodes" | ||
k := kubectl.ApplyFiles(dcYaml) | ||
ns.ExecAndLog(step, k) | ||
|
||
ns.WaitForSuperUserUpserted(dcName, 600) | ||
|
||
step = "checking that all nodes have been started" | ||
nodeStatusesHostIds := ns.GetNodeStatusesHostIds(dcName) | ||
Expect(len(nodeStatusesHostIds), 6) | ||
|
||
step = "deleting the dc" | ||
k = kubectl.DeleteFromFiles(dcYaml) | ||
ns.ExecAndLog(step, k) | ||
|
||
step = "checking that the dc no longer exists" | ||
json := "jsonpath={.items}" | ||
k = kubectl.Get("CassandraDatacenter"). | ||
WithLabel(dcLabel). | ||
FormatOutput(json) | ||
ns.WaitForOutputAndLog(step, k, "[]", 300) | ||
}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Most of our tests will typically test that the dc can be successfully deleted at the end of the test scenario. It probably isn't super important to have on every single one (especially since you really aren't doing any crazy patching or unusual things) so I'll let you decide if you want to add the logic to this test or not. See our scale up test for reference if you choose to (again, purely optional from my perspective). There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I will add the deletion check(s). I would rather be consistent with other tests. |
||
}) | ||
}) |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,28 @@ | ||
# Two-rack, six-node open-source Cassandra datacenter used by the
# scale-up status-update integration test.
apiVersion: cassandra.datastax.com/v1beta1
kind: CassandraDatacenter
metadata:
  name: dc1
spec:
  clusterName: cluster1
  serverType: cassandra
  serverVersion: "3.11.6"
  serverImage: datastax/cassandra-mgmtapi-3_11_6:v0.1.0
  configBuilderImage: datastax/cass-config-builder:1.0.0
  managementApiAuth:
    insecure: {}
  size: 6  # total nodes, spread across the racks listed below
  storageConfig:
    cassandraDataVolumeClaimSpec:
      storageClassName: server-storage
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  racks:
    - name: r1
    - name: r2
  config:
    jvm-options:
      # Small heap keeps a 6-node test cluster schedulable on CI workers.
      initial_heap_size: "800m"
      max_heap_size: "800m"
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think you should add a one line comment here
also why not move this directly under
?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
comment added.
I hadn't really considered moving the
if
block. That will require some further discussion to ensure I don't wind up introducing some regressions.There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
yeah let's not introduce instability, I can tackle this when refactoring some stuff in an upcoming PR