From a3823ba1fb34d9b95b8d69921517ee32223dca45 Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Thu, 17 Nov 2022 09:31:44 +1100 Subject: [PATCH] cassandra_int_tests test node up --- .../cassandra-cluster-v4/docker-compose.yaml | 3 +- .../cassandra-3.11.13/cassandra.yaml | 2 +- .../cassandra-4.0.6/cassandra.yaml | 4 +-- .../cassandra-tls-4.0.6/cassandra.yaml | 4 +-- .../cassandra/sink_cluster/node_pool.rs | 2 +- .../cluster/single_rack_v4.rs | 34 ++++++++++++++++--- test-helpers/src/docker_compose.rs | 11 ++++++ 7 files changed, 49 insertions(+), 11 deletions(-) diff --git a/shotover-proxy/example-configs/cassandra-cluster-v4/docker-compose.yaml b/shotover-proxy/example-configs/cassandra-cluster-v4/docker-compose.yaml index 777fd2e19..6e77641d3 100644 --- a/shotover-proxy/example-configs/cassandra-cluster-v4/docker-compose.yaml +++ b/shotover-proxy/example-configs/cassandra-cluster-v4/docker-compose.yaml @@ -27,8 +27,9 @@ services: MIN_HEAP_SIZE: "400M" HEAP_NEWSIZE: "48M" volumes: + # Using volume instead of tmpfs adds 3 seconds to the runtime of the cassandra standard_test_suite but allows running tests that restart nodes &volumes - - type: tmpfs + - type: volume target: /var/lib/cassandra command: &command cassandra -f -Dcassandra.initial_token="$$CASSANDRA_INITIAL_TOKENS" -Dcassandra.native_transport_port=9044 diff --git a/shotover-proxy/example-configs/docker-images/cassandra-3.11.13/cassandra.yaml b/shotover-proxy/example-configs/docker-images/cassandra-3.11.13/cassandra.yaml index c7b4dc76b..3a138616c 100644 --- a/shotover-proxy/example-configs/docker-images/cassandra-3.11.13/cassandra.yaml +++ b/shotover-proxy/example-configs/docker-images/cassandra-3.11.13/cassandra.yaml @@ -23,7 +23,7 @@ cluster_name: 'Test Cluster' # # If you already have a cluster with 1 token per node, and wish to migrate to # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations -num_tokens: 256 +num_tokens: 128 # Triggers automatic allocation of num_tokens tokens for 
this node. The allocation # algorithm attempts to choose tokens in a way that optimizes replicated load over diff --git a/shotover-proxy/example-configs/docker-images/cassandra-4.0.6/cassandra.yaml b/shotover-proxy/example-configs/docker-images/cassandra-4.0.6/cassandra.yaml index 43da4be5b..ed736e828 100644 --- a/shotover-proxy/example-configs/docker-images/cassandra-4.0.6/cassandra.yaml +++ b/shotover-proxy/example-configs/docker-images/cassandra-4.0.6/cassandra.yaml @@ -24,7 +24,7 @@ cluster_name: 'Test Cluster' # See https://cassandra.apache.org/doc/latest/getting_started/production.html#tokens for # best practice information about num_tokens. # -#num_tokens: 16 +num_tokens: 128 # Triggers automatic allocation of num_tokens tokens for this node. The allocation # algorithm attempts to choose tokens in a way that optimizes replicated load over @@ -47,7 +47,7 @@ allocate_tokens_for_local_replication_factor: 3 # vnodes (num_tokens > 1, above) -- in which case you should provide a # comma-separated list -- it's primarily used when adding nodes to legacy clusters # that do not have vnodes enabled. -initial_token: 0 +#initial_token: 0 # May either be "true" or "false" to enable globally hinted_handoff_enabled: true diff --git a/shotover-proxy/example-configs/docker-images/cassandra-tls-4.0.6/cassandra.yaml b/shotover-proxy/example-configs/docker-images/cassandra-tls-4.0.6/cassandra.yaml index 02d161b81..51d14fe7b 100644 --- a/shotover-proxy/example-configs/docker-images/cassandra-tls-4.0.6/cassandra.yaml +++ b/shotover-proxy/example-configs/docker-images/cassandra-tls-4.0.6/cassandra.yaml @@ -24,7 +24,7 @@ cluster_name: 'Test Cluster' # See https://cassandra.apache.org/doc/latest/getting_started/production.html#tokens for # best practice information about num_tokens. # -#num_tokens: 16 +num_tokens: 128 # Triggers automatic allocation of num_tokens tokens for this node. 
The allocation # algorithm attempts to choose tokens in a way that optimizes replicated load over @@ -47,7 +47,7 @@ allocate_tokens_for_local_replication_factor: 3 # vnodes (num_tokens > 1, above) -- in which case you should provide a # comma-separated list -- it's primarily used when adding nodes to legacy clusters # that do not have vnodes enabled. -initial_token: 0 +#initial_token: 0 # May either be "true" or "false" to enable globally hinted_handoff_enabled: true diff --git a/shotover-proxy/src/transforms/cassandra/sink_cluster/node_pool.rs b/shotover-proxy/src/transforms/cassandra/sink_cluster/node_pool.rs index 313656671..cd4ccd16e 100644 --- a/shotover-proxy/src/transforms/cassandra/sink_cluster/node_pool.rs +++ b/shotover-proxy/src/transforms/cassandra/sink_cluster/node_pool.rs @@ -69,7 +69,7 @@ impl NodePool { for node in self.nodes.drain(..) { if let Some(outbound) = node.outbound { for new_node in &mut new_nodes { - if new_node.host_id == node.host_id { + if new_node.host_id == node.host_id && new_node.is_up { new_node.outbound = Some(outbound); break; } diff --git a/shotover-proxy/tests/cassandra_int_tests/cluster/single_rack_v4.rs b/shotover-proxy/tests/cassandra_int_tests/cluster/single_rack_v4.rs index 94eef8741..e7b74d7f0 100644 --- a/shotover-proxy/tests/cassandra_int_tests/cluster/single_rack_v4.rs +++ b/shotover-proxy/tests/cassandra_int_tests/cluster/single_rack_v4.rs @@ -264,6 +264,10 @@ pub async fn test_node_going_down( run_query(&connection_shotover, "CREATE KEYSPACE cluster_single_rack_node_going_down WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 };").await; run_query(&connection_shotover, "CREATE TABLE cluster_single_rack_node_going_down.test_table (pk varchar PRIMARY KEY, col1 int, col2 boolean);").await; + // setup data to read + run_query(&connection_shotover, "INSERT INTO cluster_single_rack_node_going_down.test_table (pk, col1, col2) VALUES ('pk1', 42, true);").await; + run_query(&connection_shotover, 
"INSERT INTO cluster_single_rack_node_going_down.test_table (pk, col1, col2) VALUES ('pk2', 413, false);").await; + { let event_connection_direct = CassandraConnection::new("172.16.1.2", 9044, CassandraDriver::CdrsTokio).await; @@ -322,15 +326,37 @@ pub async fn test_node_going_down( let new_connection = CassandraConnection::new("127.0.0.1", 9042, driver).await; - // setup data to read - run_query(&new_connection, "INSERT INTO cluster_single_rack_node_going_down.test_table (pk, col1, col2) VALUES ('pk1', 42, true);").await; - run_query(&new_connection, "INSERT INTO cluster_single_rack_node_going_down.test_table (pk, col1, col2) VALUES ('pk2', 413, false);").await; - // test that shotover handles new connections after node goes down test_connection_handles_node_down(&new_connection).await; // test that shotover handles preexisting connections after node goes down test_connection_handles_node_down(&connection_shotover).await; + + compose.start_service("cassandra-two"); + + // The direct connection should allow all events to pass through + let event = timeout(Duration::from_secs(120), event_recv_direct.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!( + event, + ServerEvent::StatusChange(StatusChange { + change_type: StatusChangeType::Up, + addr: "172.16.1.3:9044".parse().unwrap() + }) + ); + // we have already received an event directly from the cassandra instance so it's reasonable to + // expect shotover to have processed that event within 10 seconds if it was ever going to + timeout(Duration::from_secs(10), event_recv_shotover.recv()) + .await + .expect_err("CassandraSinkCluster must filter out this event"); + + let new_new_connection = CassandraConnection::new("127.0.0.1", 9042, driver).await; + + test_connection_handles_node_down(&new_new_connection).await; + test_connection_handles_node_down(&new_connection).await; + test_connection_handles_node_down(&connection_shotover).await; } std::mem::drop(connection_shotover); diff --git 
a/test-helpers/src/docker_compose.rs b/test-helpers/src/docker_compose.rs index dd52601b0..7f29b4226 100644 --- a/test-helpers/src/docker_compose.rs +++ b/test-helpers/src/docker_compose.rs @@ -106,6 +106,17 @@ impl DockerCompose { .unwrap(); } + /// Starts the container with the provided service name + pub fn start_service(&self, service_name: &str) { + run_command( + "docker-compose", + &["-f", &self.file_path, "start", service_name], + ) + .unwrap(); + + // TODO: call wait_for_containers_to_startup + } + fn wait_for_containers_to_startup(&self) { match self.file_path.as_ref() { "tests/transforms/docker-compose-moto.yaml" => {