# longevity-large-partition-200k_pks-4days.yaml
test_duration: 6480
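# Note (added for clarity): assuming test_duration is expressed in minutes, 6480 minutes = 108 hours, i.e. ~4.5 days.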
# Writing 1250 partitions, 200k clustering rows each (1 write command per loader).
prepare_write_cmd: ["scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=250 -partition-offset=1 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=250 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -validate-data" ,
"scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=250 -partition-offset=251 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=250 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -validate-data",
"scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=250 -partition-offset=501 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=250 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -validate-data",
"scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=250 -partition-offset=751 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=250 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -validate-data",
"scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=250 -partition-offset=1001 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=250 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -validate-data"
]
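# Note (added for clarity): with round_robin enabled and n_loaders=5, each of the five commands above runs on
# its own loader, so the prepare step writes 5 x 250 = 1250 partitions of 200k clustering rows each.
# Rough sizing (an estimate, not a measured value): an average row of (512 + 5120) / 2 ~= 2816 bytes gives
# ~2816 * 200000 ~= 560 MB of raw data per partition, i.e. on the order of 700 GB for the whole prepared set
# before replication and compression.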
# Not verifying the entire range, only a sample of partitions: 251-300, 551-600, 701-750, 801-850, 1001-1050
prepare_verify_cmd: ["scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=50 -partition-offset=251 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=50 -connection-count=100 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations 1 -validate-data",
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=50 -partition-offset=551 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=50 -connection-count=100 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations 1 -validate-data",
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=50 -partition-offset=701 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=50 -connection-count=100 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations 1 -validate-data",
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=50 -partition-offset=801 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=50 -connection-count=100 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations 1 -validate-data",
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=50 -partition-offset=1001 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=50 -connection-count=100 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations 1 -validate-data"
]
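# Note (added for clarity): each verify command performs a single read pass (-iterations 1) over 50 partitions,
# so 5 x 50 = 250 of the 1250 prepared partitions are spot-checked with -validate-data.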
stress_cmd: [
# Write an additional 750 partitions with 200k rows each (on top of what was written in prepare_write_cmd) - bringing the test total to 2000 partitions (see the note after this list)
"scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=750 -partition-offset=1251 -clustering-row-count=200000 -clustering-row-size=uniform:100..8192 -concurrency=10 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations=0 -duration=6420m",
# Because the delete nemeses keep increasing the number of deleted rows/partitions, we re-insert part of the data (partitions 251 to 750). Otherwise, the table could become empty after a few deletions.
# The partition range 751 to 1250 is not re-written and remains as tombstones.
# The first 250 partitions (as defined in partition_range_with_data_validation) are never deleted, so they do not need to be re-written.
"scylla-bench -workload=sequential -mode=write -replication-factor=3 -partition-count=500 -partition-offset=251 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=10 -connection-count=10 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations=0 -duration=6420m"
]
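# Note (added for clarity): both background write streams run for -duration=6420m (~107 hours), which,
# assuming test_duration is in minutes, leaves about an hour of headroom within the 6480-minute test.
# After this step the table spans 2000 distinct partitions: 1-1250 from the prepare step plus 1251-2000 written here.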
stress_read_cmd: [
# Read the first 250 partitions with the -validate-data flag (partitions we don't delete during the test; see the note after this list)
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=250 -partition-offset=1 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -concurrency=50 -connection-count=100 -consistency-level=quorum -rows-per-request=10 -timeout=90s -iterations=0 -duration=6420m -validate-data",
# Read partitions 251-1250 (1000 partitions)
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=1000 -partition-offset=251 -clustering-row-count=200000 -clustering-row-size=uniform:512..5120 -rows-per-request=10 -consistency-level=quorum -timeout=90s -concurrency=200 -connection-count=100 -iterations=0 -duration=6420m",
# Read partitions 1251-2000 (750 partitions) that are being written during the test (different row size)
"scylla-bench -workload=sequential -mode=read -replication-factor=3 -partition-count=750 -partition-offset=1251 -clustering-row-count=200000 -clustering-row-size=uniform:100..8192 -rows-per-request=10 -consistency-level=quorum -timeout=90s -concurrency=150 -connection-count=100 -iterations=0 -duration=6420m"
]
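# Note (added for clarity): the three read streams above together cover the whole 1-2000 partition range
# concurrently with the writes; only the first stream (partitions 1-250) uses -validate-data, since those
# partitions are never deleted or re-written and their contents remain predictable.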
post_prepare_cql_cmds: "CREATE MATERIALIZED VIEW scylla_bench.view_test AS SELECT * FROM scylla_bench.test where ck IS NOT NULL AND v is not null PRIMARY KEY (v, pk, ck) with comment = 'TEST VIEW'"
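# Note (added for clarity): the materialized view re-keys the base table by (v, pk, ck), so every base-table
# write also triggers a view update, adding view-maintenance load on top of the large-partition workload.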
n_db_nodes: 5
n_loaders: 5 # Each loader will have 1 scylla-bench process at every step of the test (prepare, verify, stress & stress_read)
n_monitor_nodes: 1
round_robin: true
instance_type_db: 'i3en.2xlarge'
gce_instance_type_db: 'n2-highmem-16'
gce_instance_type_loader: 'e2-standard-16'
gce_n_local_ssd_disk_db: 16
instance_type_loader: 'c5n.4xlarge'
nemesis_class_name: 'SisyphusMonkey'
nemesis_seed: '016'
nemesis_interval: 30
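# Note (an assumption about SCT behaviour, not stated in this file): SisyphusMonkey cycles through the
# available nemeses, with the order derived from nemesis_seed and a new disruption started roughly every
# nemesis_interval minutes.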
user_prefix: 'longevity-large-partitions-200k-pks-4d'
space_node_threshold: 644245094
stop_test_on_stress_failure: false
run_fullscan: ['{"mode": "partition", "ks_cf": "scylla_bench.test", "interval": 300, "pk_name":"pk", "rows_count": 200000, "validate_data": true}']
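# Note (interpretation, not verified against SCT internals): this schedules a recurring partition scan of
# scylla_bench.test every 300 seconds, with data validation enabled for the scanned partitions.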
use_preinstalled_scylla: true
# To validate rows in partitions: collect data about partitions and their row counts
# before and after running nemeses, and compare them
# TODO: enable it back once https://github.com/scylladb/qa-tasks/issues/1578 is handled
#data_validation: |
# validate_partitions: true
# table_name: "scylla_bench.test"
# primary_key_column: "pk"
# max_partitions_in_test_table: 1000
# partition_range_with_data_validation: 0-250
# limit_rows_number: 10000