Skip to content

Commit

Permalink
Minor readme changes
Browse files Browse the repository at this point in the history
  • Loading branch information
KH committed Aug 17, 2023
1 parent 97bb124 commit 91fa539
Show file tree
Hide file tree
Showing 3 changed files with 395 additions and 22 deletions.
29 changes: 18 additions & 11 deletions test/deploy/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -157,17 +157,24 @@ $capideploy attach_volumes '*' -prj=sampledeployment.json
./start_cluster.sh sampledeployment.json
# Upload binaries and their configs in one shot. Make sure you have all binaries and test data built before uploading them (see above).
$capideploy upload_files up_daemon_binary,up_daemon_env_config,up_webapi_env_config,up_webapi_binary,up_ui,up_toolbelt_env_config,up_toolbelt_binary,up_capiparquet_binary,up_diff_scripts -prj=sampledeployment.json
# Upload test files in one shot
$capideploy upload_files up_all_cfg,up_lookup_bigtest_in,up_lookup_bigtest_out,up_lookup_quicktest_in,up_lookup_quicktest_out,up_portfolio_bigtest_in,up_portfolio_bigtest_out -prj=sampledeployment.json
# If you want to run tag_and_denormalize_quicktest, py_calc_quicktest, and portfolio_quicktest, upload the corresponding data files
$capideploy upload_files up_tag_and_denormalize_quicktest_in,up_tag_and_denormalize_quicktest_out,up_py_calc_quicktest_in,up_py_calc_quicktest_out,up_portfolio_quicktest_in,up_portfolio_quicktest_out -prj=sampledeployment.json
# Upload binaries and their configs. Make sure you have all binaries and test data built before uploading them (see above).
$capideploy upload_files up_daemon_binary,up_daemon_env_config -prj=sampledeployment.json;
$capideploy upload_files up_webapi_binary,up_webapi_env_config -prj=sampledeployment.json;
$capideploy upload_files up_ui -prj=sampledeployment.json;
$capideploy upload_files up_toolbelt_binary,up_toolbelt_env_config -prj=sampledeployment.json;
$capideploy upload_files up_capiparquet_binary -prj=sampledeployment.json;
$capideploy upload_files up_diff_scripts -prj=sampledeployment.json;
# Upload test files (pick those that you need)
$capideploy upload_files up_all_cfg -prj=sampledeployment.json;
$capideploy upload_files up_portfolio_bigtest_in,up_portfolio_bigtest_out -prj=sampledeployment.json;
$capideploy upload_files up_lookup_bigtest_in,up_lookup_bigtest_out -prj=sampledeployment.json;
$capideploy upload_files up_lookup_quicktest_in,up_lookup_quicktest_out -prj=sampledeployment.json;
$capideploy upload_files up_tag_and_denormalize_quicktest_in,up_tag_and_denormalize_quicktest_out -prj=sampledeployment.json;
$capideploy upload_files up_py_calc_quicktest_in,up_py_calc_quicktest_out -prj=sampledeployment.json;
$capideploy upload_files up_portfolio_quicktest_in,up_portfolio_quicktest_out -prj=sampledeployment.json;
# Configure all services except Cassandra (which requires extra care), bastion first (it configures NFS)
Expand Down
12 changes: 9 additions & 3 deletions test/deploy/samledeployment.jsonnet
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
// Choose your Openstack provider here. This script supports 002,003,004.
local dep_name = 'sampledeployment002', // Can be any combination of alphanumeric characters. Make it unique.

// x - test bare minimum, 2x - better, 4x - decent test
// x - test bare minimum, 2x - better, 4x - decent test, 16x - that's where it gets interesting
local cassandra_node_flavor = "4x",
// Cassandra cluster size - 4,8,16
local cassandra_total_nodes = 4,
local cassandra_total_nodes = 8,
// If tasks are CPU-intensive (Python calc), make it equal to cassandra_total_nodes, otherwise cassandra_total_nodes/2
local daemon_total_instances = cassandra_total_nodes,
local DEFAULT_DAEMON_THREAD_POOL_SIZE = '8', // Depends on instance/cassandra perf
Expand Down Expand Up @@ -91,14 +91,16 @@
if cassandra_node_flavor == "x" then 'a1-ram2-disk20-perf1'
else if cassandra_node_flavor == "2x" then 'a1-ram2-disk20-perf1'
else if cassandra_node_flavor == "4x" then 'a1-ram2-disk20-perf1'
else if cassandra_node_flavor == "8x" then 'a1-ram2-disk20-perf1'
else if cassandra_node_flavor == "16x" then 'a1-ram2-disk20-perf1'
else "unknown"
else 'unknown',

local instance_flavor_cassandra = // Fast/big everything: CPU, network, disk, RAM. Preferably local disk, preferably bare metal
if dep_name == 'sampledeployment002' then
if cassandra_node_flavor == "x" then 'c5d.xlarge' //'c6asx.xlarge'
else if cassandra_node_flavor == "2x" then 'c5d.2xlarge' //'c6asx.2xlarge'
else if cassandra_node_flavor == "4x" then 'c5d.4xlarge'//'c6asx.4xlarge'
else if cassandra_node_flavor == "4x" then 'c5d.4xlarge' //'m5d.4xlarge'//'c6asx.4xlarge'
else "unknown"
else if dep_name == 'sampledeployment003' then
if cassandra_node_flavor == "x" then 'b2-7'
Expand All @@ -109,6 +111,8 @@
if cassandra_node_flavor == "x" then 'a2-ram4-disk20-perf1' // They don't have perf2 version
else if cassandra_node_flavor == "2x" then 'a4-ram8-disk20-perf2'
else if cassandra_node_flavor == "4x" then 'a8-ram16-disk20-perf2'
else if cassandra_node_flavor == "8x" then 'a16-ram32-disk20-perf1'
else if cassandra_node_flavor == "16x" then 'a32-ram64-disk20-perf2' // They don't have perf1
else "unknown"
else 'unknown',

Expand All @@ -127,6 +131,8 @@
if cassandra_node_flavor == "x" then 'a2-ram4-disk20-perf1'
else if cassandra_node_flavor == "2x" then 'a4-ram8-disk20-perf1'
else if cassandra_node_flavor == "4x" then 'a8-ram16-disk20-perf1' // For cluster16, need to stay within 200 vCpu quota, so no a8-ram16 for daemons
else if cassandra_node_flavor == "8x" then 'a8-ram16-disk20-perf1' // For cluster16, need to stay within 200 vCpu quota, so no a8-ram16 for daemons
else if cassandra_node_flavor == "16x" then 'a16-ram32-disk20-perf1'
else "unknown"
else 'unknown',

Expand Down
Loading

0 comments on commit 91fa539

Please sign in to comment.