diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index d01dfb6..bb01475 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: '1.20' - name: Build run: go build -v ./... @@ -60,7 +60,7 @@ jobs: - name: pkg/eval test coverage threshold env: - TESTCOVERAGE_THRESHOLD: 80.89 + TESTCOVERAGE_THRESHOLD: 80.99 run: | echo "Quality Gate: checking test coverage is above threshold $TESTCOVERAGE_THRESHOLD %..." go test -v ./pkg/eval/... -coverprofile coverage.out -covermode count diff --git a/.vscode/launch.json b/.vscode/launch.json index 95f0fd7..1a45435 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -4,6 +4,15 @@ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ + { + "name": "Generate portfolio bigtest data", + "type": "go", + "request": "launch", + "mode": "debug", + "cwd":"${workspaceFolder}/test/code/portfolio/bigtest", + "program": "${workspaceFolder}/test/code/portfolio/bigtest/generate_bigtest_data.go", + "args": [""] + }, { "name": "Deploy create security group", "type": "go", diff --git a/doc/testing.md b/doc/testing.md index af05d5d..663b2d6 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -4,7 +4,7 @@ cd into any directory under pkg/ and run ``` -go test - v +go test -v ``` To see test code coverage: diff --git a/pkg/eval/eval_ctx.go b/pkg/eval/eval_ctx.go index 5766aff..51b3473 100644 --- a/pkg/eval/eval_ctx.go +++ b/pkg/eval/eval_ctx.go @@ -448,6 +448,8 @@ func (eCtx *EvalCtx) EvalFunc(callExp *ast.CallExpr, funcName string, args []int switch funcName { case "math.Sqrt": eCtx.Value, err = callMathSqrt(args) + case "math.Round": + eCtx.Value, err = callMathRound(args) case "len": eCtx.Value, err = callLen(args) case "string": @@ -472,6 +474,10 @@ func (eCtx *EvalCtx) EvalFunc(callExp *ast.CallExpr, funcName string, args []int eCtx.Value, err 
= callTimeUnixMilli(args) case "time.DiffMilli": eCtx.Value, err = callTimeDiffMilli(args) + case "time.Before": + eCtx.Value, err = callTimeBefore(args) + case "time.After": + eCtx.Value, err = callTimeAfter(args) case "time.FixedZone": eCtx.Value, err = callTimeFixedZone(args) case "re.MatchString": diff --git a/pkg/eval/math.go b/pkg/eval/math.go index 2d3e8ec..6f5628c 100644 --- a/pkg/eval/math.go +++ b/pkg/eval/math.go @@ -27,3 +27,15 @@ func callMathSqrt(args []interface{}) (interface{}, error) { return math.Sqrt(argFloat), nil } + +func callMathRound(args []interface{}) (interface{}, error) { + if err := checkArgs("math.Round", 1, len(args)); err != nil { + return nil, err + } + argFloat, err := castToFloat64(args[0]) + if err != nil { + return nil, fmt.Errorf("cannot evaluate math.Round(), invalid args %v: [%s]", args, err.Error()) + } + + return math.Round(argFloat), nil +} diff --git a/pkg/eval/math_test.go b/pkg/eval/math_test.go index f02aa5e..345c298 100644 --- a/pkg/eval/math_test.go +++ b/pkg/eval/math_test.go @@ -12,4 +12,8 @@ func TestMathFunctions(t *testing.T) { assertEvalError(t, `math.Sqrt("aa")`, "cannot evaluate math.Sqrt(), invalid args [aa]: [cannot cast aa(string) to float64, unsuported type]", varValuesMap) assertFloatNan(t, "math.Sqrt(-1)", varValuesMap) assertEvalError(t, "math.Sqrt(123,567)", "cannot evaluate math.Sqrt(), requires 1 args, 2 supplied", varValuesMap) + + assertEqual(t, "math.Round(5.1)", 5.0, varValuesMap) + assertEvalError(t, `math.Round("aa")`, "cannot evaluate math.Round(), invalid args [aa]: [cannot cast aa(string) to float64, unsuported type]", varValuesMap) + assertEvalError(t, "math.Round(5,1)", "cannot evaluate math.Round(), requires 1 args, 2 supplied", varValuesMap) } diff --git a/pkg/eval/time.go b/pkg/eval/time.go index 51a7705..1e54ab8 100644 --- a/pkg/eval/time.go +++ b/pkg/eval/time.go @@ -102,3 +102,29 @@ func callTimeUnixMilli(args []interface{}) (interface{}, error) { return arg0.UnixMilli(), nil } + 
+func callTimeBefore(args []interface{}) (interface{}, error) { + if err := checkArgs("time.Before", 2, len(args)); err != nil { + return nil, err + } + arg0, ok0 := args[0].(time.Time) + arg1, ok1 := args[1].(time.Time) + if !ok0 || !ok1 { + return nil, fmt.Errorf("cannot evaluate time.Before(), invalid args %v", args) + } + + return arg0.Before(arg1), nil +} + +func callTimeAfter(args []interface{}) (interface{}, error) { + if err := checkArgs("time.After", 2, len(args)); err != nil { + return nil, err + } + arg0, ok0 := args[0].(time.Time) + arg1, ok1 := args[1].(time.Time) + if !ok0 || !ok1 { + return nil, fmt.Errorf("cannot evaluate time.After(), invalid args %v", args) + } + + return arg0.After(arg1), nil +} diff --git a/pkg/eval/time_test.go b/pkg/eval/time_test.go index 13a5c74..19175f6 100644 --- a/pkg/eval/time_test.go +++ b/pkg/eval/time_test.go @@ -11,6 +11,7 @@ import ( func TestTimeFunctions(t *testing.T) { testTime := time.Date(2001, 1, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)) + testTimeUtc := time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) varValuesMap := VarValuesMap{ "t": map[string]interface{}{ "test_time": testTime}} @@ -18,6 +19,7 @@ func TestTimeFunctions(t *testing.T) { assertEvalError(t, `time.Parse("2006-01-02T15:04:05.000-0700","2001-01-01T01:01:01.100-0200","aaa")`, "cannot evaluate time.Parse(), requires 2 args, 3 supplied", varValuesMap) assertEvalError(t, `time.Parse("2006-01-02T15:04:05.000-0700",123)`, "cannot evaluate time.Parse(), invalid args [2006-01-02T15:04:05.000-0700 123]", varValuesMap) assertEvalError(t, `time.Parse("2006-01-02T15:04:05.000-0700","2001-01-01T01:01:01")`, `parsing time "2001-01-01T01:01:01" as "2006-01-02T15:04:05.000-0700": cannot parse "" as ".000"`, varValuesMap) + assertEqual(t, `time.Parse("2006-01-02","2001-01-01")`, testTimeUtc, varValuesMap) assertEqual(t, `time.Format(time.Date(2001, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), "2006-01-02T15:04:05.000-0700")`, 
testTime.Format("2006-01-02T15:04:05.000-0700"), varValuesMap) @@ -29,6 +31,11 @@ func TestTimeFunctions(t *testing.T) { assertEqual(t, `time.DiffMilli(time.Date(2002, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), t.test_time)`, int64(31536000000), varValuesMap) assertEqual(t, `time.DiffMilli(time.Date(2000, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), t.test_time)`, int64(-31622400000), varValuesMap) + assertEqual(t, `time.Before(time.Date(2000, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), t.test_time)`, true, varValuesMap) + assertEqual(t, `time.After(time.Date(2002, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), t.test_time)`, true, varValuesMap) + assertEqual(t, `time.Before(time.Date(2002, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), t.test_time)`, false, varValuesMap) + assertEqual(t, `time.After(time.Date(2000, time.January, 1, 1, 1, 1, 100000000, time.FixedZone("", -7200)), t.test_time)`, false, varValuesMap) + assertEqual(t, `time.Unix(t.test_time)`, testTime.Unix(), varValuesMap) assertEqual(t, `time.UnixMilli(t.test_time)`, testTime.UnixMilli(), varValuesMap) diff --git a/pkg/exe/daemon/docker/Dockerfile b/pkg/exe/daemon/docker/Dockerfile index df4912c..c670784 100644 --- a/pkg/exe/daemon/docker/Dockerfile +++ b/pkg/exe/daemon/docker/Dockerfile @@ -1,10 +1,8 @@ -FROM golang:1.19 +FROM golang:1.20 # These image lacks pip and dateutil RUN apt update -RUN apt install -y python3-venv python3-pip -RUN pip install --upgrade pip -RUN pip install python-dateutil --upgrade +RUN apt install -y python3-full python3-dateutil WORKDIR /usr/src/capillaries diff --git a/pkg/exe/deploy/capideploy.go b/pkg/exe/deploy/capideploy.go index a26221d..c8bb93a 100644 --- a/pkg/exe/deploy/capideploy.go +++ b/pkg/exe/deploy/capideploy.go @@ -258,9 +258,9 @@ func main() { log.Fatalf(err.Error()) } - fmt.Printf("Creating instances, consider clearing known_hosts:\n") + 
fmt.Printf("Creating instances, consider clearing known_hosts to avoid ssh complaints:\n") for _, i := range instances { - fmt.Printf("ssh-keygen -f ~/.ssh/known_hosts -R %s\n", i.BestIpAddress()) + fmt.Printf("ssh-keygen -f ~/.ssh/known_hosts -R %s;\n", i.BestIpAddress()) } for iNickname, _ := range instances { diff --git a/pkg/exe/webapi/docker/Dockerfile b/pkg/exe/webapi/docker/Dockerfile index 952fa39..90046f1 100644 --- a/pkg/exe/webapi/docker/Dockerfile +++ b/pkg/exe/webapi/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19 +FROM golang:1.20 WORKDIR /usr/src/capillaries diff --git a/pkg/proc/data_util.go b/pkg/proc/data_util.go index 7a3b6fe..9aeb546 100644 --- a/pkg/proc/data_util.go +++ b/pkg/proc/data_util.go @@ -95,6 +95,11 @@ func selectBatchFromDataTablePaged(logger *l.Logger, if err := scanner.Scan(*rs.Rows[rs.RowCount]...); err != nil { return nil, cql.WrapDbErrorWithQuery("cannot scan paged data row", q, err) } + // We assume gocql creates only UTC timestamps, so this is not needed. + // If we ever catch a ts stored in our tables with a non-UTC tz, or gocql returning a non-UTC tz - investigate it. Sanitizing is the last resort and should be avoided. + // if err := rs.SanitizeScannedDatetimesToUtc(rs.RowCount); err != nil { + // return nil, cql.WrapDbErrorWithQuery("cannot sanitize datetimes", q, err) + // } rs.RowCount++ } @@ -152,6 +157,11 @@ func selectBatchPagedAllRowids(logger *l.Logger, if err := scanner.Scan(*rs.Rows[rs.RowCount]...); err != nil { return nil, cql.WrapDbErrorWithQuery("cannot scan all rows data row", q, err) } + // We assume gocql creates only UTC timestamps, so this is not needed + // If we ever catch a ts stored in our tables with a non-UTC tz, or gocql returning a non-UTC tz - investigate it. Sanitizing is the last resort and should be avoided. 
+ // if err := rs.SanitizeScannedDatetimesToUtc(rs.RowCount); err != nil { + // return nil, cql.WrapDbErrorWithQuery("cannot sanitize datetimes", q, err) + // } rs.RowCount++ } if err := scanner.Err(); err != nil { diff --git a/pkg/proc/proc_table_creator.go b/pkg/proc/proc_table_creator.go index 2d29acf..5033cef 100644 --- a/pkg/proc/proc_table_creator.go +++ b/pkg/proc/proc_table_creator.go @@ -485,7 +485,9 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, defer instr.waitForWorkersAndCloseErrorsOut(logger, pCtx) curStartLeftToken := startLeftToken + leftPageIdx := 0 for { + selectLeftBatchByTokenStartTime := time.Now() lastRetrievedLeftToken, err := selectBatchFromTableByToken(logger, pCtx, rsLeft, @@ -497,6 +499,9 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, if err != nil { return bs, err } + + logger.DebugCtx(pCtx, "selectBatchFromTableByToken: leftPageIdx %d, queried tokens from %d to %d in %.3fs, retrieved %d rows", leftPageIdx, curStartLeftToken, endLeftToken, time.Since(selectLeftBatchByTokenStartTime).Seconds(), rsLeft.RowCount) + curStartLeftToken = lastRetrievedLeftToken + 1 if rsLeft.RowCount == 0 { @@ -559,7 +564,9 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, sc.FieldRefs{sc.IdxKeyFieldRef()}) var idxPageState []byte + rightIdxPageIdx := 0 for { + selectIdxBatchStartTime := time.Now() idxPageState, err = selectBatchFromIdxTablePaged(logger, pCtx, rsIdx, @@ -589,6 +596,8 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, rowidsToFind[k] = struct{}{} } + logger.DebugCtx(pCtx, "selectBatchFromIdxTablePaged: leftPageIdx %d, rightIdxPageIdx %d, queried %d keys in %.3fs, retrieved %d rowids", leftPageIdx, rightIdxPageIdx, len(keysToFind), time.Since(selectIdxBatchStartTime).Seconds(), len(rowidsToFind)) + // Select from right table by rowid rsRight := NewRowsetFromFieldRefs( sc.FieldRefs{sc.RowidFieldRef(node.Lookup.TableCreator.Name)}, @@ -596,7 +605,9 @@ func RunCreateTableRelForBatch(envConfig 
*env.EnvConfig, srcRightFieldRefs) var rightPageState []byte + rightDataPageIdx := 0 for { + selectBatchStartTime := time.Now() rightPageState, err = selectBatchFromDataTablePaged(logger, pCtx, rsRight, @@ -609,6 +620,8 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, return bs, err } + logger.DebugCtx(pCtx, "selectBatchFromDataTablePaged: leftPageIdx %d, rightIdxPageIdx %d, rightDataPageIdx %d, queried %d rowids in %.3fs, retrieved %d rowids", leftPageIdx, rightIdxPageIdx, rightDataPageIdx, len(rowidsToFind), time.Since(selectBatchStartTime).Seconds(), rsRight.RowCount) + if rsRight.RowCount == 0 { break } @@ -711,12 +724,14 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, if rsRight.RowCount < node.Lookup.RightLookupReadBatchSize || len(rightPageState) == 0 { break } - } + rightDataPageIdx++ + } // for each data page if rsIdx.RowCount < node.Lookup.IdxReadBatchSize || len(idxPageState) == 0 { break } - } // for each idx batch + rightIdxPageIdx++ + } // for each idx page if node.Lookup.IsGroup { // Time to write the result of the grouped @@ -860,6 +875,7 @@ func RunCreateTableRelForBatch(envConfig *env.EnvConfig, if rsLeft.RowCount < leftBatchSize { break } + leftPageIdx++ } // for each source table batch // Write leftovers regardless of tableRecordBatchCount == 0 diff --git a/pkg/proc/rowset.go b/pkg/proc/rowset.go index a73630a..e45ee36 100644 --- a/pkg/proc/rowset.go +++ b/pkg/proc/rowset.go @@ -238,3 +238,18 @@ func (rs *Rowset) ExportToVarsWithAlias(rowIdx int, vars *eval.VarValuesMap, use } return nil } + +// Force UTC TZ to each ts returned by gocql +// func (rs *Rowset) SanitizeScannedDatetimesToUtc(rowIdx int) error { +// for valIdx := 0; valIdx < len(rs.Fields); valIdx++ { +// if rs.Fields[valIdx].FieldType == sc.FieldTypeDateTime { +// origVolatile := (*rs.Rows[rowIdx])[valIdx] +// origDt, ok := origVolatile.(time.Time) +// if !ok { +// return fmt.Errorf("invalid type %t(%v), expected datetime", origVolatile, origVolatile) +// } 
+// (*rs.Rows[rowIdx])[valIdx] = origDt.In(time.UTC) +// } +// } +// return nil +// } diff --git a/pkg/sc/file_creator_def.go b/pkg/sc/file_creator_def.go index 38dcddc..c6001ab 100644 --- a/pkg/sc/file_creator_def.go +++ b/pkg/sc/file_creator_def.go @@ -125,7 +125,7 @@ func (creatorDef *FileCreatorDef) Deserialize(rawWriter json.RawMessage) error { creatorDef.Csv.Separator = "," } } else { - return fmt.Errorf("cannot cannot detect file creator type") + return fmt.Errorf("cannot cannot detect file creator type: parquet dhould have column_name, csv should have header etc") } // Having diff --git a/pkg/sc/file_reader_def.go b/pkg/sc/file_reader_def.go index 7b97f8e..57ac073 100644 --- a/pkg/sc/file_reader_def.go +++ b/pkg/sc/file_reader_def.go @@ -137,7 +137,7 @@ func (frDef *FileReaderDef) Deserialize(rawReader json.RawMessage) error { } if frDef.ReaderFileType == ReaderFileTypeUnknown { - errors = append(errors, "cannot detect file reader type") + errors = append(errors, "cannot detect file reader type: parquet should have col_name, csv should have col_hdr or col_idx etc") } if len(errors) > 0 { diff --git a/pkg/storage/parquet.go b/pkg/storage/parquet.go index e659b3a..de9cd7d 100644 --- a/pkg/storage/parquet.go +++ b/pkg/storage/parquet.go @@ -208,17 +208,20 @@ func ParquetReadDateTime(val interface{}, se *gp_parquet.SchemaElement) (time.Ti if !isParquetDateTime(se) && !isParquetInt96Date(se) && !isParquetInt32Date(se) { return sc.DefaultDateTime(), fmt.Errorf("cannot read parquet datetime, schema %v", se) } + // Important: all time constructor below createdatetime objects with Local TZ. + // This is not good because our time.Format("2006-01-02") will use this TZ and produce a datetime for a local TZ, causing confusion. + // Only UTC times should be used internally. 
switch typedVal := val.(type) { case int32: if isParquetInt32Date(se) { // It's a number of days from UNIX epoch - return time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC).AddDate(0, 0, int(typedVal)), nil + return time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC).AddDate(0, 0, int(typedVal)).In(time.UTC), nil } else { switch *se.ConvertedType { case gp_parquet.ConvertedType_TIMESTAMP_MILLIS: - return time.UnixMilli(int64(typedVal)), nil + return time.UnixMilli(int64(typedVal)).In(time.UTC), nil case gp_parquet.ConvertedType_TIMESTAMP_MICROS: - return time.UnixMicro(int64(typedVal)), nil + return time.UnixMicro(int64(typedVal)).In(time.UTC), nil default: return sc.DefaultDateTime(), fmt.Errorf("cannot read parquet datetime from int32, unsupported converted type, schema %v", se) } @@ -226,15 +229,15 @@ func ParquetReadDateTime(val interface{}, se *gp_parquet.SchemaElement) (time.Ti case int64: switch *se.ConvertedType { case gp_parquet.ConvertedType_TIMESTAMP_MILLIS: - return time.UnixMilli(typedVal), nil + return time.UnixMilli(typedVal).In(time.UTC), nil case gp_parquet.ConvertedType_TIMESTAMP_MICROS: - return time.UnixMicro(typedVal), nil + return time.UnixMicro(typedVal).In(time.UTC), nil default: return sc.DefaultDateTime(), fmt.Errorf("cannot read parquet datetime from int64, unsupported converted type, schema %v", se) } case [12]byte: // Deprecated parquet int96 timestamp - return gp.Int96ToTime(typedVal), nil + return gp.Int96ToTime(typedVal).In(time.UTC), nil default: return sc.DefaultDateTime(), fmt.Errorf("cannot read parquet datetime from %T, schema %v", se, typedVal) } diff --git a/test/code/lookup/README.md b/test/code/lookup/README.md index f856440..febf60d 100644 --- a/test/code/lookup/README.md +++ b/test/code/lookup/README.md @@ -2,8 +2,16 @@ Created using Ubuntu WSL. Other Linux flavors and MacOS may require edits. -`quicktest` - small number of items, CSV input/output -`bigtest` - large naumber of items. 
CSV and Parquet input/output +## lookup_quicktest vs lookup_bigtest + +This test comes in two flavors. + +portfolio_quicktest has all data ready, it just has to be copied to /tmp/capi_*, and you can run the test. Root-level [copy_demo_data.sh](../../../copy_demo_data.sh) script does that, among other things. + +lookup_bigtest is a variation of this test that uses: +- large number of orders +- parquet files for input and output +and requires test data to be generated - see [1_create_test.data.sh](./bigtest/1_create_data.sh). ## Workflow @@ -30,7 +38,7 @@ See [integration tests](../../../doc/testing.md#integration-tests) section for g ## Possible edits -Play with number of total line items (see "-items=..." in [1_create_data.sh](quicktest/1_create_data.sh)). +Play with number of total line items (see "-items=..." in [1_create_data.sh](./quicktest/1_create_data.sh)). ## References: diff --git a/test/code/parquet/capiparquet.go b/test/code/parquet/capiparquet.go index 66db142..358b66b 100644 --- a/test/code/parquet/capiparquet.go +++ b/test/code/parquet/capiparquet.go @@ -7,6 +7,8 @@ import ( "log" "os" "reflect" + "regexp" + "sort" "strconv" "strings" @@ -20,14 +22,17 @@ import ( const ( CmdDiff string = "diff" CmdCat string = "cat" + CmdSort string = "sort" ) func usage(flagset *flag.FlagSet) { fmt.Printf("Capillaries parquet tool\nUsage: capiparquet \nCommands:\n") - fmt.Printf(" %s %s %s\n", - CmdDiff, "", "") + fmt.Printf(" %s %s\n %s %s %s %s\n %s %s %s\n", + CmdCat, "", + CmdDiff, "", "", "[optional paramaters]", + CmdSort, "", "") if flagset != nil { - fmt.Printf("\n%s parameters:\n", flagset.Name()) + fmt.Printf("\n%s optional parameters:\n", flagset.Name()) flagset.PrintDefaults() } os.Exit(0) @@ -320,6 +325,156 @@ func cat(path string) error { return nil } +type IndexedRow struct { + Key string + Row map[string]interface{} +} + +func sortFile(path string, idxDef *sc.IdxDef) error { + f, err := os.OpenFile(path, os.O_APPEND|os.O_RDWR, 0644) + if err != nil { 
+ return err + } + defer f.Close() + + reader, err := gp.NewFileReader(f) + if err != nil { + return err + } + schema := reader.GetSchemaDefinition() + + schemaElementMap := map[string]*parquet.SchemaElement{} + fields := make([]string, len(schema.RootColumn.Children)) + for i, column := range schema.RootColumn.Children { + fields[i] = column.SchemaElement.Name + schemaElementMap[column.SchemaElement.Name] = column.SchemaElement + } + + types := make([]sc.TableFieldType, len(fields)) + for fieldIdx, fieldName := range fields { + se, _ := schemaElementMap[fieldName] + types[fieldIdx], err = storage.ParquetGuessCapiType(se) + if err != nil { + return fmt.Errorf("cannot guess column type %s: %s", fieldName, err.Error()) + } + for idxFieldIdx, _ := range idxDef.Components { + if idxDef.Components[idxFieldIdx].FieldName == fieldName { + idxDef.Components[idxFieldIdx].FieldType = types[fieldIdx] + break + } + } + } + + for i, _ := range idxDef.Components { + if idxDef.Components[i].FieldType == sc.FieldTypeUnknown { + return fmt.Errorf("cannot find column %s in the parquet file", idxDef.Components[i].FieldName) + } + } + + indexedRows := make([]IndexedRow, 0) + + rowIdx := 0 + for { + d, err := reader.NextRow() + + if err == io.EOF { + break + } else if err != nil { + return fmt.Errorf("cannot get row %d: %s", rowIdx, err.Error()) + } + + typedData := map[string]interface{}{} + + for colIdx, fieldName := range fields { + se, _ := schemaElementMap[fieldName] + volatile, present := d[fieldName] + if !present { + return fmt.Errorf("cannot handle nil %s, sorry", fieldName) + } + switch types[colIdx] { + case sc.FieldTypeString: + typedVal, err := storage.ParquetReadString(volatile, se) + if err != nil { + return fmt.Errorf("cannot read string row %d, column %s: %s", rowIdx, fieldName, err.Error()) + } + typedData[fieldName] = typedVal + + case sc.FieldTypeInt: + typedVal, err := storage.ParquetReadInt(volatile, se) + if err != nil { + return fmt.Errorf("cannot read int row 
%d, column %s: %s", rowIdx, fieldName, err.Error()) + } + typedData[fieldName] = typedVal + + case sc.FieldTypeFloat: + typedVal, err := storage.ParquetReadFloat(volatile, se) + if err != nil { + return fmt.Errorf("cannot read float row %d, column %s: %s", rowIdx, fieldName, err.Error()) + } + typedData[fieldName] = typedVal + + case sc.FieldTypeBool: + typedVal, err := storage.ParquetReadBool(volatile, se) + if err != nil { + return fmt.Errorf("cannot read bool row %d, column %s: %s", rowIdx, fieldName, err.Error()) + } + typedData[fieldName] = typedVal + + case sc.FieldTypeDateTime: + typedVal, err := storage.ParquetReadDateTime(volatile, se) + if err != nil { + return fmt.Errorf("cannot read DateTime row %d, column %s: %s", rowIdx, fieldName, err.Error()) + } + typedData[fieldName] = typedVal + + case sc.FieldTypeDecimal2: + typedVal, err := storage.ParquetReadDecimal2(volatile, se) + if err != nil { + return fmt.Errorf("cannot read decimal2 row %d, column %s: %s", rowIdx, fieldName, err.Error()) + } + typedData[fieldName] = typedVal + default: + return fmt.Errorf("unsupported data type in %s", fieldName) + } + } + + // Warning: it doesn't handle nulls + key, err := sc.BuildKey(typedData, idxDef) + if err != nil { + return fmt.Errorf("cannot build key for row %v: %s", typedData, err.Error()) + } + + indexedRows = append(indexedRows, IndexedRow{key, d}) + } + + sort.Slice(indexedRows, func(i, j int) bool { return indexedRows[i].Key < indexedRows[j].Key }) + + f.Truncate(0) + + parquetWriter, err := storage.NewParquetWriter(f, sc.ParquetCodecGzip) + if err != nil { + return err + } + + for i, column := range fields { + if err := parquetWriter.AddColumn(column, types[i]); err != nil { + return fmt.Errorf("cannot add column %s: %s", column, err.Error()) + } + } + + for _, indexedRow := range indexedRows { + if err := parquetWriter.FileWriter.AddData(indexedRow.Row); err != nil { + return fmt.Errorf("cannot add row %v: %s", indexedRow.Row, err.Error()) + } + } + + if 
err := parquetWriter.Close(); err != nil { + return fmt.Errorf("cannot complete parquet file: %s", err.Error()) + } + + return nil +} + func main() { //defer profile.Start().Stop() if len(os.Args) <= 1 { @@ -345,7 +500,7 @@ func main() { os.Exit(1) } case CmdCat: - catCmd := flag.NewFlagSet(CmdDiff, flag.ExitOnError) + catCmd := flag.NewFlagSet(CmdCat, flag.ExitOnError) path := "" if len(os.Args) >= 3 { path = os.Args[2] @@ -358,5 +513,41 @@ func main() { log.Fatalf(err.Error()) os.Exit(1) } + case CmdSort: + sortCmd := flag.NewFlagSet(CmdSort, flag.ExitOnError) + path := "" + if len(os.Args) >= 3 { + path = os.Args[2] + } + if err := sortCmd.Parse(os.Args[2:]); err != nil || path == "" { + usage(sortCmd) + } + + sortParamParts := strings.Split(os.Args[3], ",") + idxDef := sc.IdxDef{Uniqueness: sc.IdxNonUnique, Components: make([]sc.IdxComponentDef, len(sortParamParts))} + + reField := regexp.MustCompile(`([a-zA-Z0-9_ \(\)%]+)(\([^)]+\))?`) + for i, sortParamPart := range sortParamParts { + m := reField.FindStringSubmatch(sortParamPart) + if len(m) < 2 || len(m) == 2 && m[2] != "(asc)" && m[2] != "(desc)" { + usage(sortCmd) + } + idxDef.Components[i] = sc.IdxComponentDef{ + FieldName: m[1], + CaseSensitivity: sc.IdxCaseSensitive, + StringLen: sc.DefaultStringComponentLen, + FieldType: sc.FieldTypeUnknown} // We will figure it out later after reading the file + if m[2] == "(desc)" { + idxDef.Components[i].SortOrder = sc.IdxSortDesc + } else { + idxDef.Components[i].SortOrder = sc.IdxSortAsc + } + + } + + if err := sortFile(path, &idxDef); err != nil { + log.Fatalf(err.Error()) + os.Exit(1) + } } } diff --git a/test/code/portfolio/README.md b/test/code/portfolio/README.md index f21771a..4f1304e 100644 --- a/test/code/portfolio/README.md +++ b/test/code/portfolio/README.md @@ -2,6 +2,17 @@ Created using Ubuntu WSL. Other Linux flavors and MacOS may require edits. +## portfolio_quicktest vs portfolio_bigtest + +This test comes in two flavors. 
+ +portfolio_quicktest has all data ready, it just has to be copied to /tmp/capi_*, and you can run the test. Root-level [copy_demo_data.sh](../../../copy_demo_data.sh) script does that, among other things. + +portfolio_bigtest is a variation of this test that uses: +- large number of accounts +- parquet files for input and output +and requires test data to be generated - see [1_create_test.data.sh](./bigtest/1_create_data.sh). + ## Workflow The [DOT diagram](../../../doc/glossary.md#dot-diagrams) generated with diff --git a/test/code/portfolio/bigtest/1_create_data.sh b/test/code/portfolio/bigtest/1_create_data.sh new file mode 100755 index 0000000..1a8c309 --- /dev/null +++ b/test/code/portfolio/bigtest/1_create_data.sh @@ -0,0 +1,51 @@ +cfgDir=/tmp/capi_cfg/portfolio_bigtest +inDir=/tmp/capi_in/portfolio_bigtest +outDir=/tmp/capi_out/portfolio_bigtest + +if [ ! -d $cfgDir ]; then + mkdir -p $cfgDir +else + rm -fR $cfgDir/* +fi + +if [ ! -d $inDir ]; then + mkdir -p $inDir +else + rm -f $inDir/* +fi + +if [ ! -d $outDir ]; then + mkdir -p $outDir +else + rm -f $outDir/* +fi + +if [ ! -d $cfgDir/py ]; then + mkdir -p $cfgDir/py +else + rm -f $cfgDir/py/* +fi + +echo "Copying config files to "$cfgDir +cp ../../../data/cfg/portfolio_bigtest/* $cfgDir/ + +echo "Copying Python files to "$cfgDir/py +cp -r ../../../data/cfg/portfolio_quicktest/py/* $cfgDir/py/ + +echo "Generating data..." +go run ./generate_bigtest_data.go -accounts=1000 + +echo "Sorting out files..." +go run ../../parquet/capiparquet.go sort $outDir/account_period_sector_perf_baseline.parquet 'ARK fund,Period,Sector' +go run ../../parquet/capiparquet.go sort $outDir/account_year_perf_baseline.parquet 'ARK fund,Period' + + +echo "Packing input and ouput files..." 
+ +pushd $inDir +tar -czf $inDir/all.tgz *.parquet +popd + +pushd $outDir +tar -czf $outDir/all.tgz *.parquet +popd \ No newline at end of file diff --git a/test/code/portfolio/bigtest/2_one_run.sh b/test/code/portfolio/bigtest/2_one_run.sh new file mode 100755 index 0000000..ccce279 --- /dev/null +++ b/test/code/portfolio/bigtest/2_one_run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +source ../../common/util.sh + +keyspace="portfolio_bigtest" +scriptFile=/tmp/capi_cfg/portfolio_bigtest/script.json +paramsFile=/tmp/capi_cfg/portfolio_bigtest/script_params.json +outDir=/tmp/capi_out/portfolio_bigtest + +one_daemon_run $keyspace $scriptFile $paramsFile $outDir '1_read_accounts,1_read_txns,1_read_period_holdings' diff --git a/test/code/portfolio/bigtest/3_compare_results.sh b/test/code/portfolio/bigtest/3_compare_results.sh new file mode 100755 index 0000000..df08a00 --- /dev/null +++ b/test/code/portfolio/bigtest/3_compare_results.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +outDir=/tmp/capi_out/portfolio_bigtest +cmdDiff="go run ../../parquet/capiparquet.go" +if ! $cmdDiff diff $outDir/account_year_perf.parquet $outDir/account_year_perf_baseline.parquet || + ! 
$cmdDiff diff $outDir/account_period_sector_perf.parquet $outDir/account_period_sector_perf_baseline.parquet; then + echo -e "\033[0;31mdiff FAILED\e[0m" + exit 1 +else + echo -e "\033[0;32mdiff OK\e[0m" +fi diff --git a/test/code/portfolio/bigtest/4_clean.sh b/test/code/portfolio/bigtest/4_clean.sh new file mode 100755 index 0000000..18eecbe --- /dev/null +++ b/test/code/portfolio/bigtest/4_clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +outDir=/tmp/capi_out/portfolio_bigtest + +rm -f $outDir/account_period_sector_perf.parquet $outDir/account_year_perf.parquet +pushd ../../../../pkg/exe/toolbelt + go run capitoolbelt.go drop_keyspace -keyspace=portfolio_bigtest +popd \ No newline at end of file diff --git a/test/code/portfolio/bigtest/generate_bigtest_data.go b/test/code/portfolio/bigtest/generate_bigtest_data.go new file mode 100755 index 0000000..cc4b5ef --- /dev/null +++ b/test/code/portfolio/bigtest/generate_bigtest_data.go @@ -0,0 +1,539 @@ +package main + +import ( + "bufio" + "encoding/csv" + "flag" + "fmt" + "io" + "log" + "os" + "strconv" + "time" + + "github.com/capillariesio/capillaries/pkg/sc" + "github.com/capillariesio/capillaries/pkg/storage" +) + +func readQuickAccounts(fileQuickAccountsPath string) (map[string]string, []string, error) { + f, err := os.Open(fileQuickAccountsPath) + if err != nil { + return nil, nil, fmt.Errorf("cannot open %s: %s", fileQuickAccountsPath, err.Error()) + } + + m := map[string]string{} // ARKK-> 2020-12-31 + r := csv.NewReader(bufio.NewReader(f)) + isHeader := true + for { + line, err := r.Read() + if err == io.EOF { + break + } + if isHeader { + isHeader = false + continue + } + m[line[0]] = line[1] + } + a := make([]string, len(m)) + i := 0 + for k, _ := range m { + a[i] = k + i++ + } + return m, a, nil +} + +func generateHoldings(fileQuickHoldingsPath string, fileInHoldingsPath string, bigAccountsMap map[string][]string, splitCount int) error { + f, err := os.Open(fileQuickHoldingsPath) + if err != nil { + return 
fmt.Errorf("cannot open %s: %s", fileQuickHoldingsPath, err.Error()) + } + + fileCounter := 0 + + var fParquet *os.File + var w *storage.ParquetWriter + var newElCounter int + var curFilePath string + + r := csv.NewReader(bufio.NewReader(f)) + isHeader := true + for { + line, err := r.Read() + if err == io.EOF { + break + } + if isHeader { + isHeader = false + continue + } + + accPrefix := line[0] + accIds, ok := bigAccountsMap[accPrefix] + if !ok { + return fmt.Errorf("unknown account prefix '%s' in holdings", accPrefix) + } + for _, accId := range accIds { + d, err := time.Parse("2006-01-02", line[1]) + if err != nil { + return fmt.Errorf("cannot parse datetime '%s' in holdings: %s", line[1], err.Error()) + } + qty, err := strconv.ParseInt(line[3], 10, 32) + if err != nil { + return fmt.Errorf("cannot parse qty '%s' in holdings: %s", line[3], err.Error()) + } + + if fParquet == nil { + curFilePath = fmt.Sprintf("%s_%03d.parquet", fileInHoldingsPath, fileCounter) + fParquet, err = os.Create(curFilePath) + if err != nil { + return fmt.Errorf("cannot create file '%s': %s", curFilePath, err.Error()) + } + + fileCounter++ + newElCounter = 0 + + w, err = storage.NewParquetWriter(fParquet, sc.ParquetCodecGzip) + if err != nil { + return err + } + + if err := w.AddColumn("account_id", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("d", sc.FieldTypeDateTime); err != nil { + return err + } + if err := w.AddColumn("ticker", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("qty", sc.FieldTypeInt); err != nil { + return err + } + } + + if err := w.FileWriter.AddData(map[string]interface{}{ + "account_id": accId, + "d": storage.ParquetWriterMilliTs(d), + "ticker": line[2], + "qty": qty, + }); err != nil { + return fmt.Errorf("cannot write '%s' to holdings: %s", accId, err.Error()) + } + + newElCounter++ + + if newElCounter == splitCount { + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer 
'%s': %s", curFilePath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", curFilePath, err.Error()) + } + + fParquet = nil + w = nil + } + } + } + + if fParquet != nil { + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer '%s': %s", curFilePath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", curFilePath, err.Error()) + } + } + + return nil +} + +func generateTxns(fileQuickTxnsPath string, fileInTxnsPath string, bigAccountsMap map[string][]string, splitCount int) error { + f, err := os.Open(fileQuickTxnsPath) + if err != nil { + return fmt.Errorf("cannot open %s: %s", fileQuickTxnsPath, err.Error()) + } + + fileCounter := 0 + + var fParquet *os.File + var w *storage.ParquetWriter + var newElCounter int + var curFilePath string + + r := csv.NewReader(bufio.NewReader(f)) + isHeader := true + for { + line, err := r.Read() + if err == io.EOF { + break + } + if isHeader { + isHeader = false + continue + } + + accPrefix := line[1] + accIds, ok := bigAccountsMap[accPrefix] + if !ok { + return fmt.Errorf("unknown account prefix '%s' in txns", accPrefix) + } + for _, accId := range accIds { + d, err := time.Parse("2006-01-02", line[0]) + if err != nil { + return fmt.Errorf("cannot parse datetime '%s' in txns: %s", line[1], err.Error()) + } + qty, err := strconv.ParseInt(line[3], 10, 32) + if err != nil { + return fmt.Errorf("cannot parse qty '%s' in txns: %s", line[3], err.Error()) + } + price, err := strconv.ParseFloat(line[4], 64) + if err != nil { + return fmt.Errorf("cannot parse price '%s' in txns: %s", line[4], err.Error()) + } + + if fParquet == nil { + curFilePath = fmt.Sprintf("%s_%03d.parquet", fileInTxnsPath, fileCounter) + fParquet, err = os.Create(curFilePath) + if err != nil { + return fmt.Errorf("cannot create file '%s': %s", curFilePath, err.Error()) + } + + fileCounter++ + newElCounter = 0 + + w, err 
= storage.NewParquetWriter(fParquet, sc.ParquetCodecGzip) + if err != nil { + return err + } + if err := w.AddColumn("ts", sc.FieldTypeDateTime); err != nil { + return err + } + if err := w.AddColumn("account_id", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("ticker", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("qty", sc.FieldTypeInt); err != nil { + return err + } + if err := w.AddColumn("price", sc.FieldTypeFloat); err != nil { + return err + } + } + + if err := w.FileWriter.AddData(map[string]interface{}{ + "ts": storage.ParquetWriterMilliTs(d), + "account_id": accId, + "ticker": line[2], + "qty": qty, + "price": price, + }); err != nil { + return fmt.Errorf("cannot write '%s' to txns: %s", accId, err.Error()) + } + + newElCounter++ + + if newElCounter == splitCount { + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer '%s': %s", curFilePath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", curFilePath, err.Error()) + } + + fParquet = nil + w = nil + } + } + } + + if fParquet != nil { + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer '%s': %s", fileInTxnsPath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", fileInTxnsPath, err.Error()) + } + } + + return nil +} + +func generateOutTotals(fileQuickAccountYearPath string, fileOutAccountYearPath string, bigAccountsMap map[string][]string) error { + f, err := os.Open(fileQuickAccountYearPath) + if err != nil { + return fmt.Errorf("cannot open %s: %s", fileQuickAccountYearPath, err.Error()) + } + + fParquet, err := os.Create(fileOutAccountYearPath) + if err != nil { + return fmt.Errorf("cannot create file '%s': %s", fileOutAccountYearPath, err.Error()) + } + + w, err := storage.NewParquetWriter(fParquet, sc.ParquetCodecGzip) + if err != nil { + return err + } + if 
err := w.AddColumn("ARK fund", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("Period", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("Sector", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("Time-weighted annualized return %", sc.FieldTypeFloat); err != nil { + return err + } + + r := csv.NewReader(bufio.NewReader(f)) + isHeader := true + for { + line, err := r.Read() + if err == io.EOF { + break + } + if isHeader { + isHeader = false + continue + } + + accPrefix := line[0] + accIds, ok := bigAccountsMap[accPrefix] + if !ok { + return fmt.Errorf("unknown account prefix '%s' in account_year_perf_baseline", accPrefix) + } + for _, accId := range accIds { + ret, err := strconv.ParseFloat(line[3], 64) + if err != nil { + return fmt.Errorf("cannot parse ret '%s' in account_year_perf_baseline: %s", line[3], err.Error()) + } + + if err := w.FileWriter.AddData(map[string]interface{}{ + "ARK fund": accId, + "Period": line[1], + "Sector": line[2], + "Time-weighted annualized return %": ret, + }); err != nil { + return fmt.Errorf("cannot write '%s' to account_year_perf_baseline: %s", accId, err.Error()) + } + } + } + + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer '%s': %s", fileOutAccountYearPath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", fileOutAccountYearPath, err.Error()) + } + + return nil +} + +func generateOutBySector(fileQuickAccountPeriodSectorPath string, fileOutAccountPeriodSectorPath string, bigAccountsMap map[string][]string) error { + f, err := os.Open(fileQuickAccountPeriodSectorPath) + if err != nil { + return fmt.Errorf("cannot open %s: %s", fileQuickAccountPeriodSectorPath, err.Error()) + } + + fParquet, err := os.Create(fileOutAccountPeriodSectorPath) + if err != nil { + return fmt.Errorf("cannot create file '%s': %s", fileOutAccountPeriodSectorPath, err.Error()) + } 
+ + w, err := storage.NewParquetWriter(fParquet, sc.ParquetCodecGzip) + if err != nil { + return err + } + if err := w.AddColumn("ARK fund", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("Period", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("Sector", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("Time-weighted annualized return %", sc.FieldTypeFloat); err != nil { + return err + } + + r := csv.NewReader(bufio.NewReader(f)) + isHeader := true + for { + line, err := r.Read() + if err == io.EOF { + break + } + if isHeader { + isHeader = false + continue + } + + accPrefix := line[0] + accIds, ok := bigAccountsMap[accPrefix] + if !ok { + return fmt.Errorf("unknown account prefix '%s' in account_period_sector_perf_baseline", accPrefix) + } + for _, accId := range accIds { + ret, err := strconv.ParseFloat(line[3], 64) + if err != nil { + return fmt.Errorf("cannot parse ret '%s' in account_period_sector_perf_baseline: %s", line[3], err.Error()) + } + + if err := w.FileWriter.AddData(map[string]interface{}{ + "ARK fund": accId, + "Period": line[1], + "Sector": line[2], + "Time-weighted annualized return %": ret, + }); err != nil { + return fmt.Errorf("cannot write '%s' to account_period_sector_perf_baseline: %s", accId, err.Error()) + } + } + } + + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer '%s': %s", fileOutAccountPeriodSectorPath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", fileOutAccountPeriodSectorPath, err.Error()) + } + + return nil +} + +func generateAccounts(fileInAccountsPath string, quickAccountsMap map[string]string, bigAccountsMap map[string][]string) error { + fParquet, err := os.Create(fileInAccountsPath + ".parquet") + if err != nil { + return fmt.Errorf("cannot create file '%s': %s", fileInAccountsPath, err.Error()) + } + + w, err := 
storage.NewParquetWriter(fParquet, sc.ParquetCodecGzip) + if err != nil { + return err + } + + if err := w.AddColumn("account_id", sc.FieldTypeString); err != nil { + return err + } + if err := w.AddColumn("earliest_period_start", sc.FieldTypeDateTime); err != nil { + return err + } + + for accPrefix, accIds := range bigAccountsMap { + for _, accId := range accIds { + eps, ok := quickAccountsMap[accPrefix] + if !ok { + return fmt.Errorf("cannot find account prefix '%s' in accounts", accPrefix) + } + d, err := time.Parse("2006-01-02", eps) + if err != nil { + return fmt.Errorf("cannot parse account earliest_period_start '%s': %s", eps, err.Error()) + } + if err := w.FileWriter.AddData(map[string]interface{}{ + "account_id": accId, + "earliest_period_start": storage.ParquetWriterMilliTs(d), + }); err != nil { + return fmt.Errorf("cannot write '%s,%v' to accounts: %s", accId, d, err.Error()) + } + } + } + + if err := w.Close(); err != nil { + return fmt.Errorf("cannot close parquet writer '%s': %s", fileInAccountsPath, err.Error()) + } + + if err := fParquet.Close(); err != nil { + return fmt.Errorf("cannot close file '%s': %s", fileInAccountsPath, err.Error()) + } + + return nil +} + +const SOURCE_TXNS int = 88459 +const SOURCE_HOLDINGS int = 4300 + +func main() { + quicktestIn := flag.String("quicktest_in", "../../../data/in/portfolio_quicktest", "Root dir for in quicktest files to be used as a template") + quicktestOut := flag.String("quicktest_out", "../../../data/out/portfolio_quicktest", "Root dir for out quicktest files to be used as a template") + inRoot := flag.String("in_root", "/tmp/capi_in/portfolio_bigtest", "Root dir for generated in files") + outRoot := flag.String("out_root", "/tmp/capi_out/portfolio_bigtest", "Root dir for generated out files") + totalAccountsSuggested := flag.Int("accounts", 100, "Total number of accounts to generate") + flag.Parse() + + // Template files + fileQuickAccountsPath := *quicktestIn + "/accounts.csv" + 
fileQuickHoldingsPath := *quicktestIn + "/holdings.csv" + fileQuickTxnsPath := *quicktestIn + "/txns.csv" + fileQuickAccountYearPath := *quicktestOut + "/account_year_perf_baseline.csv" + fileQuickAccountPeriodSectorPath := *quicktestOut + "/account_period_sector_perf_baseline.csv" + + // Files to generate + fileInAccountsPath := *inRoot + "/accounts" + fileInHoldingsPath := *inRoot + "/holdings" + fileInTxnsPath := *inRoot + "/txns" + fileOutAccountYearPath := *outRoot + "/account_year_perf_baseline.parquet" + fileOutAccountPeriodSectorPath := *outRoot + "/account_period_sector_perf_baseline.parquet" + + // Map to new acct ids + + quickAccountsMap, quickAccounts, err := readQuickAccounts(fileQuickAccountsPath) + if err != nil { + log.Fatal(err) + } + + accountsPerOriginalQuick := *totalAccountsSuggested / len(quickAccounts) + totalAccounts := accountsPerOriginalQuick * len(quickAccounts) + + bigAccountsMap := map[string][]string{} // ARKK-> [ARKK-000000,ARKK-000001] + for i := 0; i < totalAccounts; i++ { + accLocalIdx := i / len(quickAccounts) //0,0,0,0,0,0,1,1,1,1,1,1,2,2,2,2,2,2, + accPrefix := quickAccounts[i%len(quickAccounts)] + if _, ok := bigAccountsMap[accPrefix]; !ok { + bigAccountsMap[accPrefix] = make([]string, accountsPerOriginalQuick) + } + bigAccountsMap[accPrefix][accLocalIdx] = fmt.Sprintf("%s-%06d", accPrefix, i) + } + + // Accounts + + if err := generateAccounts(fileInAccountsPath, quickAccountsMap, bigAccountsMap); err != nil { + log.Fatal(err) + } + + // Holdings + + if err := generateHoldings(fileQuickHoldingsPath, fileInHoldingsPath, bigAccountsMap, SOURCE_HOLDINGS*accountsPerOriginalQuick/10+1); err != nil { + log.Fatal(err) + } + + // Txns + + if err := generateTxns(fileQuickTxnsPath, fileInTxnsPath, bigAccountsMap, SOURCE_TXNS*accountsPerOriginalQuick/100+1); err != nil { + log.Fatal(err) + } + + // Out totals + + if err := generateOutTotals(fileQuickAccountYearPath, fileOutAccountYearPath, bigAccountsMap); err != nil { + log.Fatal(err) + 
} + + // Out by sector + + if err := generateOutBySector(fileQuickAccountPeriodSectorPath, fileOutAccountPeriodSectorPath, bigAccountsMap); err != nil { + log.Fatal(err) + } + +} diff --git a/test/code/portfolio/test_one_run.sh b/test/code/portfolio/bigtest/test_one_run.sh similarity index 87% rename from test/code/portfolio/test_one_run.sh rename to test/code/portfolio/bigtest/test_one_run.sh index 0c0661b..0376692 100755 --- a/test/code/portfolio/test_one_run.sh +++ b/test/code/portfolio/bigtest/test_one_run.sh @@ -3,7 +3,7 @@ echo "Make sure that pkg/exe/toolbelt has access to Cassandra and RabbitMQ" ./4_clean.sh -./1_create_quicktest_data.sh +./1_create_data.sh ./2_one_run.sh if ! ./3_compare_results.sh; then echo "NOT CLEANED" diff --git a/test/code/portfolio/1_create_quicktest_data.sh b/test/code/portfolio/quicktest/1_create_data.sh similarity index 76% rename from test/code/portfolio/1_create_quicktest_data.sh rename to test/code/portfolio/quicktest/1_create_data.sh index c8cf486..27374aa 100755 --- a/test/code/portfolio/1_create_quicktest_data.sh +++ b/test/code/portfolio/quicktest/1_create_data.sh @@ -21,11 +21,11 @@ else fi echo "Copying config files to "$cfgDir -cp -r ../../data/cfg/portfolio_quicktest/* $cfgDir/ +cp -r ../../../data/cfg/portfolio_quicktest/* $cfgDir/ echo "Copying in files to "$inDir -cp -r ../../data/in/portfolio_quicktest/* $inDir/ +cp -r ../../../data/in/portfolio_quicktest/* $inDir/ echo "Copying out files to "$outDir echo "Placeholder for portfolio_quicktest output files" > $outDir/readme.txt -cp -r ../../data/out/portfolio_quicktest/* $outDir/ +cp -r ../../../data/out/portfolio_quicktest/* $outDir/ diff --git a/test/code/portfolio/2_exec_nodes.sh b/test/code/portfolio/quicktest/2_exec_nodes.sh similarity index 98% rename from test/code/portfolio/2_exec_nodes.sh rename to test/code/portfolio/quicktest/2_exec_nodes.sh index 8ba113d..9f81e2a 100755 --- a/test/code/portfolio/2_exec_nodes.sh +++ 
b/test/code/portfolio/quicktest/2_exec_nodes.sh @@ -6,7 +6,7 @@ paramsFile=/tmp/capi_cfg/portfolio_quicktest/script_params.json SECONDS=0 -pushd ../../../pkg/exe/toolbelt +pushd ../../../../pkg/exe/toolbelt set -x go run capitoolbelt.go drop_keyspace -keyspace=$keyspace diff --git a/test/code/portfolio/2_one_run.sh b/test/code/portfolio/quicktest/2_one_run.sh similarity index 91% rename from test/code/portfolio/2_one_run.sh rename to test/code/portfolio/quicktest/2_one_run.sh index 8bc71f8..9e663ba 100755 --- a/test/code/portfolio/2_one_run.sh +++ b/test/code/portfolio/quicktest/2_one_run.sh @@ -1,6 +1,6 @@ #!/bin/bash -source ../common/util.sh +source ../../common/util.sh keyspace="portfolio_quicktest" scriptFile=/tmp/capi_cfg/portfolio_quicktest/script.json diff --git a/test/code/portfolio/3_compare_results.sh b/test/code/portfolio/quicktest/3_compare_results.sh similarity index 100% rename from test/code/portfolio/3_compare_results.sh rename to test/code/portfolio/quicktest/3_compare_results.sh diff --git a/test/code/portfolio/4_clean.sh b/test/code/portfolio/quicktest/4_clean.sh similarity index 85% rename from test/code/portfolio/4_clean.sh rename to test/code/portfolio/quicktest/4_clean.sh index 4c74723..b36bde7 100755 --- a/test/code/portfolio/4_clean.sh +++ b/test/code/portfolio/quicktest/4_clean.sh @@ -3,6 +3,6 @@ outDir=/tmp/capi_out/portfolio_quicktest rm -f $outDir/account_period_sector_perf.csv $outDir/account_year_perf.csv -pushd ../../../pkg/exe/toolbelt +pushd ../../../../pkg/exe/toolbelt go run capitoolbelt.go drop_keyspace -keyspace=portfolio_quicktest popd \ No newline at end of file diff --git a/test/code/portfolio/test_exec_nodes.sh b/test/code/portfolio/quicktest/test_exec_nodes.sh similarity index 87% rename from test/code/portfolio/test_exec_nodes.sh rename to test/code/portfolio/quicktest/test_exec_nodes.sh index 0dd9cd9..46676ba 100755 --- a/test/code/portfolio/test_exec_nodes.sh +++ b/test/code/portfolio/quicktest/test_exec_nodes.sh @@ 
-3,7 +3,7 @@ echo "Make sure that pkg/exe/toolbelt has access to Cassandra" ./4_clean.sh -./1_create_quicktest_data.sh +./1_create_data.sh ./2_exec_nodes.sh if ! ./3_compare_results.sh; then echo "NOT CLEANED" diff --git a/test/code/portfolio/quicktest/test_one_run.sh b/test/code/portfolio/quicktest/test_one_run.sh new file mode 100755 index 0000000..0376692 --- /dev/null +++ b/test/code/portfolio/quicktest/test_one_run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +echo "Make sure that pkg/exe/toolbelt has access to Cassandra and RabbitMQ" + +./4_clean.sh +./1_create_data.sh +./2_one_run.sh +if ! ./3_compare_results.sh; then + echo "NOT CLEANED" + exit 1 +else + ./4_clean.sh +fi diff --git a/test/data/cfg/portfolio_bigtest/script.json b/test/data/cfg/portfolio_bigtest/script.json new file mode 100644 index 0000000..5790d72 --- /dev/null +++ b/test/data/cfg/portfolio_bigtest/script.json @@ -0,0 +1,667 @@ +{ + "nodes": { + "1_read_accounts": { + "type": "file_table", + "desc": "Load accounts from parquet", + "explicit_run_only": true, + "r": { + "urls": [ + "{dir_in}/accounts.parquet" + ], + "columns": { + "col_account_id": { + "parquet": { + "col_name": "account_id" + }, + "col_type": "string" + }, + "col_earliest_period_start": { + "parquet": { + "col_name": "earliest_period_start" + }, + "col_type": "datetime" + } + } + }, + "w": { + "name": "accounts", + "having": "!time.After(w.earliest_period_start, time.Parse(`2006-01-02`,`{period_start_eod}`))", + "fields": { + "account_id": { + "expression": "r.col_account_id", + "type": "string" + }, + "earliest_period_start": { + "expression": "r.col_earliest_period_start", + "type": "datetime" + } + } + } + }, + "1_read_txns": { + "type": "file_table", + "desc": "Load txns from parquet", + "explicit_run_only": true, + "r": { + "urls": [ + "{dir_in}/txns_000.parquet", + "{dir_in}/txns_001.parquet", + "{dir_in}/txns_002.parquet", + "{dir_in}/txns_003.parquet", + "{dir_in}/txns_004.parquet", + "{dir_in}/txns_005.parquet", + 
"{dir_in}/txns_006.parquet", + "{dir_in}/txns_007.parquet", + "{dir_in}/txns_008.parquet", + "{dir_in}/txns_009.parquet", + "{dir_in}/txns_010.parquet", + "{dir_in}/txns_011.parquet", + "{dir_in}/txns_012.parquet", + "{dir_in}/txns_013.parquet", + "{dir_in}/txns_014.parquet", + "{dir_in}/txns_015.parquet", + "{dir_in}/txns_016.parquet", + "{dir_in}/txns_017.parquet", + "{dir_in}/txns_018.parquet", + "{dir_in}/txns_019.parquet", + "{dir_in}/txns_020.parquet", + "{dir_in}/txns_021.parquet", + "{dir_in}/txns_022.parquet", + "{dir_in}/txns_023.parquet", + "{dir_in}/txns_024.parquet", + "{dir_in}/txns_025.parquet", + "{dir_in}/txns_026.parquet", + "{dir_in}/txns_027.parquet", + "{dir_in}/txns_028.parquet", + "{dir_in}/txns_029.parquet", + "{dir_in}/txns_030.parquet", + "{dir_in}/txns_031.parquet", + "{dir_in}/txns_032.parquet", + "{dir_in}/txns_033.parquet", + "{dir_in}/txns_034.parquet", + "{dir_in}/txns_035.parquet", + "{dir_in}/txns_036.parquet", + "{dir_in}/txns_037.parquet", + "{dir_in}/txns_038.parquet", + "{dir_in}/txns_039.parquet", + "{dir_in}/txns_040.parquet", + "{dir_in}/txns_041.parquet", + "{dir_in}/txns_042.parquet", + "{dir_in}/txns_043.parquet", + "{dir_in}/txns_044.parquet", + "{dir_in}/txns_045.parquet", + "{dir_in}/txns_046.parquet", + "{dir_in}/txns_047.parquet", + "{dir_in}/txns_048.parquet", + "{dir_in}/txns_049.parquet", + "{dir_in}/txns_050.parquet", + "{dir_in}/txns_051.parquet", + "{dir_in}/txns_052.parquet", + "{dir_in}/txns_053.parquet", + "{dir_in}/txns_054.parquet", + "{dir_in}/txns_055.parquet", + "{dir_in}/txns_056.parquet", + "{dir_in}/txns_057.parquet", + "{dir_in}/txns_058.parquet", + "{dir_in}/txns_059.parquet", + "{dir_in}/txns_060.parquet", + "{dir_in}/txns_061.parquet", + "{dir_in}/txns_062.parquet", + "{dir_in}/txns_063.parquet", + "{dir_in}/txns_064.parquet", + "{dir_in}/txns_065.parquet", + "{dir_in}/txns_066.parquet", + "{dir_in}/txns_067.parquet", + "{dir_in}/txns_068.parquet", + "{dir_in}/txns_069.parquet", + 
"{dir_in}/txns_070.parquet", + "{dir_in}/txns_071.parquet", + "{dir_in}/txns_072.parquet", + "{dir_in}/txns_073.parquet", + "{dir_in}/txns_074.parquet", + "{dir_in}/txns_075.parquet", + "{dir_in}/txns_076.parquet", + "{dir_in}/txns_077.parquet", + "{dir_in}/txns_078.parquet", + "{dir_in}/txns_079.parquet", + "{dir_in}/txns_080.parquet", + "{dir_in}/txns_081.parquet", + "{dir_in}/txns_082.parquet", + "{dir_in}/txns_083.parquet", + "{dir_in}/txns_084.parquet", + "{dir_in}/txns_085.parquet", + "{dir_in}/txns_086.parquet", + "{dir_in}/txns_087.parquet", + "{dir_in}/txns_088.parquet", + "{dir_in}/txns_089.parquet", + "{dir_in}/txns_090.parquet", + "{dir_in}/txns_091.parquet", + "{dir_in}/txns_092.parquet", + "{dir_in}/txns_093.parquet", + "{dir_in}/txns_094.parquet", + "{dir_in}/txns_095.parquet", + "{dir_in}/txns_096.parquet", + "{dir_in}/txns_097.parquet", + "{dir_in}/txns_098.parquet", + "{dir_in}/txns_099.parquet" + ], + "columns": { + "col_ts": { + "parquet": { + "col_name": "ts" + }, + "col_type": "datetime" + }, + "col_account_id": { + "parquet": { + "col_name": "account_id" + }, + "col_type": "string" + }, + "col_ticker": { + "parquet": { + "col_name": "ticker" + }, + "col_type": "string" + }, + "col_qty": { + "parquet": { + "col_name": "qty" + }, + "col_type": "int" + }, + "col_price": { + "parquet": { + "col_name": "price" + }, + "col_type": "float" + } + } + }, + "w": { + "name": "txns", + "having": "time.After(w.ts,time.Parse(`2006-01-02`,`{period_start_eod}`)) && !time.After(w.ts,time.Parse(`2006-01-02`,`{period_end_eod}`))", + "fields": { + "account_id": { + "expression": "r.col_account_id", + "type": "string" + }, + "ts": { + "expression": "r.col_ts", + "type": "datetime" + }, + "txn_json": { + "expression": "strings.ReplaceAll(fmt.Sprintf(`{'ts':'%s','t':'%s','q':%d,'p':%s}`, time.Format(r.col_ts, `2006-01-02`), r.col_ticker, r.col_qty, decimal2(r.col_price)), `'`,`\"`)", + "type": "string" + } + }, + "indexes": { + "idx_txns_account_id": 
"non_unique(account_id)" + } + } + }, + "2_account_txns_outer": { + "type": "table_lookup_table", + "desc": "For each account, merge all txns into single json string", + "r": { + "table": "accounts", + "expected_batches_total": 100 + }, + "l": { + "index_name": "idx_txns_account_id", + "join_on": "r.account_id", + "group": true, + "join_type": "left" + }, + "w": { + "name": "account_txns", + "fields": { + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "txns_json": { + "expression": "string_agg(l.txn_json,\",\")", + "type": "string" + } + } + } + }, + "1_read_period_holdings": { + "type": "file_table", + "desc": "Load holdings from parquet", + "explicit_run_only": true, + "r": { + "urls": [ + "{dir_in}/holdings_000.parquet", + "{dir_in}/holdings_001.parquet", + "{dir_in}/holdings_002.parquet", + "{dir_in}/holdings_003.parquet", + "{dir_in}/holdings_004.parquet", + "{dir_in}/holdings_005.parquet", + "{dir_in}/holdings_006.parquet", + "{dir_in}/holdings_007.parquet", + "{dir_in}/holdings_008.parquet", + "{dir_in}/holdings_009.parquet" + ], + "columns": { + "col_eod": { + "parquet": { + "col_name": "d" + }, + "col_type": "datetime" + }, + "col_account_id": { + "parquet": { + "col_name": "account_id" + }, + "col_type": "string" + }, + "col_ticker": { + "parquet": { + "col_name": "ticker" + }, + "col_type": "string" + }, + "col_qty": { + "parquet": { + "col_name": "qty" + }, + "col_type": "int" + } + } + }, + "w": { + "name": "period_holdings", + "having": "!time.After(time.Parse(`2006-01-02`,`{period_start_eod}`),w.eod) && !time.After(w.eod,time.Parse(`2006-01-02`,`{period_end_eod}`))", + + "fields": { + "account_id": { + "expression": "r.col_account_id", + "type": "string" + }, + "eod": { + "expression": "r.col_eod", + "type": "datetime" + }, + "holding_json": { + "expression": "fmt.Sprintf(`{\"d\":\"%s\",\"t\":\"%s\",\"q\":%d}`, time.Format(r.col_eod, `2006-01-02`), r.col_ticker, r.col_qty)", + "type": "string" + } + }, + "indexes": { + 
"idx_period_holdings_account_id": "non_unique(account_id)" + } + } + }, + "2_account_period_holdings_outer": { + "type": "table_lookup_table", + "desc": "For each account, merge all holdings into single json string", + "r": { + "table": "accounts", + "expected_batches_total": 100 + }, + "l": { + "index_name": "idx_period_holdings_account_id", + "join_on": "r.account_id", + "group": true, + "join_type": "left" + }, + "w": { + "name": "account_period_holdings", + "fields": { + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "holdings_json": { + "expression": "string_agg(l.holding_json,\",\")", + "type": "string" + } + }, + "indexes": { + "idx_account_period_holdings_account_id": "unique(account_id)" + } + } + }, + "3_build_account_period_activity": { + "type": "table_lookup_table", + "desc": "For each account, merge holdings and txns", + "r": { + "table": "account_txns", + "expected_batches_total": 100 + }, + "l": { + "index_name": "idx_account_period_holdings_account_id", + "join_on": "r.account_id", + "group": false, + "join_type": "left" + }, + "w": { + "name": "account_period_activity", + "fields": { + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "txns_json": { + "expression": " \"[\" + r.txns_json + \"]\" ", + "type": "string" + }, + "holdings_json": { + "expression": " \"[\" + l.holdings_json + \"]\" ", + "type": "string" + } + } + } + }, + "4_calc_account_period_perf": { + "type": "table_custom_tfm_table", + "custom_proc_type": "py_calc", + "desc": "Apply Python-based calculations to account holdings and txns", + "r": { + "table": "account_period_activity", + "expected_batches_total": 100 + }, + "p": { + "python_code_urls": [ + "{dir_py}/portfolio_test_company_info_provider.py", + "{dir_py}/portfolio_test_eod_price_provider.py", + "{dir_py}/portfolio_calc.py" + ], + "calculated_fields": { + "perf_json": { + "expression": "txns_and_holdings_to_twr_cagr_by_sector_year_quarter_json(\"{period_start_eod}\", 
\"{period_end_eod}\", r.holdings_json, r.txns_json, PortfolioTestEodPriceProvider, PortfolioTestCompanyInfoProvider)", + "type": "string" + } + } + }, + "w": { + "name": "account_period_perf", + "fields": { + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "perf_json": { + "expression": "p.perf_json", + "type": "string" + } + } + } + }, + "5_tag_by_period": { + "type": "table_custom_tfm_table", + "custom_proc_type": "tag_and_denormalize", + "desc": "Tag accounts by period name", + "r": { + "table": "account_period_perf", + "expected_batches_total": 100 + }, + "p": { + "tag_field_name": "period", + "tag_criteria": { + "2021": "re.MatchString(`\"2021\":`, r.perf_json)", + "2021Q1": "re.MatchString(`\"2021Q1\":`, r.perf_json)", + "2021Q2": "re.MatchString(`\"2021Q2\":`, r.perf_json)", + "2021Q3": "re.MatchString(`\"2021Q3\":`, r.perf_json)", + "2021Q4": "re.MatchString(`\"2021Q4\":`, r.perf_json)", + "2022": "re.MatchString(`\"2022\":`, r.perf_json)", + "2022Q1": "re.MatchString(`\"2022Q1\":`, r.perf_json)", + "2022Q2": "re.MatchString(`\"2022Q2\":`, r.perf_json)", + "2022Q3": "re.MatchString(`\"2022Q3\":`, r.perf_json)", + "2022Q4": "re.MatchString(`\"2022Q4\":`, r.perf_json)" + } + }, + "w": { + "name": "account_period_perf_by_period", + "fields": { + "period": { + "expression": "p.period", + "type": "string" + }, + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "perf_json": { + "expression": "r.perf_json", + "type": "string" + } + } + } + }, + "5_tag_by_sector": { + "type": "table_custom_tfm_table", + "custom_proc_type": "tag_and_denormalize", + "desc": "Tag accounts by sector", + "r": { + "table": "account_period_perf_by_period", + "expected_batches_total": 100 + }, + "p": { + "tag_field_name": "sector", + "tag_criteria": { + "All": "re.MatchString(`\"All\":`, r.perf_json)", + "Communication Services": "re.MatchString(`\"Communication Services\":`, r.perf_json)", + "Consumer Cyclical": 
"re.MatchString(`\"Consumer Cyclical\":`, r.perf_json)", + "Consumer Defensive": "re.MatchString(`\"Consumer Defensive\":`, r.perf_json)", + "Financial Services": "re.MatchString(`\"Financial Services\":`, r.perf_json)", + "Healthcare": "re.MatchString(`\"Healthcare\":`, r.perf_json)", + "Industrials": "re.MatchString(`\"Industrials\":`, r.perf_json)", + "Real Estate": "re.MatchString(`\"Real Estate\":`, r.perf_json)", + "Technology": "re.MatchString(`\"Technology\":`, r.perf_json)" + } + }, + "w": { + "name": "account_period_perf_by_period_sector", + "fields": { + "period": { + "expression": "r.period", + "type": "string" + }, + "sector": { + "expression": "p.sector", + "type": "string" + }, + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "perf_json": { + "expression": "r.perf_json", + "type": "string" + } + } + } + }, + "6_perf_json_to_columns": { + "type": "table_custom_tfm_table", + "custom_proc_type": "py_calc", + "desc": "Use Python to read perf json and save stats as columns", + "r": { + "table": "account_period_perf_by_period_sector", + "expected_batches_total": 100 + }, + "p": { + "python_code_urls": [ + "{dir_py}/json_to_columns.py" + ], + "calculated_fields": { + "twr": { + "expression": "json_to_twr(r.perf_json, r.period, r.sector)", + "type": "float" + }, + "cagr": { + "expression": "json_to_cagr(r.perf_json, r.period, r.sector)", + "type": "float" + } + } + }, + "w": { + "name": "account_period_sector_twr_cagr", + "fields": { + "account_id": { + "expression": "r.account_id", + "type": "string" + }, + "period": { + "expression": "r.period", + "type": "string" + }, + "sector": { + "expression": "r.sector", + "type": "string" + }, + "twr": { + "expression": "p.twr", + "type": "float" + }, + "cagr": { + "expression": "p.cagr", + "type": "float" + } + } + } + }, + "7_file_account_period_sector_perf": { + "type": "table_file", + "desc": "Write yearly/quarterly perf results by sector to parquet file", + "r": { + "table": 
"account_period_sector_twr_cagr" + }, + "w": { + "top": { + "order": "account_id,period,sector" + }, + "url_template": "{dir_out}/account_period_sector_perf.parquet", + "columns": [ + { + "parquet": { + "column_name": "ARK fund" + }, + "name": "account_id", + "expression": "r.account_id", + "type": "string" + }, + { + "parquet": { + "column_name": "Period" + }, + "name": "period", + "expression": "r.period", + "type": "string" + }, + { + "parquet": { + "column_name": "Sector" + }, + "name": "sector", + "expression": "r.sector", + "type": "string" + }, + { + "parquet": { + "column_name": "Time-weighted annualized return %" + }, + "name": "cagr", + "expression": "math.Round(r.cagr*100)/100", + "type": "float" + } + ] + } + }, + "7_file_account_year_perf": { + "type": "table_file", + "desc": "Write yearly perf results for all sectors to parquet file", + "r": { + "table": "account_period_sector_twr_cagr" + }, + "w": { + "top": { + "order": "account_id,period" + }, + "having": "len(w.period) == 4 && w.sector == \"All\"", + "url_template": "{dir_out}/account_year_perf.parquet", + "columns": [ + { + "parquet": { + "column_name": "ARK fund" + }, + "name": "account_id", + "expression": "r.account_id", + "type": "string" + }, + { + "parquet": { + "column_name": "Period" + }, + "name": "period", + "expression": "r.period", + "type": "string" + }, + { + "parquet": { + "column_name": "Sector" + }, + "name": "sector", + "expression": "r.sector", + "type": "string" + }, + { + "parquet": { + "column_name": "Time-weighted annualized return %" + }, + "name": "cagr", + "expression": "math.Round(r.cagr*100)/100", + "type": "float" + } + ] + } + } + }, + "dependency_policies": { + "current_active_first_stopped_nogo": { + "is_default": true, + "event_priority_order": "run_is_current(desc), node_start_ts(desc)", + "rules": [ + { + "cmd": "go", + "expression": "e.run_is_current == true && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchSuccess" + }, + { + 
"cmd": "wait", + "expression": "e.run_is_current == true && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchNone" + }, + { + "cmd": "wait", + "expression": "e.run_is_current == true && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchStart" + }, + { + "cmd": "nogo", + "expression": "e.run_is_current == true && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchFail" + }, + { + "cmd": "go", + "expression": "e.run_is_current == false && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchSuccess" + }, + { + "cmd": "wait", + "expression": "e.run_is_current == false && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchNone" + }, + { + "cmd": "wait", + "expression": "e.run_is_current == false && e.run_final_status == wfmodel.RunStart && e.node_status == wfmodel.NodeBatchStart" + }, + { + "cmd": "go", + "expression": "e.run_is_current == false && e.run_final_status == wfmodel.RunComplete && e.node_status == wfmodel.NodeBatchSuccess" + }, + { + "cmd": "nogo", + "expression": "e.run_is_current == false && e.run_final_status == wfmodel.RunComplete && e.node_status == wfmodel.NodeBatchFail" + } + ] + } + } +} \ No newline at end of file diff --git a/test/data/cfg/portfolio_bigtest/script_params.json b/test/data/cfg/portfolio_bigtest/script_params.json new file mode 100644 index 0000000..e31bd71 --- /dev/null +++ b/test/data/cfg/portfolio_bigtest/script_params.json @@ -0,0 +1,7 @@ +{ + "period_start_eod": "2020-12-31", + "period_end_eod": "2022-12-31", + "dir_in": "/tmp/capi_in/portfolio_bigtest", + "dir_out": "/tmp/capi_out/portfolio_bigtest", + "dir_py": "/tmp/capi_cfg/portfolio_bigtest/py" +} \ No newline at end of file diff --git a/test/deploy/README.md b/test/deploy/README.md index 19da1cc..ce74ef2 100644 --- a/test/deploy/README.md +++ b/test/deploy/README.md @@ -21,12 +21,12 @@ Capillaries configuration scripts and in/out data are stored on 
separate volumes ## Deployment project template (`*.jsonnet`) and deployment project (`*.json`) files -Capideploy tool uses deployment project file (see sample `sampledeployment002.json`) to: +Capideploy tool uses deployment project file (see sample `sampledeployment.json`) to: - configure creation of Openstack objects like instances and volumes and track status of those objects locally - push Capillaries data and binaries to created Openstack deployment - clean Openstack deployment -Deployment project files contain description and status of each instance. When there are a lot of instances that perform the same tesk (like Cassandra nodes or instances running Capillaries [Daemon](../../doc/glossary.md#daemon)) which makes them pretty redundant. To avoid creating repetitive configurations manually, use [jsonnet](https://jsonnet.org) templates like `sampledeployment002.jsonnet`. Before deploying, make sure that you have generated a deployment project `*.json` file from the `*.jsonnet` template, and, under normal circumstances, avoid manual changes in your `*.json` file. Tweak `*.jsonnet` file and regenerate `*.json` instead, using jsonnet interpreter of your choice. Feel free to manually tweak `*.json` file if you really think you know what you are doing. +Deployment project files contain description and status of each instance. When there are a lot of instances that perform the same task (like Cassandra nodes or instances running Capillaries [Daemon](../../doc/glossary.md#daemon)) which makes them pretty redundant. To avoid creating repetitive configurations manually, use [jsonnet](https://jsonnet.org) templates like `sampledeployment.jsonnet`. Before deploying, make sure that you have generated a deployment project `*.json` file from the `*.jsonnet` template, and, under normal circumstances, avoid manual changes in your `*.json` file. Tweak `*.jsonnet` file and regenerate `*.json` instead, using jsonnet interpreter of your choice.
Feel free to manually tweak `*.json` file if you really think you know what you are doing. ## Before deployment @@ -35,9 +35,9 @@ Deployment project files contain description and status of each instance. When t 2. Make sure you have created the key pair for SSH access to the Openstack instances, key pair name stored in `root_key_name` in the project file. Through this document, we will be assuming the key pair is stored in `~/.ssh/` and the private key file this kind of name: `sampledeployment002_rsa`. -3. If you want to use SFTP (instead of or along with NFS) for file sharing make sure all SFTP key files used referenced in deployment project `sampledeployment002.json` are present. +3. If you want to use SFTP (instead of or along with NFS) for file sharing make sure all SFTP key files used referenced in deployment project `sampledeployment.json` are present. -4. Make sure all environment variables storing Capideploy and Openstack settings are set. For non-production environments, you may want to keep them in a separate private file and activate before deploying `source ~/sampledeployment002.rc`: +4. Make sure all environment variables storing Capideploy and Openstack settings are set. For non-production environments, you may want to keep them in a separate private file and activate before deploying `source ~/sampledeployment.rc`: ``` # capideploy settings @@ -52,7 +52,7 @@ export CAPIDEPLOY_RABBITMQ_USER_PASS=... # OpenStack settings -# Example 002 +# Example export OS_AUTH_URL=https://us-central-1.genesishosting.com:5000/v3 export OS_PROJECT_ID=7abdc4... export OS_PROJECT_NAME="myProject" @@ -106,7 +106,7 @@ From now on, this doc assumes `$capideploy` is present and functional. 6. Prepare Capillaries binaries (build/linux/amd64) and data (/tmp/capi_in, /tmp/capi_cfg, /tmp/capi_out): ``` -$capideploy build_artifacts -prj=sampledeployment002.json +$capideploy build_artifacts -prj=sampledeployment.json ``` 7. 
Keep in mind that running deploy tool with `-verbose` parameter can be useful for troubleshooting. @@ -117,7 +117,7 @@ $capideploy build_artifacts -prj=sampledeployment002.json # Reserve a floating IP address, it will be assigned to the bastion instance # and will be your gateway to all of your instances: -$capideploy create_floating_ip -prj=sampledeployment002.json +$capideploy create_floating_ip -prj=sampledeployment.json # If successful, create_floating_ip command will ask you to: # - update your ~/.ssh/config with a new jumphost (this is by [start_cluster.sh](./start_cluster.sh), see below) @@ -125,57 +125,61 @@ $capideploy create_floating_ip -prj=sampledeployment002.json # Openstack networking and volumes -$capideploy create_security_groups -prj=sampledeployment002.json; -$capideploy create_networking -prj=sampledeployment002.json; -$capideploy create_volumes '*' -prj=sampledeployment002.json; +$capideploy create_security_groups -prj=sampledeployment.json; +$capideploy create_networking -prj=sampledeployment.json; +$capideploy create_volumes '*' -prj=sampledeployment.json; # Create all instances in one shot -$capideploy create_instances '*' -prj=sampledeployment002.json +$capideploy create_instances '*' -prj=sampledeployment.json # Make sure we can actually login to each instance. If an instance is # missing for too long, go to the provider console/logs for details -until $capideploy ping_instances '*' -prj=sampledeployment002.json; do - echo Ping failed, wait... 
- sleep 10 -done +until $capideploy ping_instances '*' -prj=sampledeployment.json; do echo "Ping failed, wait..."; sleep 10; done # Install all pre-requisite software -$capideploy install_services '*' -prj=sampledeployment002.json +$capideploy install_services '*' -prj=sampledeployment.json # Create sftp user on bastion host if needed and # allow these instances to connect to data via sftp -$capideploy create_instance_users bastion -prj=sampledeployment002.json -$capideploy copy_private_keys 'bastion,daemon*' -prj=sampledeployment002.json +$capideploy create_instance_users bastion -prj=sampledeployment.json +$capideploy copy_private_keys 'bastion,daemon*' -prj=sampledeployment.json # Attach bastion (and Cassandra, if needed) volumes, # make ssh_user (or sftp_user, if you use sftp instead of nfs) owner -$capideploy attach_volumes '*' -prj=sampledeployment002.json +$capideploy attach_volumes '*' -prj=sampledeployment.json # Now it's a good time to start Cassandra cluster in a SEPARATE shell session (that has `CAPIDEPLOY_*` environment variables set, see above). After strating it, and letting it run in parallel, you continue running command in the original shell session. -./start_cluster.sh sampledeployment002.json +./start_cluster.sh sampledeployment.json -# Upload binaries and their configs in one shot. Make sure you have all binaries and test data built before uploading them (see above). +# Upload binaries and their configs. Make sure you have all binaries and test data built before uploading them (see above). 
-$capideploy upload_files up_daemon_binary,up_daemon_env_config,up_webapi_env_config,up_webapi_binary,up_ui,up_toolbelt_env_config,up_toolbelt_binary,up_capiparquet_binary,up_diff_scripts -prj=sampledeployment002.json +$capideploy upload_files up_daemon_binary,up_daemon_env_config -prj=sampledeployment.json; +$capideploy upload_files up_webapi_binary,up_webapi_env_config -prj=sampledeployment.json; +$capideploy upload_files up_ui -prj=sampledeployment.json; +$capideploy upload_files up_toolbelt_binary,up_toolbelt_env_config -prj=sampledeployment.json; +$capideploy upload_files up_capiparquet_binary -prj=sampledeployment.json; +$capideploy upload_files up_diff_scripts -prj=sampledeployment.json; -# Upload test files in one shot +# Upload test files (pick those that you need) -$capideploy upload_files up_all_cfg,up_lookup_bigtest_in,up_lookup_bigtest_out,up_lookup_quicktest_in,up_lookup_quicktest_out -prj=sampledeployment002.json - -# If you want to run tag_and_denormalize_quicktest, py_calc_quicktest, and portfolio_quicktest upload corresponding data files - -$capideploy upload_files up_tag_and_denormalize_quicktest_in,up_tag_and_denormalize_quicktest_out,up_py_calc_quicktest_in,up_py_calc_quicktest_out,up_portfolio_quicktest_in,up_portfolio_quicktest_out -prj=sampledeployment002.json +$capideploy upload_files up_all_cfg -prj=sampledeployment.json; +$capideploy upload_files up_portfolio_bigtest_in,up_portfolio_bigtest_out -prj=sampledeployment.json; +$capideploy upload_files up_lookup_bigtest_in,up_lookup_bigtest_out -prj=sampledeployment.json; +$capideploy upload_files up_lookup_quicktest_in,up_lookup_quicktest_out -prj=sampledeployment.json; +$capideploy upload_files up_tag_and_denormalize_quicktest_in,up_tag_and_denormalize_quicktest_out -prj=sampledeployment.json; +$capideploy upload_files up_py_calc_quicktest_in,up_py_calc_quicktest_out -prj=sampledeployment.json; +$capideploy upload_files up_portfolio_quicktest_in,up_portfolio_quicktest_out 
-prj=sampledeployment.json; # Configure all services except Cassandra (which requires extra care), bastion first (it configs NFS) -$capideploy config_services bastion -prj=sampledeployment002.json -$capideploy config_services 'rabbitmq,prometheus,daemon*' -prj=sampledeployment002.json +$capideploy config_services bastion -prj=sampledeployment.json +$capideploy config_services 'rabbitmq,prometheus,daemon*' -prj=sampledeployment.json ``` ## Monitoring test environment @@ -278,6 +282,21 @@ or ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/capitoolbelt start_run -script_file=/mnt/capi_cfg/portfolio_quicktest/script.json -params_file=/mnt/capi_cfg/portfolio_quicktest/script_params.json -keyspace=portfolio_quicktest -start_nodes=1_read_accounts,1_read_txns,1_read_period_holdings' ``` +### portfolio_bigtest + +| Field | Value | +|- | - | +| Keyspace | portfolio_bigtest | +| Script URI | /mnt/capi_cfg/portfolio_bigtest/script.json | +| Script parameters URI | /mnt/capi_cfg/portfolio_bigtest/script_params.json | +| Start nodes | 1_read_accounts,1_read_txns,1_read_period_holdings | + +or + +``` +ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/capitoolbelt start_run -script_file=/mnt/capi_cfg/portfolio_bigtest/script.json -params_file=/mnt/capi_cfg/portfolio_bigtest/script_params.json -keyspace=portfolio_bigtest -start_nodes=1_read_accounts,1_read_txns,1_read_period_holdings' +``` + ## Results # capi_out @@ -285,15 +304,16 @@ ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTIO Download all results from capi_out (may take a while): ``` -$capideploy download_files down_capi_out -prj=sampledeployment002.json +$capideploy download_files down_capi_out -prj=sampledeployment.json ``` Alternatively, verify results against the baseline remotely: ``` -ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP 
'~/bin/compare_results_lookup_bigtest_parquet.sh' ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_lookup_quicktest.sh' -ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_portfolio.sh' +ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_lookup_bigtest_parquet.sh' +ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_portfolio_quicktest.sh' +ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_portfolio_bigtest_parquet.sh' ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_py_calc_quicktest.sh' ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTION_IP '~/bin/compare_results_tag_and_denormalize.sh' ``` @@ -303,7 +323,7 @@ ssh -o StrictHostKeyChecking=no -i ~/.ssh/sampledeployment002_rsa ubuntu@$BASTIO Download consolidated Daemon log (may be large, as debug logging is on by default): ``` -$capideploy download_files down_capi_logs -prj=sampledeployment002.json +$capideploy download_files down_capi_logs -prj=sampledeployment.json ``` Alternatively, check it out on bastion: @@ -319,14 +339,14 @@ less /var/log/capidaemon/capidaemon.log # Delete instances. Keep in mind that instances may be in a `deleting` state # even after this command is complete, check your cloud provider console to verify. 
-$capideploy delete_instances '*' -prj=sampledeployment002.json +$capideploy delete_instances '*' -prj=sampledeployment.json # Delete volumes, networking, security groups and floating ip -$capideploy delete_volumes '*' -prj=sampledeployment002.json; -$capideploy delete_networking -prj=sampledeployment002.json; -$capideploy delete_security_groups -prj=sampledeployment002.json; -$capideploy delete_floating_ip -prj=sampledeployment002.json; +$capideploy delete_volumes '*' -prj=sampledeployment.json; +$capideploy delete_networking -prj=sampledeployment.json; +$capideploy delete_security_groups -prj=sampledeployment.json; +$capideploy delete_floating_ip -prj=sampledeployment.json; ``` ## Q&A diff --git a/test/deploy/diff/compare_results_portfolio_bigtest_parquet.sh b/test/deploy/diff/compare_results_portfolio_bigtest_parquet.sh new file mode 100644 index 0000000..228a65b --- /dev/null +++ b/test/deploy/diff/compare_results_portfolio_bigtest_parquet.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +outDir=/mnt/capi_out/portfolio_bigtest +cmdDiff=~/bin/capiparquet +if ! $cmdDiff diff $outDir/account_year_perf.parquet $outDir/account_year_perf_baseline.parquet || + ! 
$cmdDiff diff $outDir/account_period_sector_perf.parquet $outDir/account_period_sector_perf_baseline.parquet; then + echo -e "portfolio_bigtest diff \033[0;31mFAILED\e[0m" + exit 1 +else + echo -e "portfolio_bigtest diff \033[0;32mOK\e[0m" +fi diff --git a/test/deploy/diff/compare_results_portfolio.sh b/test/deploy/diff/compare_results_portfolio_quicktest.sh similarity index 100% rename from test/deploy/diff/compare_results_portfolio.sh rename to test/deploy/diff/compare_results_portfolio_quicktest.sh diff --git a/test/deploy/samledeployment002.jsonnet b/test/deploy/samledeployment.jsonnet similarity index 73% rename from test/deploy/samledeployment002.jsonnet rename to test/deploy/samledeployment.jsonnet index 7b39ecd..c5dde4c 100644 --- a/test/deploy/samledeployment002.jsonnet +++ b/test/deploy/samledeployment.jsonnet @@ -1,17 +1,28 @@ { // Variables to play with - local cassandra_node_flavor = "2x", - local cassandra_total_nodes = 16, - local daemon_total_instances = 8, - local DEFAULT_DAEMON_THREAD_POOL_SIZE = '10', - local DEFAULT_DAEMON_DB_WRITERS = '10', + + // Choose your Openstack provider here. This script supports 002,003,004. + local dep_name = 'sampledeployment002', // Can be any combination of alphanumeric characters. Make it unique. + + // x - test bare minimum, 2x - better, 4x - decent test, 16x - that's where it gets interesting + local cassandra_node_flavor = "4x", + // Cassandra cluster size - 4,8,16 + local cassandra_total_nodes = 8, + // If tasks are CPU-intensive (Python calc), make it equal to cassandra_total_nodes, otherwise cassandra_total_nodes/2 + local daemon_total_instances = cassandra_total_nodes, + local DEFAULT_DAEMON_THREAD_POOL_SIZE = '8', // Depends on instance/cassandra perf + local DEFAULT_DAEMON_DB_WRITERS = '8', // Depends on instance/cassandra perf // Basics - local deployment_name = 'sampledeployment002', // Can be any combination of alphanumeric characters. Make it unique. 
- local default_root_key_name = deployment_name + '-root-key', // This should match the name of the keypair you already created in Openstack + local default_root_key_name = dep_name + '-root-key', // This should match the name of the keypair you already created in Openstack // Network - local external_gateway_network_name = 'ext-net', // This is what external network is called for this cloud provider, yours may be different + local external_gateway_network_name = // This is what external network is called for this cloud provider, yours may be different + if dep_name == 'sampledeployment002' then 'ext-net' + else if dep_name == 'sampledeployment003' then 'Ext-Net' + else if dep_name == 'sampledeployment004' then 'ext-floating1' + else 'unknown', + local subnet_cidr = '10.5.0.0/24', // Your choice local subnet_allocation_pool = 'start=10.5.0.240,end=10.5.0.254', // We use fixed ip addresses in the .0.2-.0.239 range, the rest is potentially available @@ -23,6 +34,7 @@ if daemon_total_instances == 2 then ['10.5.0.101', '10.5.0.102'] else if daemon_total_instances == 4 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104'] else if daemon_total_instances == 8 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104', '10.5.0.105', '10.5.0.106', '10.5.0.107', '10.5.0.108'] + else if daemon_total_instances == 16 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104', '10.5.0.105', '10.5.0.106', '10.5.0.107', '10.5.0.108', '10.5.0.109', '10.5.0.110', '10.5.0.111', '10.5.0.112', '10.5.0.113', '10.5.0.114', '10.5.0.115', '10.5.0.116'] else [], local cassandra_ips = if cassandra_total_nodes == 4 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14'] @@ -39,35 +51,112 @@ local cassandra_seeds = std.format('%s,%s', [cassandra_ips[0], cassandra_ips[1]]), // Used by cassandra nodes local cassandra_hosts = "'[\"" + std.join('","', cassandra_ips) + "\"]'", // Used by daemons 
"'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", - // Instance details - local default_availability_zone = 'us-central-1a', // Specified when volume/instance is created - local instance_image_name = 'ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw', - local instance_flavor_rabbitmq = 't5sd.large', - local instance_flavor_prometheus = 't5sd.large', - local instance_flavor_bastion = - if cassandra_node_flavor == "x" then 'c5sd.large' - else if cassandra_node_flavor == "2x" then 'c5sd.xlarge' - else if cassandra_node_flavor == "4x" then 'c5sd.2xlarge' - else "unknown", - local instance_flavor_cassandra = - if cassandra_node_flavor == "x" then 'c6asx.xlarge' - else if cassandra_node_flavor == "2x" then 'c6asx.2xlarge' - else if cassandra_node_flavor == "4x" then 'c6asx.4xlarge' - else "unknown", - local instance_flavor_daemon = - if cassandra_node_flavor == "x" then 'c5sd.large' - else if cassandra_node_flavor == "2x" then 'c5sd.xlarge' - else if cassandra_node_flavor == "4x" then 'c5sd.2xlarge' - else "unknown", + // Instances + local instance_availability_zone = + if dep_name == 'sampledeployment002' then 'us-central-1a' + else if dep_name == 'sampledeployment003' then 'nova' + else if dep_name == 'sampledeployment004' then 'dc3-a-09' + else 'unknown', + + local instance_image_name = // You may want to revisit it once a year + if dep_name == 'sampledeployment002' then 'ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw' + else if dep_name == 'sampledeployment003' then 'Ubuntu 23.04' + else if dep_name == 'sampledeployment004' then 'Ubuntu 22.04 LTS Jammy Jellyfish' + else 'unknown', + + local instance_flavor_rabbitmq = // Something modest + if dep_name == 'sampledeployment002' then 't5sd.large' + else if dep_name == 'sampledeployment003' then 'b2-7' + else if dep_name == 'sampledeployment004' then 'a1-ram2-disk20-perf1' + else 'unknown', + + local instance_flavor_prometheus = // 
Something modest + if dep_name == 'sampledeployment002' then 't5sd.large' + else if dep_name == 'sampledeployment003' then 'b2-7' + else if dep_name == 'sampledeployment004' then 'a1-ram2-disk20-perf1' + else 'unknown', + + local instance_flavor_bastion = // Something modest, but capable of serving as NFS server, Webapi, UI + if dep_name == 'sampledeployment002' then + if cassandra_node_flavor == "x" then 'c5sd.large' + else if cassandra_node_flavor == "2x" then 'c5sd.large' + else if cassandra_node_flavor == "4x" then 'c5sd.xlarge' + else "unknown" + else if dep_name == 'sampledeployment003' then + if cassandra_node_flavor == "x" then 'b2-7' + else if cassandra_node_flavor == "2x" then 'unknown' + else if cassandra_node_flavor == "4x" then 'unknown' + else "unknown" + else if dep_name == 'sampledeployment004' then + if cassandra_node_flavor == "x" then 'a1-ram2-disk20-perf1' + else if cassandra_node_flavor == "2x" then 'a1-ram2-disk20-perf1' + else if cassandra_node_flavor == "4x" then 'a1-ram2-disk20-perf1' + else if cassandra_node_flavor == "8x" then 'a1-ram2-disk20-perf1' + else if cassandra_node_flavor == "16x" then 'a1-ram2-disk20-perf1' + else "unknown" + else 'unknown', + + local instance_flavor_cassandra = // Fast/big everything: CPU, network, disk, RAM. 
Preferably local disk, preferably bare metal + if dep_name == 'sampledeployment002' then + if cassandra_node_flavor == "x" then 'c5d.xlarge' //'c6asx.xlarge' + else if cassandra_node_flavor == "2x" then 'c5d.2xlarge' //'c6asx.2xlarge' + else if cassandra_node_flavor == "4x" then 'c5d.4xlarge' //'m5d.4xlarge'//'c6asx.4xlarge' + else "unknown" + else if dep_name == 'sampledeployment003' then + if cassandra_node_flavor == "x" then 'b2-7' + else if cassandra_node_flavor == "2x" then 'unknown' + else if cassandra_node_flavor == "4x" then 'unknown' + else "unknown" + else if dep_name == 'sampledeployment004' then + if cassandra_node_flavor == "x" then 'a2-ram4-disk20-perf1' // They don't have perf2 version + else if cassandra_node_flavor == "2x" then 'a4-ram8-disk20-perf2' + else if cassandra_node_flavor == "4x" then 'a8-ram16-disk20-perf2' + else if cassandra_node_flavor == "8x" then 'a16-ram32-disk20-perf1' + else if cassandra_node_flavor == "16x" then 'a32-ram64-disk20-perf2' // They don't have perf1 + else "unknown" + else 'unknown', + + local instance_flavor_daemon = // Fast/big CPU, network, RAM. Disk optional. 
+ if dep_name == 'sampledeployment002' then + if cassandra_node_flavor == "x" then 'c6sd.large' + else if cassandra_node_flavor == "2x" then 'c6sd.xlarge' + else if cassandra_node_flavor == "4x" then 'c6sd.2xlarge' + else "unknown" + else if dep_name == 'sampledeployment003' then + if cassandra_node_flavor == "x" then 'b2-7' + else if cassandra_node_flavor == "2x" then 'unknown' + else if cassandra_node_flavor == "4x" then 'unknown' + else "unknown" + else if dep_name == 'sampledeployment004' then + if cassandra_node_flavor == "x" then 'a2-ram4-disk20-perf1' + else if cassandra_node_flavor == "2x" then 'a4-ram8-disk20-perf1' + else if cassandra_node_flavor == "4x" then 'a8-ram16-disk20-perf1' // For cluster16, need to stay within 200 vCpu quota, so no a8-ram16 for daemons + else if cassandra_node_flavor == "8x" then 'a8-ram16-disk20-perf1' // For cluster16, need to stay within 200 vCpu quota, so no a8-ram16 for daemons + else if cassandra_node_flavor == "16x" then 'a16-ram32-disk20-perf1' + else "unknown" + else 'unknown', + + // Volumes + local volume_availability_zone = + if dep_name == 'sampledeployment002' then 'us-central-1a' + else if dep_name == 'sampledeployment003' then 'nova' + else if dep_name == 'sampledeployment004' then 'nova' + else 'unknown', + + local volume_type = // Something modest to store in/out data and cfg + if dep_name == 'sampledeployment002' then 'gp1' + else if dep_name == 'sampledeployment003' then 'classic' + else if dep_name == 'sampledeployment004' then 'CEPH_1_perf1' + else 'unknown', // Artifacts local buildLinuxAmd64Dir = '../../build/linux/amd64', local pkgExeDir = '../../pkg/exe', // Keys - local sftp_config_public_key_path = '~/.ssh/sampledeployment002_sftp.pub', - local sftp_config_private_key_path = '~/.ssh/sampledeployment002_sftp', - local ssh_config_private_key_path = '~/.ssh/sampledeployment002_rsa', + local sftp_config_public_key_path = '~/.ssh/' + dep_name + '_sftp.pub', + local sftp_config_private_key_path = '~/.ssh/' 
+ dep_name + '_sftp', + local ssh_config_private_key_path = '~/.ssh/' + dep_name + '_rsa', // Prometheus versions local prometheus_node_exporter_version = '1.6.0', @@ -131,20 +220,20 @@ }, network: { - name: deployment_name + '_network', + name: dep_name + '_network', subnet: { - name: deployment_name + '_subnet', + name: dep_name + '_subnet', cidr: subnet_cidr, allocation_pool: subnet_allocation_pool, }, router: { - name: deployment_name + '_router', + name: dep_name + '_router', external_gateway_network_name: external_gateway_network_name, }, }, security_groups: { bastion: { - name: deployment_name + '_bastion_security_group', + name: dep_name + '_bastion_security_group', rules: [ { desc: 'SSH', @@ -237,7 +326,7 @@ ], }, internal: { - name: deployment_name + '_internal_security_group', + name: dep_name + '_internal_security_group', rules: [ { desc: 'SSH', @@ -421,6 +510,36 @@ owner: $.ssh_config.user, after: {}, }, + up_portfolio_bigtest_in: { + src: '/tmp/capi_in/portfolio_bigtest/all.tgz', + dst: '/mnt/capi_in/portfolio_bigtest', + dir_permissions: 777, + file_permissions: 666, + owner: $.ssh_config.user, + after: { + env: { + OWNER_USER: $.ssh_config.user, + }, + cmd: [ + 'sh/capiscripts/unpack_portfolio_big_in.sh', + ], + }, + }, + up_portfolio_bigtest_out: { + src: '/tmp/capi_out/portfolio_bigtest/all.tgz', + dst: '/mnt/capi_out/portfolio_bigtest', + dir_permissions: 777, + file_permissions: 666, + owner: $.ssh_config.user, + after: { + env: { + OWNER_USER: $.ssh_config.user, + }, + cmd: [ + 'sh/capiscripts/unpack_portfolio_big_out.sh', + ], + }, + }, up_portfolio_quicktest_in: { src: '/tmp/capi_in/portfolio_quicktest', dst: '/mnt/capi_in/portfolio_quicktest', @@ -534,39 +653,39 @@ local bastion_instance = { bastion: { - host_name: deployment_name + '-bastion', + host_name: dep_name + '-bastion', security_group: 'bastion', root_key_name: default_root_key_name, ip_address: internal_bastion_ip, uses_ssh_config_external_ip_address: true, flavor: 
instance_flavor_bastion, image: instance_image_name, - availability_zone: default_availability_zone, + availability_zone: instance_availability_zone, volumes: { cfg: { - name: deployment_name + '_cfg', - availability_zone: default_availability_zone, + name: dep_name + '_cfg', + availability_zone: volume_availability_zone, mount_point: '/mnt/capi_cfg', size: 1, - type: 'gp1', + type: volume_type, permissions: 777, owner: $.ssh_config.user, // If SFTP used: "{CAPIDEPLOY_SFTP_USER}" }, 'in': { - name: deployment_name + '_in', - availability_zone: default_availability_zone, + name: dep_name + '_in', + availability_zone: volume_availability_zone, mount_point: '/mnt/capi_in', size: 1, - type: 'gp1', + type: volume_type, permissions: 777, owner: $.ssh_config.user, }, out: { - name: deployment_name + '_out', - availability_zone: default_availability_zone, + name: dep_name + '_out', + availability_zone: volume_availability_zone, mount_point: '/mnt/capi_out', size: 1, - type: 'gp1', + type: volume_type, permissions: 777, owner: $.ssh_config.user, }, @@ -637,6 +756,8 @@ 'up_tag_and_denormalize_quicktest_out', 'up_py_calc_quicktest_in', 'up_py_calc_quicktest_out', + 'up_portfolio_bigtest_in', + 'up_portfolio_bigtest_out', 'up_portfolio_quicktest_in', 'up_portfolio_quicktest_out', 'up_webapi_binary', @@ -654,13 +775,13 @@ local rabbitmq_instance = { rabbitmq: { - host_name: deployment_name + '-rabbitmq', + host_name: dep_name + '-rabbitmq', security_group: 'internal', root_key_name: default_root_key_name, ip_address: rabbitmq_ip, flavor: instance_flavor_rabbitmq, image: instance_image_name, - availability_zone: default_availability_zone, + availability_zone: instance_availability_zone, service: { env: { PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, @@ -692,13 +813,13 @@ local prometheus_instance = { prometheus: { - host_name: deployment_name + '-prometheus', + host_name: dep_name + '-prometheus', security_group: 'internal', root_key_name: 
default_root_key_name, ip_address: prometheus_ip, flavor: instance_flavor_prometheus, image: instance_image_name, - availability_zone: default_availability_zone, + availability_zone: instance_availability_zone, service: { env: { PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, @@ -734,7 +855,7 @@ ip_address: e.ip_address, flavor: instance_flavor_cassandra, image: instance_image_name, - availability_zone: default_availability_zone, + availability_zone: instance_availability_zone, service: { env: { CASSANDRA_IP: e.ip_address, @@ -764,7 +885,7 @@ } for e in std.mapWithIndex(function(i, v) { nickname: std.format('cass%03d', i + 1), - host_name: deployment_name + '-' + self.nickname, + host_name: dep_name + '-' + self.nickname, token: cassandra_tokens[i], ip_address: v, }, cassandra_ips) @@ -778,7 +899,7 @@ ip_address: e.ip_address, flavor: instance_flavor_daemon, image: instance_image_name, - availability_zone: default_availability_zone, + availability_zone: instance_availability_zone, private_keys: [ { name: '{CAPIDEPLOY_SFTP_USER}', @@ -826,7 +947,7 @@ } for e in std.mapWithIndex(function(i, v) { nickname: std.format('daemon%03d', i + 1), - host_name: deployment_name + '-' + self.nickname, + host_name: dep_name + '-' + self.nickname, ip_address: v, }, daemon_ips) }, diff --git a/test/deploy/samledeployment003.jsonnet b/test/deploy/samledeployment003.jsonnet deleted file mode 100644 index 5b70f17..0000000 --- a/test/deploy/samledeployment003.jsonnet +++ /dev/null @@ -1,835 +0,0 @@ -{ - // Variables to play with - local cassandra_node_flavor = "x", - local cassandra_total_nodes = 4, - local daemon_total_instances = 2, - local DEFAULT_DAEMON_THREAD_POOL_SIZE = '5', - local DEFAULT_DAEMON_DB_WRITERS = '5', - - // Basics - local deployment_name = 'sampledeployment003', // Can be any combination of alphanumeric characters. Make it unique. 
- local default_root_key_name = deployment_name + '-root-key', // This should match the name of the keypair you already created in Openstack - - // Network - local external_gateway_network_name = 'Ext-Net', // This is what external network is called for this cloud provider, yours may be different - local subnet_cidr = '10.5.0.0/24', // Your choice - local subnet_allocation_pool = 'start=10.5.0.240,end=10.5.0.254', // We use fixed ip addresses in the .0.2-.0.239 range, the rest is potentially available - - // Internal IPs - local internal_bastion_ip = '10.5.0.10', - local prometheus_ip = '10.5.0.4', - local rabbitmq_ip = '10.5.0.5', - local daemon_ips = - if daemon_total_instances == 2 then ['10.5.0.101', '10.5.0.102'] - else if daemon_total_instances == 4 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104'] - else if daemon_total_instances == 8 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104', '10.5.0.105', '10.5.0.106', '10.5.0.107', '10.5.0.108'] - else [], - local cassandra_ips = - if cassandra_total_nodes == 4 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14'] - else if cassandra_total_nodes == 8 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14', '10.5.0.15', '10.5.0.16', '10.5.0.17', '10.5.0.18'] - else if cassandra_total_nodes == 16 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14', '10.5.0.15', '10.5.0.16', '10.5.0.17', '10.5.0.18', '10.5.0.19', '10.5.0.20', '10.5.0.21', '10.5.0.22', '10.5.0.23', '10.5.0.24', '10.5.0.25', '10.5.0.26'] - else [], - - // Cassandra-specific - local cassandra_tokens = // Initial tokens to speedup bootstrapping - if cassandra_total_nodes == 4 then ['-9223372036854775808', '-4611686018427387904', '0', '4611686018427387904'] - else if cassandra_total_nodes == 8 then ['-9223372036854775808', '-6917529027641081856', '-4611686018427387904', '-2305843009213693952', '0', '2305843009213693952', '4611686018427387904', '6917529027641081856'] - else if cassandra_total_nodes == 16 then 
['-9223372036854775808','-8070450532247928832','-6917529027641081856','-5764607523034234880','-4611686018427387904','-3458764513820540928','-2305843009213693952','-1152921504606846976','0','1152921504606846976','2305843009213693952','3458764513820540928','4611686018427387904','5764607523034234880','6917529027641081856','8070450532247928832'] - else [], - local cassandra_seeds = std.format('%s,%s', [cassandra_ips[0], cassandra_ips[1]]), // Used by cassandra nodes - local cassandra_hosts = "'[\"" + std.join('","', cassandra_ips) + "\"]'", // Used by daemons "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", - - // Instance details - local default_availability_zone = 'nova', // Specified when volume/instance is created - local instance_image_name = 'Ubuntu 23.04', - local instance_flavor_rabbitmq = 'b2-7', - local instance_flavor_prometheus = 'b2-7', - local instance_flavor_bastion = - if cassandra_node_flavor == "x" then 'b2-7' - else if cassandra_node_flavor == "2x" then '' - else if cassandra_node_flavor == "4x" then '' - else "unknown", - local instance_flavor_cassandra = - if cassandra_node_flavor == "x" then 'b2-7' - else if cassandra_node_flavor == "2x" then '' - else if cassandra_node_flavor == "4x" then '' - else "unknown", - local instance_flavor_daemon = - if cassandra_node_flavor == "x" then 'b2-7' - else if cassandra_node_flavor == "2x" then '' - else if cassandra_node_flavor == "4x" then '' - else "unknown", - - // Artifacts - local buildLinuxAmd64Dir = '../../build/linux/amd64', - local pkgExeDir = '../../pkg/exe', - - // Keys - local sftp_config_public_key_path = '~/.ssh/sampledeployment003_sftp.pub', - local sftp_config_private_key_path = '~/.ssh/sampledeployment003_sftp', - local ssh_config_private_key_path = '~/.ssh/sampledeployment003_rsa', - - // Prometheus versions - local prometheus_node_exporter_version = '1.6.0', - local prometheus_server_version = '2.45.0', - local 
prometheus_cassandra_exporter_version = '0.9.12', - - // Used by Prometheus "\\'localhost:9100\\',\\'10.5.0.10:9100\\',\\'10.5.0.5:9100\\',\\'10.5.0.11:9100\\'...", - local prometheus_targets = std.format("\\'localhost:9100\\',\\'%s:9100\\',\\'%s:9100\\',", [internal_bastion_ip, rabbitmq_ip]) + - "\\'" + std.join(":9100\\',\\'", cassandra_ips) + ":9100\\'," + - "\\'" + std.join(":9500\\',\\'", cassandra_ips) + ":9500\\'," + // Cassandra exporter - "\\'" + std.join(":9100\\',\\'", daemon_ips) + ":9100\\'", - - // Full list of env variables expected by capideploy working with this project - env_variables_used: [ - // Used in this config - 'CAPIDEPLOY_SSH_USER', - 'CAPIDEPLOY_SSH_PRIVATE_KEY_PASS', - 'CAPIDEPLOY_SFTP_USER', - 'CAPIDEPLOY_RABBITMQ_ADMIN_NAME', - 'CAPIDEPLOY_RABBITMQ_ADMIN_PASS', - 'CAPIDEPLOY_RABBITMQ_USER_NAME', - 'CAPIDEPLOY_RABBITMQ_USER_PASS', - // Used in by Capideploy Openstack calls - 'OS_AUTH_URL', - 'OS_IDENTITY_API_VERSION', - 'OS_INTERFACE', - 'OS_REGION_NAME', - 'OS_PASSWORD', - 'OS_PROJECT_DOMAIN_ID', - 'OS_PROJECT_ID', - 'OS_PROJECT_NAME', - 'OS_USERNAME', - 'OS_USER_DOMAIN_NAME', - ], - ssh_config: { - external_ip_address: '', - port: 22, - user: '{CAPIDEPLOY_SSH_USER}', - private_key_path: ssh_config_private_key_path, - private_key_password: '{CAPIDEPLOY_SSH_PRIVATE_KEY_PASS}', - }, - timeouts: { - openstack_cmd: 60, - openstack_instance_creation: 240, - attach_volume: 60, - }, - - // It's unlikely that you need to change anything below this line - - artifacts: { - env: { - DIR_BUILD_LINUX_AMD64: '../../' + buildLinuxAmd64Dir, - DIR_PKG_EXE: '../../' + pkgExeDir, - DIR_CODE_PARQUET: '../../../code/parquet', - }, - cmd: [ - 'sh/local/build_binaries.sh', - 'sh/local/build_webui.sh', - 'sh/local/prepare_demo_data.sh', - ], - }, - - network: { - name: deployment_name + '_network', - subnet: { - name: deployment_name + '_subnet', - cidr: subnet_cidr, - allocation_pool: subnet_allocation_pool, - }, - router: { - name: deployment_name + 
'_router', - external_gateway_network_name: external_gateway_network_name, - }, - }, - security_groups: { - bastion: { - name: deployment_name + '_bastion_security_group', - rules: [ - { - desc: 'SSH', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 22, - direction: 'ingress', - }, - { - desc: 'NFS PortMapper TCP', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 111, - direction: 'ingress', - }, - { - desc: 'NFS PortMapper UDP', - protocol: 'udp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 111, - direction: 'ingress', - }, - { - desc: 'NFS Server TCP', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 2049, - direction: 'ingress', - }, - { - desc: 'NFS Server UDP', - protocol: 'udp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 2049, - direction: 'ingress', - }, - { - desc: 'Prometheus UI reverse proxy', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 9090, - direction: 'ingress', - }, - { - desc: 'Prometheus node exporter', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9100, - direction: 'ingress', - }, - { - desc: 'RabbitMQ UI reverse proxy', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 15672, - direction: 'ingress', - }, - { - desc: 'rsyslog receiver', - protocol: 'udp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 514, - direction: 'ingress', - }, - { - desc: 'Capillaries webapi', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 6543, - direction: 'ingress', - }, - { - desc: 'Capillaries UI nginx', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 80, - direction: 'ingress', - }, - ], - }, - internal: { - name: deployment_name + '_internal_security_group', - rules: [ - { - desc: 'SSH', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - 
port: 22, - direction: 'ingress', - }, - { - desc: 'Prometheus UI internal', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9090, - direction: 'ingress', - }, - { - desc: 'Prometheus node exporter', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9100, - direction: 'ingress', - }, - { - desc: 'Cassandra Prometheus node exporter', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9500, - direction: 'ingress', - }, - { - desc: 'Cassandra JMX', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 7199, - direction: 'ingress', - }, - { - desc: 'Cassandra cluster comm', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 7000, - direction: 'ingress', - }, - { - desc: 'Cassandra API', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9042, - direction: 'ingress', - }, - { - desc: 'RabbitMQ API', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 5672, - direction: 'ingress', - }, - { - desc: 'RabbitMQ UI', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 15672, - direction: 'ingress', - }, - ], - }, - }, - file_groups_up: { - up_all_cfg: { - src: '/tmp/capi_cfg', - dst: '/mnt/capi_cfg', - dir_permissions: 777, - file_permissions: 666, - owner: '{CAPIDEPLOY_SSH_USER}', - after: { - env: { - LOCAL_CFG_LOCATION: '/mnt/capi_cfg', - MOUNT_POINT_CFG: '/mnt/capi_cfg', // If SFTP used: 'sftp://{CAPIDEPLOY_SFTP_USER}@' + internal_bastion_ip + '/mnt/capi_cfg', - MOUNT_POINT_IN: '/mnt/capi_in', - MOUNT_POINT_OUT: '/mnt/capi_out', - }, - cmd: [ - 'sh/capiscripts/adjust_cfg_in_out.sh', - ], - }, - }, - up_capiparquet_binary: { - src: buildLinuxAmd64Dir + '/capiparquet.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + 
$.ssh_config.user + '/bin/capiparquet', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_daemon_binary: { - src: buildLinuxAmd64Dir + '/capidaemon.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + $.ssh_config.user + '/bin/capidaemon', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_daemon_env_config: { - src: pkgExeDir + '/daemon/capidaemon.json', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: {}, - }, - up_diff_scripts: { - src: './diff', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 744, - after: {}, - }, - up_lookup_bigtest_in: { - src: '/tmp/capi_in/lookup_bigtest/all.tgz', - dst: '/mnt/capi_in/lookup_bigtest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: { - env: { - OWNER_USER: $.ssh_config.user, - }, - cmd: [ - 'sh/capiscripts/unpack_lookup_big_in.sh', - ], - }, - }, - up_lookup_bigtest_out: { - src: '/tmp/capi_out/lookup_bigtest/all.tgz', - dst: '/mnt/capi_out/lookup_bigtest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: { - env: { - OWNER_USER: $.ssh_config.user, - }, - cmd: [ - 'sh/capiscripts/unpack_lookup_big_out.sh', - ], - }, - }, - up_lookup_quicktest_in: { - src: '/tmp/capi_in/lookup_quicktest', - dst: '/mnt/capi_in/lookup_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_lookup_quicktest_out: { - src: '/tmp/capi_out/lookup_quicktest', - dst: '/mnt/capi_out/lookup_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_portfolio_quicktest_in: { - src: '/tmp/capi_in/portfolio_quicktest', - dst: '/mnt/capi_in/portfolio_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, 
- after: {}, - }, - up_portfolio_quicktest_out: { - src: '/tmp/capi_out/portfolio_quicktest', - dst: '/mnt/capi_out/portfolio_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_py_calc_quicktest_in: { - src: '/tmp/capi_in/py_calc_quicktest', - dst: '/mnt/capi_in/py_calc_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_py_calc_quicktest_out: { - src: '/tmp/capi_out/py_calc_quicktest', - dst: '/mnt/capi_out/py_calc_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_tag_and_denormalize_quicktest_in: { - src: '/tmp/capi_in/tag_and_denormalize_quicktest', - dst: '/mnt/capi_in/tag_and_denormalize_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_tag_and_denormalize_quicktest_out: { - src: '/tmp/capi_out/tag_and_denormalize_quicktest', - dst: '/mnt/capi_out/tag_and_denormalize_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_toolbelt_binary: { - src: buildLinuxAmd64Dir + '/capitoolbelt.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + $.ssh_config.user + '/bin/capitoolbelt', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_toolbelt_env_config: { - src: pkgExeDir + '/toolbelt/capitoolbelt.json', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: {}, - }, - up_ui: { - src: '../../ui/public', - dst: '/home/' + $.ssh_config.user + '/ui', - dir_permissions: 755, - file_permissions: 644, - after: {}, - }, - up_webapi_binary: { - src: buildLinuxAmd64Dir + '/capiwebapi.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: 
'/home/' + $.ssh_config.user + '/bin/capiwebapi', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_webapi_env_config: { - src: pkgExeDir + '/webapi/capiwebapi.json', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: {}, - }, - }, - file_groups_down: { - down_capi_logs: { - src: '/var/log/capidaemon/', - dst: './tmp/capi_logs', - }, - down_capi_out: { - src: '/mnt/capi_out', - dst: './tmp/capi_out', - }, - }, - - // Only alphanumeric characters allowed in instance names! No underscores, no dashes, no dots, no spaces - nada. - - local bastion_instance = { - bastion: { - host_name: deployment_name + '-bastion', - security_group: 'bastion', - root_key_name: default_root_key_name, - ip_address: internal_bastion_ip, - uses_ssh_config_external_ip_address: true, - flavor: instance_flavor_bastion, - image: instance_image_name, - availability_zone: default_availability_zone, - volumes: { - cfg: { - name: deployment_name + '_cfg', - availability_zone: default_availability_zone, - mount_point: '/mnt/capi_cfg', - size: 1, - type: 'classic', - permissions: 777, - owner: $.ssh_config.user, // If SFTP used: "{CAPIDEPLOY_SFTP_USER}" - }, - 'in': { - name: deployment_name + '_in', - availability_zone: default_availability_zone, - mount_point: '/mnt/capi_in', - size: 1, - type: 'classic', - permissions: 777, - owner: $.ssh_config.user, - }, - out: { - name: deployment_name + '_out', - availability_zone: default_availability_zone, - mount_point: '/mnt/capi_out', - size: 1, - type: 'classic', - permissions: 777, - owner: $.ssh_config.user, - }, - }, - users: [ - { - name: '{CAPIDEPLOY_SFTP_USER}', - public_key_path: sftp_config_public_key_path, - }, - ], - private_keys: [ - { - name: '{CAPIDEPLOY_SFTP_USER}', - private_key_path: sftp_config_private_key_path, - }, - ], - service: { - env: { - AMQP_URL: 'amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@' + rabbitmq_ip + '/', - 
CASSANDRA_HOSTS: cassandra_hosts, - NFS_DIRS: '/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out', - PROMETHEUS_IP: prometheus_ip, - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - RABBITMQ_IP: rabbitmq_ip, - SFTP_USER: '{CAPIDEPLOY_SFTP_USER}', - SSH_USER: $.ssh_config.user, - SUBNET_CIDR: $.network.subnet.cidr, - EXTERNAL_IP_ADDRESS: '{EXTERNAL_IP_ADDRESS}', // internal: capideploy populates it from ssh_config.external_ip_address after loading project file; used by webui and webapi config.sh - WEBAPI_PORT: '6543', - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/common/increase_ssh_connection_limit.sh', - 'sh/prometheus/install_node_exporter.sh', - 'sh/nfs/install_server.sh', - 'sh/nginx/install.sh', - ], - config: [ - 'sh/prometheus/config_node_exporter.sh', - 'sh/rsyslog/config_capidaemon_log_receiver.sh', - 'sh/logrotate/config_capidaemon_logrotate.sh', - 'sh/toolbelt/config.sh', - 'sh/webapi/config.sh', - 'sh/ui/config.sh', - 'sh/nginx/config_ui.sh', - 'sh/nfs/config_server.sh', - 'sh/nginx/config_prometheus_reverse_proxy.sh', - 'sh/nginx/config_rabbitmq_reverse_proxy.sh', - ], - start: [ - 'sh/webapi/start.sh', - 'sh/nginx/start.sh', - ], - stop: [ - 'sh/webapi/stop.sh', - 'sh/nginx/stop.sh', - ], - }, - }, - applicable_file_groups: [ - 'up_all_cfg', - 'up_lookup_bigtest_in', - 'up_lookup_bigtest_out', - 'up_lookup_quicktest_in', - 'up_lookup_quicktest_out', - 'up_tag_and_denormalize_quicktest_in', - 'up_tag_and_denormalize_quicktest_out', - 'up_py_calc_quicktest_in', - 'up_py_calc_quicktest_out', - 'up_portfolio_quicktest_in', - 'up_portfolio_quicktest_out', - 'up_webapi_binary', - 'up_webapi_env_config', - 'up_toolbelt_binary', - 'up_toolbelt_env_config', - 'up_capiparquet_binary', - 'up_ui', - 'up_diff_scripts', - 'down_capi_out', - 'down_capi_logs', - ], - }, - }, - - local rabbitmq_instance = { - rabbitmq: { - host_name: deployment_name + '-rabbitmq', - security_group: 'internal', - root_key_name: default_root_key_name, 
- ip_address: rabbitmq_ip, - flavor: instance_flavor_rabbitmq, - image: instance_image_name, - availability_zone: default_availability_zone, - service: { - env: { - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - RABBITMQ_ADMIN_NAME: '{CAPIDEPLOY_RABBITMQ_ADMIN_NAME}', - RABBITMQ_ADMIN_PASS: '{CAPIDEPLOY_RABBITMQ_ADMIN_PASS}', - RABBITMQ_USER_NAME: '{CAPIDEPLOY_RABBITMQ_USER_NAME}', - RABBITMQ_USER_PASS: '{CAPIDEPLOY_RABBITMQ_USER_PASS}', - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/prometheus/install_node_exporter.sh', - 'sh/rabbitmq/install.sh', - ], - config: [ - 'sh/prometheus/config_node_exporter.sh', - 'sh/rabbitmq/config.sh', - ], - start: [ - 'sh/rabbitmq/start.sh', - ], - stop: [ - 'sh/rabbitmq/stop.sh', - ], - }, - }, - }, - }, - - local prometheus_instance = { - prometheus: { - host_name: deployment_name + '-prometheus', - security_group: 'internal', - root_key_name: default_root_key_name, - ip_address: prometheus_ip, - flavor: instance_flavor_prometheus, - image: instance_image_name, - availability_zone: default_availability_zone, - service: { - env: { - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - PROMETHEUS_TARGETS: prometheus_targets, - PROMETHEUS_VERSION: prometheus_server_version, - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/prometheus/install_server.sh', - 'sh/prometheus/install_node_exporter.sh', - ], - config: [ - 'sh/prometheus/config_server.sh', - 'sh/prometheus/config_node_exporter.sh', - ], - start: [ - 'sh/prometheus/start_server.sh', - ], - stop: [ - 'sh/prometheus/stop_server.sh', - ], - }, - }, - }, - }, - - local cass_instances = { - [e.nickname]: { - host_name: e.host_name, - security_group: 'internal', - root_key_name: default_root_key_name, - ip_address: e.ip_address, - flavor: instance_flavor_cassandra, - image: instance_image_name, - availability_zone: default_availability_zone, - service: { - env: { - CASSANDRA_IP: e.ip_address, - 
CASSANDRA_SEEDS: cassandra_seeds, - INITIAL_TOKEN: e.token, - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - PROMETHEUS_CASSANDRA_EXPORTER_VERSION: prometheus_cassandra_exporter_version, - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/prometheus/install_node_exporter.sh', - 'sh/cassandra/install.sh', - ], - config: [ - 'sh/prometheus/config_node_exporter.sh', - 'sh/cassandra/config.sh', - ], - start: [ - 'sh/cassandra/start.sh', - ], - stop: [ - 'sh/cassandra/stop.sh', - ], - }, - }, - } - for e in std.mapWithIndex(function(i, v) { - nickname: std.format('cass%03d', i + 1), - host_name: deployment_name + '-' + self.nickname, - token: cassandra_tokens[i], - ip_address: v, - }, cassandra_ips) - }, - - local daemon_instances = { - [e.nickname]: { - host_name: e.host_name, - security_group: 'internal', - root_key_name: default_root_key_name, - ip_address: e.ip_address, - flavor: instance_flavor_daemon, - image: instance_image_name, - availability_zone: default_availability_zone, - private_keys: [ - { - name: '{CAPIDEPLOY_SFTP_USER}', - private_key_path: sftp_config_private_key_path, - }, - ], - service: { - env: { - AMQP_URL: 'amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@' + rabbitmq_ip + '/', - CASSANDRA_HOSTS: cassandra_hosts, - DAEMON_THREAD_POOL_SIZE: DEFAULT_DAEMON_THREAD_POOL_SIZE, - DAEMON_DB_WRITERS: DEFAULT_DAEMON_DB_WRITERS, - INTERNAL_BASTION_IP: internal_bastion_ip, - NFS_DIRS: '/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out', - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - SFTP_USER: '{CAPIDEPLOY_SFTP_USER}', - SSH_USER: $.ssh_config.user, - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/nfs/install_client.sh', - "sh/daemon/install.sh", - 'sh/prometheus/install_node_exporter.sh', - ], - config: [ - 'sh/nfs/config_client.sh', - 'sh/logrotate/config_capidaemon_logrotate.sh', - 'sh/rsyslog/config_capidaemon_log_sender.sh', - 
'sh/prometheus/config_node_exporter.sh', - 'sh/daemon/config.sh', - ], - start: [ - 'sh/daemon/start.sh', - ], - stop: [ - 'sh/daemon/stop.sh', - ], - }, - }, - applicable_file_groups: [ - 'up_daemon_binary', - 'up_daemon_env_config', - ], - } - for e in std.mapWithIndex(function(i, v) { - nickname: std.format('daemon%03d', i + 1), - host_name: deployment_name + '-' + self.nickname, - ip_address: v, - }, daemon_ips) - }, - - instances: bastion_instance + rabbitmq_instance + prometheus_instance + cass_instances + daemon_instances, -} diff --git a/test/deploy/samledeployment004.jsonnet b/test/deploy/samledeployment004.jsonnet deleted file mode 100644 index 287bba4..0000000 --- a/test/deploy/samledeployment004.jsonnet +++ /dev/null @@ -1,835 +0,0 @@ -{ - // Variables to play with - local cassandra_node_flavor = "4x", // x, 2x,4x - local cassandra_total_nodes = 4, // 4,8,16 - local daemon_total_instances = 2, // 2,4,8 - local DEFAULT_DAEMON_THREAD_POOL_SIZE = '10', - local DEFAULT_DAEMON_DB_WRITERS = '10', - - // Basics - local deployment_name = 'sampledeployment004', // Can be any combination of alphanumeric characters. Make it unique. 
- local default_root_key_name = deployment_name + '-root-key', // This should match the name of the keypair you already created in Openstack - - // Network - local external_gateway_network_name = 'ext-floating1', // This is what external network is called for this cloud provider, yours may be different - local subnet_cidr = '10.5.0.0/24', // Your choice - local subnet_allocation_pool = 'start=10.5.0.240,end=10.5.0.254', // We use fixed ip addresses in the .0.2-.0.239 range, the rest is potentially available - - // Internal IPs - local internal_bastion_ip = '10.5.0.10', - local prometheus_ip = '10.5.0.4', - local rabbitmq_ip = '10.5.0.5', - local daemon_ips = - if daemon_total_instances == 2 then ['10.5.0.101', '10.5.0.102'] - else if daemon_total_instances == 4 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104'] - else if daemon_total_instances == 8 then ['10.5.0.101', '10.5.0.102', '10.5.0.103', '10.5.0.104', '10.5.0.105', '10.5.0.106', '10.5.0.107', '10.5.0.108'] - else [], - local cassandra_ips = - if cassandra_total_nodes == 4 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14'] - else if cassandra_total_nodes == 8 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14', '10.5.0.15', '10.5.0.16', '10.5.0.17', '10.5.0.18'] - else if cassandra_total_nodes == 16 then ['10.5.0.11', '10.5.0.12', '10.5.0.13', '10.5.0.14', '10.5.0.15', '10.5.0.16', '10.5.0.17', '10.5.0.18', '10.5.0.19', '10.5.0.20', '10.5.0.21', '10.5.0.22', '10.5.0.23', '10.5.0.24', '10.5.0.25', '10.5.0.26'] - else [], - - // Cassandra-specific - local cassandra_tokens = // Initial tokens to speedup bootstrapping - if cassandra_total_nodes == 4 then ['-9223372036854775808', '-4611686018427387904', '0', '4611686018427387904'] - else if cassandra_total_nodes == 8 then ['-9223372036854775808', '-6917529027641081856', '-4611686018427387904', '-2305843009213693952', '0', '2305843009213693952', '4611686018427387904', '6917529027641081856'] - else if cassandra_total_nodes == 16 then 
['-9223372036854775808','-8070450532247928832','-6917529027641081856','-5764607523034234880','-4611686018427387904','-3458764513820540928','-2305843009213693952','-1152921504606846976','0','1152921504606846976','2305843009213693952','3458764513820540928','4611686018427387904','5764607523034234880','6917529027641081856','8070450532247928832'] - else [], - local cassandra_seeds = std.format('%s,%s', [cassandra_ips[0], cassandra_ips[1]]), // Used by cassandra nodes - local cassandra_hosts = "'[\"" + std.join('","', cassandra_ips) + "\"]'", // Used by daemons "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", - - // Instance details - local default_availability_zone = 'dc3-a-09', // Specified when volume/instance is created - local instance_image_name = 'Ubuntu 22.04 LTS Jammy Jellyfish', - local instance_flavor_rabbitmq = 'a1-ram2-disk20-perf1', - local instance_flavor_prometheus = 'a1-ram2-disk20-perf1', - local instance_flavor_bastion = - if cassandra_node_flavor == "x" then 'a1-ram2-disk20-perf1' - else if cassandra_node_flavor == "2x" then 'a1-ram2-disk20-perf1' - else if cassandra_node_flavor == "4x" then 'a1-ram2-disk20-perf1' - else "unknown", - local instance_flavor_cassandra = - if cassandra_node_flavor == "x" then 'a2-ram4-disk20-perf1' - else if cassandra_node_flavor == "2x" then 'a4-ram8-disk20-perf2' - else if cassandra_node_flavor == "4x" then 'a8-ram16-disk20-perf2' - else "unknown", - local instance_flavor_daemon = - if cassandra_node_flavor == "x" then 'a1-ram2-disk20-perf1' - else if cassandra_node_flavor == "2x" then 'a2-ram4-disk20-perf1' - else if cassandra_node_flavor == "4x" then 'a4-ram8-disk20-perf1' - else "unknown", - - // Artifacts - local buildLinuxAmd64Dir = '../../build/linux/amd64', - local pkgExeDir = '../../pkg/exe', - - // Keys - local sftp_config_public_key_path = '~/.ssh/sampledeployment004_sftp.pub', - local sftp_config_private_key_path = 
'~/.ssh/sampledeployment004_sftp', - local ssh_config_private_key_path = '~/.ssh/sampledeployment004_rsa', - - // Prometheus versions - local prometheus_node_exporter_version = '1.6.0', - local prometheus_server_version = '2.45.0', - local prometheus_cassandra_exporter_version = '0.9.12', - - // Used by Prometheus "\\'localhost:9100\\',\\'10.5.0.10:9100\\',\\'10.5.0.5:9100\\',\\'10.5.0.11:9100\\'...", - local prometheus_targets = std.format("\\'localhost:9100\\',\\'%s:9100\\',\\'%s:9100\\',", [internal_bastion_ip, rabbitmq_ip]) + - "\\'" + std.join(":9100\\',\\'", cassandra_ips) + ":9100\\'," + - "\\'" + std.join(":9500\\',\\'", cassandra_ips) + ":9500\\'," + // Cassandra exporter - "\\'" + std.join(":9100\\',\\'", daemon_ips) + ":9100\\'", - - // Full list of env variables expected by capideploy working with this project - env_variables_used: [ - // Used in this config - 'CAPIDEPLOY_SSH_USER', - 'CAPIDEPLOY_SSH_PRIVATE_KEY_PASS', - 'CAPIDEPLOY_SFTP_USER', - 'CAPIDEPLOY_RABBITMQ_ADMIN_NAME', - 'CAPIDEPLOY_RABBITMQ_ADMIN_PASS', - 'CAPIDEPLOY_RABBITMQ_USER_NAME', - 'CAPIDEPLOY_RABBITMQ_USER_PASS', - // Used in by Capideploy Openstack calls - 'OS_AUTH_URL', - 'OS_IDENTITY_API_VERSION', - 'OS_INTERFACE', - 'OS_REGION_NAME', - 'OS_PASSWORD', - 'OS_PROJECT_DOMAIN_ID', - 'OS_PROJECT_ID', - 'OS_PROJECT_NAME', - 'OS_USERNAME', - 'OS_USER_DOMAIN_NAME', - ], - ssh_config: { - external_ip_address: '', - port: 22, - user: '{CAPIDEPLOY_SSH_USER}', - private_key_path: ssh_config_private_key_path, - private_key_password: '{CAPIDEPLOY_SSH_PRIVATE_KEY_PASS}', - }, - timeouts: { - openstack_cmd: 60, - openstack_instance_creation: 240, - attach_volume: 60, - }, - - // It's unlikely that you need to change anything below this line - - artifacts: { - env: { - DIR_BUILD_LINUX_AMD64: '../../' + buildLinuxAmd64Dir, - DIR_PKG_EXE: '../../' + pkgExeDir, - DIR_CODE_PARQUET: '../../../code/parquet', - }, - cmd: [ - 'sh/local/build_binaries.sh', - 'sh/local/build_webui.sh', - 
'sh/local/prepare_demo_data.sh', - ], - }, - - network: { - name: deployment_name + '_network', - subnet: { - name: deployment_name + '_subnet', - cidr: subnet_cidr, - allocation_pool: subnet_allocation_pool, - }, - router: { - name: deployment_name + '_router', - external_gateway_network_name: external_gateway_network_name, - }, - }, - security_groups: { - bastion: { - name: deployment_name + '_bastion_security_group', - rules: [ - { - desc: 'SSH', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 22, - direction: 'ingress', - }, - { - desc: 'NFS PortMapper TCP', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 111, - direction: 'ingress', - }, - { - desc: 'NFS PortMapper UDP', - protocol: 'udp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 111, - direction: 'ingress', - }, - { - desc: 'NFS Server TCP', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 2049, - direction: 'ingress', - }, - { - desc: 'NFS Server UDP', - protocol: 'udp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 2049, - direction: 'ingress', - }, - { - desc: 'Prometheus UI reverse proxy', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 9090, - direction: 'ingress', - }, - { - desc: 'Prometheus node exporter', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9100, - direction: 'ingress', - }, - { - desc: 'RabbitMQ UI reverse proxy', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 15672, - direction: 'ingress', - }, - { - desc: 'rsyslog receiver', - protocol: 'udp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 514, - direction: 'ingress', - }, - { - desc: 'Capillaries webapi', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: '0.0.0.0/0', - port: 6543, - direction: 'ingress', - }, - { - desc: 'Capillaries UI nginx', - protocol: 'tcp', - ethertype: 'IPv4', - 
remote_ip: '0.0.0.0/0', - port: 80, - direction: 'ingress', - }, - ], - }, - internal: { - name: deployment_name + '_internal_security_group', - rules: [ - { - desc: 'SSH', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 22, - direction: 'ingress', - }, - { - desc: 'Prometheus UI internal', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9090, - direction: 'ingress', - }, - { - desc: 'Prometheus node exporter', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9100, - direction: 'ingress', - }, - { - desc: 'Cassandra Prometheus node exporter', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9500, - direction: 'ingress', - }, - { - desc: 'Cassandra JMX', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 7199, - direction: 'ingress', - }, - { - desc: 'Cassandra cluster comm', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 7000, - direction: 'ingress', - }, - { - desc: 'Cassandra API', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 9042, - direction: 'ingress', - }, - { - desc: 'RabbitMQ API', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 5672, - direction: 'ingress', - }, - { - desc: 'RabbitMQ UI', - protocol: 'tcp', - ethertype: 'IPv4', - remote_ip: $.network.subnet.cidr, - port: 15672, - direction: 'ingress', - }, - ], - }, - }, - file_groups_up: { - up_all_cfg: { - src: '/tmp/capi_cfg', - dst: '/mnt/capi_cfg', - dir_permissions: 777, - file_permissions: 666, - owner: '{CAPIDEPLOY_SSH_USER}', - after: { - env: { - LOCAL_CFG_LOCATION: '/mnt/capi_cfg', - MOUNT_POINT_CFG: '/mnt/capi_cfg', // If SFTP used: 'sftp://{CAPIDEPLOY_SFTP_USER}@' + internal_bastion_ip + '/mnt/capi_cfg', - MOUNT_POINT_IN: '/mnt/capi_in', - MOUNT_POINT_OUT: '/mnt/capi_out', - }, - cmd: [ - 
'sh/capiscripts/adjust_cfg_in_out.sh', - ], - }, - }, - up_capiparquet_binary: { - src: buildLinuxAmd64Dir + '/capiparquet.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + $.ssh_config.user + '/bin/capiparquet', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_daemon_binary: { - src: buildLinuxAmd64Dir + '/capidaemon.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + $.ssh_config.user + '/bin/capidaemon', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_daemon_env_config: { - src: pkgExeDir + '/daemon/capidaemon.json', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: {}, - }, - up_diff_scripts: { - src: './diff', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 744, - after: {}, - }, - up_lookup_bigtest_in: { - src: '/tmp/capi_in/lookup_bigtest/all.tgz', - dst: '/mnt/capi_in/lookup_bigtest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: { - env: { - OWNER_USER: $.ssh_config.user, - }, - cmd: [ - 'sh/capiscripts/unpack_lookup_big_in.sh', - ], - }, - }, - up_lookup_bigtest_out: { - src: '/tmp/capi_out/lookup_bigtest/all.tgz', - dst: '/mnt/capi_out/lookup_bigtest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: { - env: { - OWNER_USER: $.ssh_config.user, - }, - cmd: [ - 'sh/capiscripts/unpack_lookup_big_out.sh', - ], - }, - }, - up_lookup_quicktest_in: { - src: '/tmp/capi_in/lookup_quicktest', - dst: '/mnt/capi_in/lookup_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_lookup_quicktest_out: { - src: '/tmp/capi_out/lookup_quicktest', - dst: '/mnt/capi_out/lookup_quicktest', - dir_permissions: 
777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_portfolio_quicktest_in: { - src: '/tmp/capi_in/portfolio_quicktest', - dst: '/mnt/capi_in/portfolio_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_portfolio_quicktest_out: { - src: '/tmp/capi_out/portfolio_quicktest', - dst: '/mnt/capi_out/portfolio_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_py_calc_quicktest_in: { - src: '/tmp/capi_in/py_calc_quicktest', - dst: '/mnt/capi_in/py_calc_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_py_calc_quicktest_out: { - src: '/tmp/capi_out/py_calc_quicktest', - dst: '/mnt/capi_out/py_calc_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_tag_and_denormalize_quicktest_in: { - src: '/tmp/capi_in/tag_and_denormalize_quicktest', - dst: '/mnt/capi_in/tag_and_denormalize_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_tag_and_denormalize_quicktest_out: { - src: '/tmp/capi_out/tag_and_denormalize_quicktest', - dst: '/mnt/capi_out/tag_and_denormalize_quicktest', - dir_permissions: 777, - file_permissions: 666, - owner: $.ssh_config.user, - after: {}, - }, - up_toolbelt_binary: { - src: buildLinuxAmd64Dir + '/capitoolbelt.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + $.ssh_config.user + '/bin/capitoolbelt', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_toolbelt_env_config: { - src: pkgExeDir + '/toolbelt/capitoolbelt.json', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: {}, - }, - up_ui: { - src: '../../ui/public', - dst: '/home/' + $.ssh_config.user + 
'/ui', - dir_permissions: 755, - file_permissions: 644, - after: {}, - }, - up_webapi_binary: { - src: buildLinuxAmd64Dir + '/capiwebapi.gz', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: { - env: { - CAPI_BINARY: '/home/' + $.ssh_config.user + '/bin/capiwebapi', - }, - cmd: [ - 'sh/capiscripts/unpack_capi_binary.sh', - ], - }, - }, - up_webapi_env_config: { - src: pkgExeDir + '/webapi/capiwebapi.json', - dst: '/home/' + $.ssh_config.user + '/bin', - dir_permissions: 744, - file_permissions: 644, - after: {}, - }, - }, - file_groups_down: { - down_capi_logs: { - src: '/var/log/capidaemon/', - dst: './tmp/capi_logs', - }, - down_capi_out: { - src: '/mnt/capi_out', - dst: './tmp/capi_out', - }, - }, - - // Only alphanumeric characters allowed in instance names! No underscores, no dashes, no dots, no spaces - nada. - - local bastion_instance = { - bastion: { - host_name: deployment_name + '-bastion', - security_group: 'bastion', - root_key_name: default_root_key_name, - ip_address: internal_bastion_ip, - uses_ssh_config_external_ip_address: true, - flavor: instance_flavor_bastion, - image: instance_image_name, - availability_zone: default_availability_zone, - volumes: { - cfg: { - name: deployment_name + '_cfg', - availability_zone: 'nova', - mount_point: '/mnt/capi_cfg', - size: 1, - type: 'CEPH_1_perf1', - permissions: 777, - owner: $.ssh_config.user, // If SFTP used: "{CAPIDEPLOY_SFTP_USER}" - }, - 'in': { - name: deployment_name + '_in', - availability_zone: 'nova', - mount_point: '/mnt/capi_in', - size: 1, - type: 'CEPH_1_perf1', - permissions: 777, - owner: $.ssh_config.user, - }, - out: { - name: deployment_name + '_out', - availability_zone: 'nova', - mount_point: '/mnt/capi_out', - size: 1, - type: 'CEPH_1_perf1', - permissions: 777, - owner: $.ssh_config.user, - }, - }, - users: [ - { - name: '{CAPIDEPLOY_SFTP_USER}', - public_key_path: sftp_config_public_key_path, - }, - ], - private_keys: [ - { - 
name: '{CAPIDEPLOY_SFTP_USER}', - private_key_path: sftp_config_private_key_path, - }, - ], - service: { - env: { - AMQP_URL: 'amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@' + rabbitmq_ip + '/', - CASSANDRA_HOSTS: cassandra_hosts, - NFS_DIRS: '/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out', - PROMETHEUS_IP: prometheus_ip, - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - RABBITMQ_IP: rabbitmq_ip, - SFTP_USER: '{CAPIDEPLOY_SFTP_USER}', - SSH_USER: $.ssh_config.user, - SUBNET_CIDR: $.network.subnet.cidr, - EXTERNAL_IP_ADDRESS: '{EXTERNAL_IP_ADDRESS}', // internal: capideploy populates it from ssh_config.external_ip_address after loading project file; used by webui and webapi config.sh - WEBAPI_PORT: '6543', - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/common/increase_ssh_connection_limit.sh', - 'sh/prometheus/install_node_exporter.sh', - 'sh/nfs/install_server.sh', - 'sh/nginx/install.sh', - ], - config: [ - 'sh/prometheus/config_node_exporter.sh', - 'sh/rsyslog/config_capidaemon_log_receiver.sh', - 'sh/logrotate/config_capidaemon_logrotate.sh', - 'sh/toolbelt/config.sh', - 'sh/webapi/config.sh', - 'sh/ui/config.sh', - 'sh/nginx/config_ui.sh', - 'sh/nfs/config_server.sh', - 'sh/nginx/config_prometheus_reverse_proxy.sh', - 'sh/nginx/config_rabbitmq_reverse_proxy.sh', - ], - start: [ - 'sh/webapi/start.sh', - 'sh/nginx/start.sh', - ], - stop: [ - 'sh/webapi/stop.sh', - 'sh/nginx/stop.sh', - ], - }, - }, - applicable_file_groups: [ - 'up_all_cfg', - 'up_lookup_bigtest_in', - 'up_lookup_bigtest_out', - 'up_lookup_quicktest_in', - 'up_lookup_quicktest_out', - 'up_tag_and_denormalize_quicktest_in', - 'up_tag_and_denormalize_quicktest_out', - 'up_py_calc_quicktest_in', - 'up_py_calc_quicktest_out', - 'up_portfolio_quicktest_in', - 'up_portfolio_quicktest_out', - 'up_webapi_binary', - 'up_webapi_env_config', - 'up_toolbelt_binary', - 'up_toolbelt_env_config', - 'up_capiparquet_binary', - 'up_ui', - 
'up_diff_scripts', - 'down_capi_out', - 'down_capi_logs', - ], - }, - }, - - local rabbitmq_instance = { - rabbitmq: { - host_name: deployment_name + '-rabbitmq', - security_group: 'internal', - root_key_name: default_root_key_name, - ip_address: rabbitmq_ip, - flavor: instance_flavor_rabbitmq, - image: instance_image_name, - availability_zone: default_availability_zone, - service: { - env: { - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - RABBITMQ_ADMIN_NAME: '{CAPIDEPLOY_RABBITMQ_ADMIN_NAME}', - RABBITMQ_ADMIN_PASS: '{CAPIDEPLOY_RABBITMQ_ADMIN_PASS}', - RABBITMQ_USER_NAME: '{CAPIDEPLOY_RABBITMQ_USER_NAME}', - RABBITMQ_USER_PASS: '{CAPIDEPLOY_RABBITMQ_USER_PASS}', - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/prometheus/install_node_exporter.sh', - 'sh/rabbitmq/install.sh', - ], - config: [ - 'sh/prometheus/config_node_exporter.sh', - 'sh/rabbitmq/config.sh', - ], - start: [ - 'sh/rabbitmq/start.sh', - ], - stop: [ - 'sh/rabbitmq/stop.sh', - ], - }, - }, - }, - }, - - local prometheus_instance = { - prometheus: { - host_name: deployment_name + '-prometheus', - security_group: 'internal', - root_key_name: default_root_key_name, - ip_address: prometheus_ip, - flavor: instance_flavor_prometheus, - image: instance_image_name, - availability_zone: default_availability_zone, - service: { - env: { - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - PROMETHEUS_TARGETS: prometheus_targets, - PROMETHEUS_VERSION: prometheus_server_version, - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/prometheus/install_server.sh', - 'sh/prometheus/install_node_exporter.sh', - ], - config: [ - 'sh/prometheus/config_server.sh', - 'sh/prometheus/config_node_exporter.sh', - ], - start: [ - 'sh/prometheus/start_server.sh', - ], - stop: [ - 'sh/prometheus/stop_server.sh', - ], - }, - }, - }, - }, - - local cass_instances = { - [e.nickname]: { - host_name: e.host_name, - security_group: 'internal', - 
root_key_name: default_root_key_name, - ip_address: e.ip_address, - flavor: instance_flavor_cassandra, - image: instance_image_name, - availability_zone: default_availability_zone, - service: { - env: { - CASSANDRA_IP: e.ip_address, - CASSANDRA_SEEDS: cassandra_seeds, - INITIAL_TOKEN: e.token, - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - PROMETHEUS_CASSANDRA_EXPORTER_VERSION: prometheus_cassandra_exporter_version, - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/prometheus/install_node_exporter.sh', - 'sh/cassandra/install.sh', - ], - config: [ - 'sh/prometheus/config_node_exporter.sh', - 'sh/cassandra/config.sh', - ], - start: [ - 'sh/cassandra/start.sh', - ], - stop: [ - 'sh/cassandra/stop.sh', - ], - }, - }, - } - for e in std.mapWithIndex(function(i, v) { - nickname: std.format('cass%03d', i + 1), - host_name: deployment_name + '-' + self.nickname, - token: cassandra_tokens[i], - ip_address: v, - }, cassandra_ips) - }, - - local daemon_instances = { - [e.nickname]: { - host_name: e.host_name, - security_group: 'internal', - root_key_name: default_root_key_name, - ip_address: e.ip_address, - flavor: instance_flavor_daemon, - image: instance_image_name, - availability_zone: default_availability_zone, - private_keys: [ - { - name: '{CAPIDEPLOY_SFTP_USER}', - private_key_path: sftp_config_private_key_path, - }, - ], - service: { - env: { - AMQP_URL: 'amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@' + rabbitmq_ip + '/', - CASSANDRA_HOSTS: cassandra_hosts, - DAEMON_THREAD_POOL_SIZE: DEFAULT_DAEMON_THREAD_POOL_SIZE, - DAEMON_DB_WRITERS: DEFAULT_DAEMON_DB_WRITERS, - INTERNAL_BASTION_IP: internal_bastion_ip, - NFS_DIRS: '/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out', - PROMETHEUS_NODE_EXPORTER_VERSION: prometheus_node_exporter_version, - SFTP_USER: '{CAPIDEPLOY_SFTP_USER}', - SSH_USER: $.ssh_config.user, - }, - cmd: { - install: [ - 'sh/common/replace_nameserver.sh', - 'sh/nfs/install_client.sh', - 
"sh/daemon/install.sh", - 'sh/prometheus/install_node_exporter.sh', - ], - config: [ - 'sh/nfs/config_client.sh', - 'sh/logrotate/config_capidaemon_logrotate.sh', - 'sh/rsyslog/config_capidaemon_log_sender.sh', - 'sh/prometheus/config_node_exporter.sh', - 'sh/daemon/config.sh', - ], - start: [ - 'sh/daemon/start.sh', - ], - stop: [ - 'sh/daemon/stop.sh', - ], - }, - }, - applicable_file_groups: [ - 'up_daemon_binary', - 'up_daemon_env_config', - ], - } - for e in std.mapWithIndex(function(i, v) { - nickname: std.format('daemon%03d', i + 1), - host_name: deployment_name + '-' + self.nickname, - ip_address: v, - }, daemon_ips) - }, - - instances: bastion_instance + rabbitmq_instance + prometheus_instance + cass_instances + daemon_instances, -} diff --git a/test/deploy/sampledeployment002.json b/test/deploy/sampledeployment.json similarity index 78% rename from test/deploy/sampledeployment002.json rename to test/deploy/sampledeployment.json index b99b0a7..d35cac6 100644 --- a/test/deploy/sampledeployment002.json +++ b/test/deploy/sampledeployment.json @@ -358,6 +358,36 @@ "owner": "{CAPIDEPLOY_SSH_USER}", "after": {} }, + "up_portfolio_bigtest_in": { + "src": "/tmp/capi_in/portfolio_bigtest/all.tgz", + "dst": "/mnt/capi_in/portfolio_bigtest", + "dir_permissions": 777, + "file_permissions": 666, + "owner": "{CAPIDEPLOY_SSH_USER}", + "after": { + "env": { + "OWNER_USER": "{CAPIDEPLOY_SSH_USER}" + }, + "cmd": [ + "sh/capiscripts/unpack_portfolio_big_in.sh" + ] + } + }, + "up_portfolio_bigtest_out": { + "src": "/tmp/capi_out/portfolio_bigtest/all.tgz", + "dst": "/mnt/capi_out/portfolio_bigtest", + "dir_permissions": 777, + "file_permissions": 666, + "owner": "{CAPIDEPLOY_SSH_USER}", + "after": { + "env": { + "OWNER_USER": "{CAPIDEPLOY_SSH_USER}" + }, + "cmd": [ + "sh/capiscripts/unpack_portfolio_big_out.sh" + ] + } + }, "up_portfolio_quicktest_in": { "src": "/tmp/capi_in/portfolio_quicktest", "dst": "/mnt/capi_in/portfolio_quicktest", @@ -533,7 +563,7 @@ "service": { 
"env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", "EXTERNAL_IP_ADDRESS": "{EXTERNAL_IP_ADDRESS}", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_IP": "10.5.0.4", @@ -584,6 +614,8 @@ "up_tag_and_denormalize_quicktest_out", "up_py_calc_quicktest_in", "up_py_calc_quicktest_out", + "up_portfolio_bigtest_in", + "up_portfolio_bigtest_out", "up_portfolio_quicktest_in", "up_portfolio_quicktest_out", "up_webapi_binary", @@ -602,7 +634,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.11", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -638,7 +670,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.12", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -646,7 +678,7 @@ "env": { "CASSANDRA_IP": "10.5.0.12", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-8070450532247928832", + "INITIAL_TOKEN": "-6917529027641081856", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" }, @@ -674,7 +706,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.13", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": 
"ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -682,7 +714,7 @@ "env": { "CASSANDRA_IP": "10.5.0.13", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-6917529027641081856", + "INITIAL_TOKEN": "-4611686018427387904", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" }, @@ -710,7 +742,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.14", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -718,7 +750,7 @@ "env": { "CASSANDRA_IP": "10.5.0.14", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-5764607523034234880", + "INITIAL_TOKEN": "-2305843009213693952", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" }, @@ -746,7 +778,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.15", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -754,7 +786,7 @@ "env": { "CASSANDRA_IP": "10.5.0.15", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-4611686018427387904", + "INITIAL_TOKEN": "0", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" }, @@ -782,7 +814,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.16", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -790,7 +822,7 @@ "env": { "CASSANDRA_IP": "10.5.0.16", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-3458764513820540928", + 
"INITIAL_TOKEN": "2305843009213693952", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" }, @@ -818,7 +850,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.17", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -826,7 +858,7 @@ "env": { "CASSANDRA_IP": "10.5.0.17", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-2305843009213693952", + "INITIAL_TOKEN": "4611686018427387904", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" }, @@ -854,7 +886,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.18", - "flavor": "c6asx.2xlarge", + "flavor": "c5d.4xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -862,258 +894,6 @@ "env": { "CASSANDRA_IP": "10.5.0.18", "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-1152921504606846976", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass009": { - "host_name": "sampledeployment002-cass009", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.19", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.19", - 
"CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "0", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass010": { - "host_name": "sampledeployment002-cass010", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.20", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.20", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "1152921504606846976", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass011": { - "host_name": "sampledeployment002-cass011", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.21", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.21", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "2305843009213693952", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - 
"sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass012": { - "host_name": "sampledeployment002-cass012", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.22", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.22", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "3458764513820540928", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass013": { - "host_name": "sampledeployment002-cass013", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.23", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.23", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "4611686018427387904", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - 
], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass014": { - "host_name": "sampledeployment002-cass014", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.24", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.24", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "5764607523034234880", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass015": { - "host_name": "sampledeployment002-cass015", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.25", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.25", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", "INITIAL_TOKEN": "6917529027641081856", "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" @@ -1137,48 +917,12 @@ } } }, - "cass016": { - "host_name": "sampledeployment002-cass016", - "security_group": "internal", - "root_key_name": "sampledeployment002-root-key", - "ip_address": "10.5.0.26", - "flavor": "c6asx.2xlarge", - "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", - "availability_zone": "us-central-1a", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.26", - "CASSANDRA_SEEDS": 
"10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "8070450532247928832", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, "daemon001": { "host_name": "sampledeployment002-daemon001", "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.101", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1191,9 +935,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1232,7 +976,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.102", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1245,9 +989,9 @@ "service": { "env": { "AMQP_URL": 
"amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1286,7 +1030,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.103", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1299,9 +1043,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1340,7 +1084,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.104", - "flavor": "c5sd.xlarge", + 
"flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1353,9 +1097,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1394,7 +1138,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.105", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1407,9 +1151,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", 
"PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1448,7 +1192,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.106", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1461,9 +1205,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1502,7 +1246,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.107", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1515,9 +1259,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": 
"'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1556,7 +1300,7 @@ "security_group": "internal", "root_key_name": "sampledeployment002-root-key", "ip_address": "10.5.0.108", - "flavor": "c5sd.xlarge", + "flavor": "c6sd.2xlarge", "image": "ubuntu-23.04_LTS-lunar-server-cloudimg-amd64-20221217_raw", "availability_zone": "us-central-1a", "id": "", @@ -1569,9 +1313,9 @@ "service": { "env": { "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\",\"10.5.0.19\",\"10.5.0.20\",\"10.5.0.21\",\"10.5.0.22\",\"10.5.0.23\",\"10.5.0.24\",\"10.5.0.25\",\"10.5.0.26\"]'", - "DAEMON_DB_WRITERS": "10", - "DAEMON_THREAD_POOL_SIZE": "10", + "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\",\"10.5.0.15\",\"10.5.0.16\",\"10.5.0.17\",\"10.5.0.18\"]'", + "DAEMON_DB_WRITERS": "8", + "DAEMON_THREAD_POOL_SIZE": "8", "INTERNAL_BASTION_IP": "10.5.0.10", "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", @@ -1617,7 +1361,7 @@ "service": { "env": { "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "PROMETHEUS_TARGETS": 
"\\'localhost:9100\\',\\'10.5.0.10:9100\\',\\'10.5.0.5:9100\\',\\'10.5.0.11:9100\\',\\'10.5.0.12:9100\\',\\'10.5.0.13:9100\\',\\'10.5.0.14:9100\\',\\'10.5.0.15:9100\\',\\'10.5.0.16:9100\\',\\'10.5.0.17:9100\\',\\'10.5.0.18:9100\\',\\'10.5.0.19:9100\\',\\'10.5.0.20:9100\\',\\'10.5.0.21:9100\\',\\'10.5.0.22:9100\\',\\'10.5.0.23:9100\\',\\'10.5.0.24:9100\\',\\'10.5.0.25:9100\\',\\'10.5.0.26:9100\\',\\'10.5.0.11:9500\\',\\'10.5.0.12:9500\\',\\'10.5.0.13:9500\\',\\'10.5.0.14:9500\\',\\'10.5.0.15:9500\\',\\'10.5.0.16:9500\\',\\'10.5.0.17:9500\\',\\'10.5.0.18:9500\\',\\'10.5.0.19:9500\\',\\'10.5.0.20:9500\\',\\'10.5.0.21:9500\\',\\'10.5.0.22:9500\\',\\'10.5.0.23:9500\\',\\'10.5.0.24:9500\\',\\'10.5.0.25:9500\\',\\'10.5.0.26:9500\\',\\'10.5.0.101:9100\\',\\'10.5.0.102:9100\\',\\'10.5.0.103:9100\\',\\'10.5.0.104:9100\\',\\'10.5.0.105:9100\\',\\'10.5.0.106:9100\\',\\'10.5.0.107:9100\\',\\'10.5.0.108:9100\\'", + "PROMETHEUS_TARGETS": "\\'localhost:9100\\',\\'10.5.0.10:9100\\',\\'10.5.0.5:9100\\',\\'10.5.0.11:9100\\',\\'10.5.0.12:9100\\',\\'10.5.0.13:9100\\',\\'10.5.0.14:9100\\',\\'10.5.0.15:9100\\',\\'10.5.0.16:9100\\',\\'10.5.0.17:9100\\',\\'10.5.0.18:9100\\',\\'10.5.0.11:9500\\',\\'10.5.0.12:9500\\',\\'10.5.0.13:9500\\',\\'10.5.0.14:9500\\',\\'10.5.0.15:9500\\',\\'10.5.0.16:9500\\',\\'10.5.0.17:9500\\',\\'10.5.0.18:9500\\',\\'10.5.0.101:9100\\',\\'10.5.0.102:9100\\',\\'10.5.0.103:9100\\',\\'10.5.0.104:9100\\',\\'10.5.0.105:9100\\',\\'10.5.0.106:9100\\',\\'10.5.0.107:9100\\',\\'10.5.0.108:9100\\'", "PROMETHEUS_VERSION": "2.45.0" }, "cmd": { diff --git a/test/deploy/sampledeployment003.json b/test/deploy/sampledeployment003.json deleted file mode 100644 index e81de55..0000000 --- a/test/deploy/sampledeployment003.json +++ /dev/null @@ -1,924 +0,0 @@ -{ - "artifacts": { - "env": { - "DIR_BUILD_LINUX_AMD64": "../../../../build/linux/amd64", - "DIR_CODE_PARQUET": "../../../code/parquet", - "DIR_PKG_EXE": "../../../../pkg/exe" - }, - "cmd": [ - "sh/local/build_binaries.sh", - 
"sh/local/build_webui.sh", - "sh/local/prepare_demo_data.sh" - ] - }, - "ssh_config": { - "external_ip_address": "", - "port": 22, - "user": "{CAPIDEPLOY_SSH_USER}", - "private_key_path": "~/.ssh/sampledeployment003_rsa", - "private_key_password": "{CAPIDEPLOY_SSH_PRIVATE_KEY_PASS}" - }, - "timeouts": { - "openstack_cmd": 60, - "openstack_instance_creation": 240, - "attach_volume": 60 - }, - "env_variables_used": [ - "CAPIDEPLOY_SSH_USER", - "CAPIDEPLOY_SSH_PRIVATE_KEY_PASS", - "CAPIDEPLOY_SFTP_USER", - "CAPIDEPLOY_RABBITMQ_ADMIN_NAME", - "CAPIDEPLOY_RABBITMQ_ADMIN_PASS", - "CAPIDEPLOY_RABBITMQ_USER_NAME", - "CAPIDEPLOY_RABBITMQ_USER_PASS", - "OS_AUTH_URL", - "OS_IDENTITY_API_VERSION", - "OS_INTERFACE", - "OS_REGION_NAME", - "OS_PASSWORD", - "OS_PROJECT_DOMAIN_ID", - "OS_PROJECT_ID", - "OS_PROJECT_NAME", - "OS_USERNAME", - "OS_USER_DOMAIN_NAME" - ], - "security_groups": { - "bastion": { - "name": "sampledeployment003_bastion_security_group", - "id": "", - "rules": [ - { - "desc": "SSH", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 22, - "direction": "ingress" - }, - { - "desc": "NFS PortMapper TCP", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 111, - "direction": "ingress" - }, - { - "desc": "NFS PortMapper UDP", - "id": "", - "protocol": "udp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 111, - "direction": "ingress" - }, - { - "desc": "NFS Server TCP", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 2049, - "direction": "ingress" - }, - { - "desc": "NFS Server UDP", - "id": "", - "protocol": "udp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 2049, - "direction": "ingress" - }, - { - "desc": "Prometheus UI reverse proxy", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 9090, - "direction": "ingress" - }, - { - "desc": "Prometheus node 
exporter", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9100, - "direction": "ingress" - }, - { - "desc": "RabbitMQ UI reverse proxy", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 15672, - "direction": "ingress" - }, - { - "desc": "rsyslog receiver", - "id": "", - "protocol": "udp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 514, - "direction": "ingress" - }, - { - "desc": "Capillaries webapi", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 6543, - "direction": "ingress" - }, - { - "desc": "Capillaries UI nginx", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 80, - "direction": "ingress" - } - ] - }, - "internal": { - "name": "sampledeployment003_internal_security_group", - "id": "", - "rules": [ - { - "desc": "SSH", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 22, - "direction": "ingress" - }, - { - "desc": "Prometheus UI internal", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9090, - "direction": "ingress" - }, - { - "desc": "Prometheus node exporter", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9100, - "direction": "ingress" - }, - { - "desc": "Cassandra Prometheus node exporter", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9500, - "direction": "ingress" - }, - { - "desc": "Cassandra JMX", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 7199, - "direction": "ingress" - }, - { - "desc": "Cassandra cluster comm", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 7000, - "direction": "ingress" - }, - { - "desc": "Cassandra API", - "id": "", - "protocol": "tcp", 
- "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9042, - "direction": "ingress" - }, - { - "desc": "RabbitMQ API", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 5672, - "direction": "ingress" - }, - { - "desc": "RabbitMQ UI", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 15672, - "direction": "ingress" - } - ] - } - }, - "network": { - "name": "sampledeployment003_network", - "id": "", - "subnet": { - "name": "sampledeployment003_subnet", - "id": "", - "cidr": "10.5.0.0/24", - "allocation_pool": "start=10.5.0.240,end=10.5.0.254" - }, - "router": { - "name": "sampledeployment003_router", - "id": "", - "external_gateway_network_name": "Ext-Net" - } - }, - "file_groups_up": { - "up_all_cfg": { - "src": "/tmp/capi_cfg", - "dst": "/mnt/capi_cfg", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": { - "env": { - "LOCAL_CFG_LOCATION": "/mnt/capi_cfg", - "MOUNT_POINT_CFG": "/mnt/capi_cfg", - "MOUNT_POINT_IN": "/mnt/capi_in", - "MOUNT_POINT_OUT": "/mnt/capi_out" - }, - "cmd": [ - "sh/capiscripts/adjust_cfg_in_out.sh" - ] - } - }, - "up_capiparquet_binary": { - "src": "../../build/linux/amd64/capiparquet.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": "/home/{CAPIDEPLOY_SSH_USER}/bin/capiparquet" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_daemon_binary": { - "src": "../../build/linux/amd64/capidaemon.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": "/home/{CAPIDEPLOY_SSH_USER}/bin/capidaemon" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_daemon_env_config": { - "src": "../../pkg/exe/daemon/capidaemon.json", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - 
"dir_permissions": 744, - "file_permissions": 644, - "after": {} - }, - "up_diff_scripts": { - "src": "./diff", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 744, - "after": {} - }, - "up_lookup_bigtest_in": { - "src": "/tmp/capi_in/lookup_bigtest/all.tgz", - "dst": "/mnt/capi_in/lookup_bigtest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": { - "env": { - "OWNER_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": [ - "sh/capiscripts/unpack_lookup_big_in.sh" - ] - } - }, - "up_lookup_bigtest_out": { - "src": "/tmp/capi_out/lookup_bigtest/all.tgz", - "dst": "/mnt/capi_out/lookup_bigtest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": { - "env": { - "OWNER_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": [ - "sh/capiscripts/unpack_lookup_big_out.sh" - ] - } - }, - "up_lookup_quicktest_in": { - "src": "/tmp/capi_in/lookup_quicktest", - "dst": "/mnt/capi_in/lookup_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_lookup_quicktest_out": { - "src": "/tmp/capi_out/lookup_quicktest", - "dst": "/mnt/capi_out/lookup_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_portfolio_quicktest_in": { - "src": "/tmp/capi_in/portfolio_quicktest", - "dst": "/mnt/capi_in/portfolio_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_portfolio_quicktest_out": { - "src": "/tmp/capi_out/portfolio_quicktest", - "dst": "/mnt/capi_out/portfolio_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_py_calc_quicktest_in": { - "src": "/tmp/capi_in/py_calc_quicktest", - "dst": "/mnt/capi_in/py_calc_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - 
"owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_py_calc_quicktest_out": { - "src": "/tmp/capi_out/py_calc_quicktest", - "dst": "/mnt/capi_out/py_calc_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_tag_and_denormalize_quicktest_in": { - "src": "/tmp/capi_in/tag_and_denormalize_quicktest", - "dst": "/mnt/capi_in/tag_and_denormalize_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_tag_and_denormalize_quicktest_out": { - "src": "/tmp/capi_out/tag_and_denormalize_quicktest", - "dst": "/mnt/capi_out/tag_and_denormalize_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_toolbelt_binary": { - "src": "../../build/linux/amd64/capitoolbelt.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": "/home/{CAPIDEPLOY_SSH_USER}/bin/capitoolbelt" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_toolbelt_env_config": { - "src": "../../pkg/exe/toolbelt/capitoolbelt.json", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": {} - }, - "up_ui": { - "src": "../../ui/public", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/ui", - "dir_permissions": 755, - "file_permissions": 644, - "after": {} - }, - "up_webapi_binary": { - "src": "../../build/linux/amd64/capiwebapi.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": "/home/{CAPIDEPLOY_SSH_USER}/bin/capiwebapi" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_webapi_env_config": { - "src": "../../pkg/exe/webapi/capiwebapi.json", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - 
"after": {} - } - }, - "file_groups_down": { - "down_capi_logs": { - "src": "/var/log/capidaemon/", - "dst": "./tmp/capi_logs" - }, - "down_capi_out": { - "src": "/mnt/capi_out", - "dst": "./tmp/capi_out" - } - }, - "instances": { - "bastion": { - "host_name": "sampledeployment003-bastion", - "security_group": "bastion", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.10", - "uses_ssh_config_external_ip_address": true, - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "volumes": { - "cfg": { - "name": "sampledeployment003_cfg", - "mount_point": "/mnt/capi_cfg", - "size": 1, - "type": "classic", - "permissions": 777, - "owner": "{CAPIDEPLOY_SSH_USER}", - "availability_zone": "nova", - "id": "", - "attachment_id": "", - "device": "", - "block_device_id": "" - }, - "in": { - "name": "sampledeployment003_in", - "mount_point": "/mnt/capi_in", - "size": 1, - "type": "classic", - "permissions": 777, - "owner": "{CAPIDEPLOY_SSH_USER}", - "availability_zone": "nova", - "id": "", - "attachment_id": "", - "device": "", - "block_device_id": "" - }, - "out": { - "name": "sampledeployment003_out", - "mount_point": "/mnt/capi_out", - "size": 1, - "type": "classic", - "permissions": 777, - "owner": "{CAPIDEPLOY_SSH_USER}", - "availability_zone": "nova", - "id": "", - "attachment_id": "", - "device": "", - "block_device_id": "" - } - }, - "id": "", - "users": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "public_key_path": "~/.ssh/sampledeployment003_sftp.pub" - } - ], - "private_keys": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "private_key_path": "~/.ssh/sampledeployment003_sftp" - } - ], - "service": { - "env": { - "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\"]'", - "EXTERNAL_IP_ADDRESS": "{EXTERNAL_IP_ADDRESS}", - "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", - "PROMETHEUS_IP": 
"10.5.0.4", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "RABBITMQ_IP": "10.5.0.5", - "SFTP_USER": "{CAPIDEPLOY_SFTP_USER}", - "SSH_USER": "{CAPIDEPLOY_SSH_USER}", - "SUBNET_CIDR": "10.5.0.0/24", - "WEBAPI_PORT": "6543" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/common/increase_ssh_connection_limit.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/nfs/install_server.sh", - "sh/nginx/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/rsyslog/config_capidaemon_log_receiver.sh", - "sh/logrotate/config_capidaemon_logrotate.sh", - "sh/toolbelt/config.sh", - "sh/webapi/config.sh", - "sh/ui/config.sh", - "sh/nginx/config_ui.sh", - "sh/nfs/config_server.sh", - "sh/nginx/config_prometheus_reverse_proxy.sh", - "sh/nginx/config_rabbitmq_reverse_proxy.sh" - ], - "start": [ - "sh/webapi/start.sh", - "sh/nginx/start.sh" - ], - "stop": [ - "sh/webapi/stop.sh", - "sh/nginx/stop.sh" - ] - } - }, - "applicable_file_groups": [ - "up_all_cfg", - "up_lookup_bigtest_in", - "up_lookup_bigtest_out", - "up_lookup_quicktest_in", - "up_lookup_quicktest_out", - "up_tag_and_denormalize_quicktest_in", - "up_tag_and_denormalize_quicktest_out", - "up_py_calc_quicktest_in", - "up_py_calc_quicktest_out", - "up_portfolio_quicktest_in", - "up_portfolio_quicktest_out", - "up_webapi_binary", - "up_webapi_env_config", - "up_toolbelt_binary", - "up_toolbelt_env_config", - "up_capiparquet_binary", - "up_ui", - "up_diff_scripts", - "down_capi_out", - "down_capi_logs" - ] - }, - "cass001": { - "host_name": "sampledeployment003-cass001", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.11", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.11", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-9223372036854775808", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - 
"PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass002": { - "host_name": "sampledeployment003-cass002", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.12", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.12", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-4611686018427387904", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass003": { - "host_name": "sampledeployment003-cass003", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.13", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.13", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "0", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - 
"sh/cassandra/stop.sh" - ] - } - } - }, - "cass004": { - "host_name": "sampledeployment003-cass004", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.14", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.14", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "4611686018427387904", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "daemon001": { - "host_name": "sampledeployment003-daemon001", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.101", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "private_keys": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "private_key_path": "~/.ssh/sampledeployment003_sftp" - } - ], - "service": { - "env": { - "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\"]'", - "DAEMON_DB_WRITERS": "5", - "DAEMON_THREAD_POOL_SIZE": "5", - "INTERNAL_BASTION_IP": "10.5.0.10", - "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "SFTP_USER": "{CAPIDEPLOY_SFTP_USER}", - "SSH_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/nfs/install_client.sh", - "sh/daemon/install.sh", - "sh/prometheus/install_node_exporter.sh" - ], - "config": [ - "sh/nfs/config_client.sh", - 
"sh/logrotate/config_capidaemon_logrotate.sh", - "sh/rsyslog/config_capidaemon_log_sender.sh", - "sh/prometheus/config_node_exporter.sh", - "sh/daemon/config.sh" - ], - "start": [ - "sh/daemon/start.sh" - ], - "stop": [ - "sh/daemon/stop.sh" - ] - } - }, - "applicable_file_groups": [ - "up_daemon_binary", - "up_daemon_env_config" - ] - }, - "daemon002": { - "host_name": "sampledeployment003-daemon002", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.102", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "private_keys": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "private_key_path": "~/.ssh/sampledeployment003_sftp" - } - ], - "service": { - "env": { - "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\"]'", - "DAEMON_DB_WRITERS": "5", - "DAEMON_THREAD_POOL_SIZE": "5", - "INTERNAL_BASTION_IP": "10.5.0.10", - "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "SFTP_USER": "{CAPIDEPLOY_SFTP_USER}", - "SSH_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/nfs/install_client.sh", - "sh/daemon/install.sh", - "sh/prometheus/install_node_exporter.sh" - ], - "config": [ - "sh/nfs/config_client.sh", - "sh/logrotate/config_capidaemon_logrotate.sh", - "sh/rsyslog/config_capidaemon_log_sender.sh", - "sh/prometheus/config_node_exporter.sh", - "sh/daemon/config.sh" - ], - "start": [ - "sh/daemon/start.sh" - ], - "stop": [ - "sh/daemon/stop.sh" - ] - } - }, - "applicable_file_groups": [ - "up_daemon_binary", - "up_daemon_env_config" - ] - }, - "prometheus": { - "host_name": "sampledeployment003-prometheus", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.4", - "flavor": "b2-7", - "image": 
"Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "service": { - "env": { - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "PROMETHEUS_TARGETS": "\\'localhost:9100\\',\\'10.5.0.10:9100\\',\\'10.5.0.5:9100\\',\\'10.5.0.11:9100\\',\\'10.5.0.12:9100\\',\\'10.5.0.13:9100\\',\\'10.5.0.14:9100\\',\\'10.5.0.11:9500\\',\\'10.5.0.12:9500\\',\\'10.5.0.13:9500\\',\\'10.5.0.14:9500\\',\\'10.5.0.101:9100\\',\\'10.5.0.102:9100\\'", - "PROMETHEUS_VERSION": "2.45.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_server.sh", - "sh/prometheus/install_node_exporter.sh" - ], - "config": [ - "sh/prometheus/config_server.sh", - "sh/prometheus/config_node_exporter.sh" - ], - "start": [ - "sh/prometheus/start_server.sh" - ], - "stop": [ - "sh/prometheus/stop_server.sh" - ] - } - } - }, - "rabbitmq": { - "host_name": "sampledeployment003-rabbitmq", - "security_group": "internal", - "root_key_name": "sampledeployment003-root-key", - "ip_address": "10.5.0.5", - "flavor": "b2-7", - "image": "Ubuntu 23.04", - "availability_zone": "nova", - "id": "", - "service": { - "env": { - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "RABBITMQ_ADMIN_NAME": "{CAPIDEPLOY_RABBITMQ_ADMIN_NAME}", - "RABBITMQ_ADMIN_PASS": "{CAPIDEPLOY_RABBITMQ_ADMIN_PASS}", - "RABBITMQ_USER_NAME": "{CAPIDEPLOY_RABBITMQ_USER_NAME}", - "RABBITMQ_USER_PASS": "{CAPIDEPLOY_RABBITMQ_USER_PASS}" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/rabbitmq/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/rabbitmq/config.sh" - ], - "start": [ - "sh/rabbitmq/start.sh" - ], - "stop": [ - "sh/rabbitmq/stop.sh" - ] - } - } - } - }, - "OpenstackVars": null -} \ No newline at end of file diff --git a/test/deploy/sampledeployment004.json b/test/deploy/sampledeployment004.json deleted file mode 100644 index b3ed07c..0000000 --- a/test/deploy/sampledeployment004.json +++ /dev/null @@ -1,924 
+0,0 @@ -{ - "artifacts": { - "env": { - "DIR_BUILD_LINUX_AMD64": "../../../../build/linux/amd64", - "DIR_CODE_PARQUET": "../../../code/parquet", - "DIR_PKG_EXE": "../../../../pkg/exe" - }, - "cmd": [ - "sh/local/build_binaries.sh", - "sh/local/build_webui.sh", - "sh/local/prepare_demo_data.sh" - ] - }, - "ssh_config": { - "external_ip_address": "", - "port": 22, - "user": "{CAPIDEPLOY_SSH_USER}", - "private_key_path": "~/.ssh/sampledeployment004_rsa", - "private_key_password": "{CAPIDEPLOY_SSH_PRIVATE_KEY_PASS}" - }, - "timeouts": { - "openstack_cmd": 60, - "openstack_instance_creation": 240, - "attach_volume": 60 - }, - "env_variables_used": [ - "CAPIDEPLOY_SSH_USER", - "CAPIDEPLOY_SSH_PRIVATE_KEY_PASS", - "CAPIDEPLOY_SFTP_USER", - "CAPIDEPLOY_RABBITMQ_ADMIN_NAME", - "CAPIDEPLOY_RABBITMQ_ADMIN_PASS", - "CAPIDEPLOY_RABBITMQ_USER_NAME", - "CAPIDEPLOY_RABBITMQ_USER_PASS", - "OS_AUTH_URL", - "OS_IDENTITY_API_VERSION", - "OS_INTERFACE", - "OS_REGION_NAME", - "OS_PASSWORD", - "OS_PROJECT_DOMAIN_ID", - "OS_PROJECT_ID", - "OS_PROJECT_NAME", - "OS_USERNAME", - "OS_USER_DOMAIN_NAME" - ], - "security_groups": { - "bastion": { - "name": "sampledeployment004_bastion_security_group", - "id": "", - "rules": [ - { - "desc": "SSH", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 22, - "direction": "ingress" - }, - { - "desc": "NFS PortMapper TCP", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 111, - "direction": "ingress" - }, - { - "desc": "NFS PortMapper UDP", - "id": "", - "protocol": "udp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 111, - "direction": "ingress" - }, - { - "desc": "NFS Server TCP", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 2049, - "direction": "ingress" - }, - { - "desc": "NFS Server UDP", - "id": "", - "protocol": "udp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 2049, - 
"direction": "ingress" - }, - { - "desc": "Prometheus UI reverse proxy", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 9090, - "direction": "ingress" - }, - { - "desc": "Prometheus node exporter", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9100, - "direction": "ingress" - }, - { - "desc": "RabbitMQ UI reverse proxy", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 15672, - "direction": "ingress" - }, - { - "desc": "rsyslog receiver", - "id": "", - "protocol": "udp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 514, - "direction": "ingress" - }, - { - "desc": "Capillaries webapi", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 6543, - "direction": "ingress" - }, - { - "desc": "Capillaries UI nginx", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "0.0.0.0/0", - "port": 80, - "direction": "ingress" - } - ] - }, - "internal": { - "name": "sampledeployment004_internal_security_group", - "id": "", - "rules": [ - { - "desc": "SSH", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 22, - "direction": "ingress" - }, - { - "desc": "Prometheus UI internal", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9090, - "direction": "ingress" - }, - { - "desc": "Prometheus node exporter", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9100, - "direction": "ingress" - }, - { - "desc": "Cassandra Prometheus node exporter", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9500, - "direction": "ingress" - }, - { - "desc": "Cassandra JMX", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 7199, - "direction": "ingress" - }, - 
{ - "desc": "Cassandra cluster comm", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 7000, - "direction": "ingress" - }, - { - "desc": "Cassandra API", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 9042, - "direction": "ingress" - }, - { - "desc": "RabbitMQ API", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 5672, - "direction": "ingress" - }, - { - "desc": "RabbitMQ UI", - "id": "", - "protocol": "tcp", - "ethertype": "IPv4", - "remote_ip": "10.5.0.0/24", - "port": 15672, - "direction": "ingress" - } - ] - } - }, - "network": { - "name": "sampledeployment004_network", - "id": "", - "subnet": { - "name": "sampledeployment004_subnet", - "id": "", - "cidr": "10.5.0.0/24", - "allocation_pool": "start=10.5.0.240,end=10.5.0.254" - }, - "router": { - "name": "sampledeployment004_router", - "id": "", - "external_gateway_network_name": "ext-floating1" - } - }, - "file_groups_up": { - "up_all_cfg": { - "src": "/tmp/capi_cfg", - "dst": "/mnt/capi_cfg", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": { - "env": { - "LOCAL_CFG_LOCATION": "/mnt/capi_cfg", - "MOUNT_POINT_CFG": "/mnt/capi_cfg", - "MOUNT_POINT_IN": "/mnt/capi_in", - "MOUNT_POINT_OUT": "/mnt/capi_out" - }, - "cmd": [ - "sh/capiscripts/adjust_cfg_in_out.sh" - ] - } - }, - "up_capiparquet_binary": { - "src": "../../build/linux/amd64/capiparquet.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": "/home/{CAPIDEPLOY_SSH_USER}/bin/capiparquet" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_daemon_binary": { - "src": "../../build/linux/amd64/capidaemon.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": 
"/home/{CAPIDEPLOY_SSH_USER}/bin/capidaemon" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_daemon_env_config": { - "src": "../../pkg/exe/daemon/capidaemon.json", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": {} - }, - "up_diff_scripts": { - "src": "./diff", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 744, - "after": {} - }, - "up_lookup_bigtest_in": { - "src": "/tmp/capi_in/lookup_bigtest/all.tgz", - "dst": "/mnt/capi_in/lookup_bigtest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": { - "env": { - "OWNER_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": [ - "sh/capiscripts/unpack_lookup_big_in.sh" - ] - } - }, - "up_lookup_bigtest_out": { - "src": "/tmp/capi_out/lookup_bigtest/all.tgz", - "dst": "/mnt/capi_out/lookup_bigtest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": { - "env": { - "OWNER_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": [ - "sh/capiscripts/unpack_lookup_big_out.sh" - ] - } - }, - "up_lookup_quicktest_in": { - "src": "/tmp/capi_in/lookup_quicktest", - "dst": "/mnt/capi_in/lookup_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_lookup_quicktest_out": { - "src": "/tmp/capi_out/lookup_quicktest", - "dst": "/mnt/capi_out/lookup_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_portfolio_quicktest_in": { - "src": "/tmp/capi_in/portfolio_quicktest", - "dst": "/mnt/capi_in/portfolio_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_portfolio_quicktest_out": { - "src": "/tmp/capi_out/portfolio_quicktest", - "dst": "/mnt/capi_out/portfolio_quicktest", - "dir_permissions": 777, - 
"file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_py_calc_quicktest_in": { - "src": "/tmp/capi_in/py_calc_quicktest", - "dst": "/mnt/capi_in/py_calc_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_py_calc_quicktest_out": { - "src": "/tmp/capi_out/py_calc_quicktest", - "dst": "/mnt/capi_out/py_calc_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_tag_and_denormalize_quicktest_in": { - "src": "/tmp/capi_in/tag_and_denormalize_quicktest", - "dst": "/mnt/capi_in/tag_and_denormalize_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_tag_and_denormalize_quicktest_out": { - "src": "/tmp/capi_out/tag_and_denormalize_quicktest", - "dst": "/mnt/capi_out/tag_and_denormalize_quicktest", - "dir_permissions": 777, - "file_permissions": 666, - "owner": "{CAPIDEPLOY_SSH_USER}", - "after": {} - }, - "up_toolbelt_binary": { - "src": "../../build/linux/amd64/capitoolbelt.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": "/home/{CAPIDEPLOY_SSH_USER}/bin/capitoolbelt" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_toolbelt_env_config": { - "src": "../../pkg/exe/toolbelt/capitoolbelt.json", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": {} - }, - "up_ui": { - "src": "../../ui/public", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/ui", - "dir_permissions": 755, - "file_permissions": 644, - "after": {} - }, - "up_webapi_binary": { - "src": "../../build/linux/amd64/capiwebapi.gz", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": { - "env": { - "CAPI_BINARY": 
"/home/{CAPIDEPLOY_SSH_USER}/bin/capiwebapi" - }, - "cmd": [ - "sh/capiscripts/unpack_capi_binary.sh" - ] - } - }, - "up_webapi_env_config": { - "src": "../../pkg/exe/webapi/capiwebapi.json", - "dst": "/home/{CAPIDEPLOY_SSH_USER}/bin", - "dir_permissions": 744, - "file_permissions": 644, - "after": {} - } - }, - "file_groups_down": { - "down_capi_logs": { - "src": "/var/log/capidaemon/", - "dst": "./tmp/capi_logs" - }, - "down_capi_out": { - "src": "/mnt/capi_out", - "dst": "./tmp/capi_out" - } - }, - "instances": { - "bastion": { - "host_name": "sampledeployment004-bastion", - "security_group": "bastion", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.10", - "uses_ssh_config_external_ip_address": true, - "flavor": "a1-ram2-disk20-perf1", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "volumes": { - "cfg": { - "name": "sampledeployment004_cfg", - "mount_point": "/mnt/capi_cfg", - "size": 1, - "type": "CEPH_1_perf1", - "permissions": 777, - "owner": "{CAPIDEPLOY_SSH_USER}", - "availability_zone": "nova", - "id": "", - "attachment_id": "", - "device": "", - "block_device_id": "" - }, - "in": { - "name": "sampledeployment004_in", - "mount_point": "/mnt/capi_in", - "size": 1, - "type": "CEPH_1_perf1", - "permissions": 777, - "owner": "{CAPIDEPLOY_SSH_USER}", - "availability_zone": "nova", - "id": "", - "attachment_id": "", - "device": "", - "block_device_id": "" - }, - "out": { - "name": "sampledeployment004_out", - "mount_point": "/mnt/capi_out", - "size": 1, - "type": "CEPH_1_perf1", - "permissions": 777, - "owner": "{CAPIDEPLOY_SSH_USER}", - "availability_zone": "nova", - "id": "", - "attachment_id": "", - "device": "", - "block_device_id": "" - } - }, - "id": "", - "users": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "public_key_path": "~/.ssh/sampledeployment004_sftp.pub" - } - ], - "private_keys": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "private_key_path": "~/.ssh/sampledeployment004_sftp" - } - 
], - "service": { - "env": { - "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\"]'", - "EXTERNAL_IP_ADDRESS": "{EXTERNAL_IP_ADDRESS}", - "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", - "PROMETHEUS_IP": "10.5.0.4", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "RABBITMQ_IP": "10.5.0.5", - "SFTP_USER": "{CAPIDEPLOY_SFTP_USER}", - "SSH_USER": "{CAPIDEPLOY_SSH_USER}", - "SUBNET_CIDR": "10.5.0.0/24", - "WEBAPI_PORT": "6543" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/common/increase_ssh_connection_limit.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/nfs/install_server.sh", - "sh/nginx/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/rsyslog/config_capidaemon_log_receiver.sh", - "sh/logrotate/config_capidaemon_logrotate.sh", - "sh/toolbelt/config.sh", - "sh/webapi/config.sh", - "sh/ui/config.sh", - "sh/nginx/config_ui.sh", - "sh/nfs/config_server.sh", - "sh/nginx/config_prometheus_reverse_proxy.sh", - "sh/nginx/config_rabbitmq_reverse_proxy.sh" - ], - "start": [ - "sh/webapi/start.sh", - "sh/nginx/start.sh" - ], - "stop": [ - "sh/webapi/stop.sh", - "sh/nginx/stop.sh" - ] - } - }, - "applicable_file_groups": [ - "up_all_cfg", - "up_lookup_bigtest_in", - "up_lookup_bigtest_out", - "up_lookup_quicktest_in", - "up_lookup_quicktest_out", - "up_tag_and_denormalize_quicktest_in", - "up_tag_and_denormalize_quicktest_out", - "up_py_calc_quicktest_in", - "up_py_calc_quicktest_out", - "up_portfolio_quicktest_in", - "up_portfolio_quicktest_out", - "up_webapi_binary", - "up_webapi_env_config", - "up_toolbelt_binary", - "up_toolbelt_env_config", - "up_capiparquet_binary", - "up_ui", - "up_diff_scripts", - "down_capi_out", - "down_capi_logs" - ] - }, - "cass001": { - "host_name": "sampledeployment004-cass001", - "security_group": "internal", - "root_key_name": 
"sampledeployment004-root-key", - "ip_address": "10.5.0.11", - "flavor": "a8-ram16-disk20-perf2", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.11", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-9223372036854775808", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass002": { - "host_name": "sampledeployment004-cass002", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.12", - "flavor": "a8-ram16-disk20-perf2", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.12", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "-4611686018427387904", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass003": { - "host_name": "sampledeployment004-cass003", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.13", - "flavor": "a8-ram16-disk20-perf2", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": 
"10.5.0.13", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "0", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "cass004": { - "host_name": "sampledeployment004-cass004", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.14", - "flavor": "a8-ram16-disk20-perf2", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "service": { - "env": { - "CASSANDRA_IP": "10.5.0.14", - "CASSANDRA_SEEDS": "10.5.0.11,10.5.0.12", - "INITIAL_TOKEN": "4611686018427387904", - "PROMETHEUS_CASSANDRA_EXPORTER_VERSION": "0.9.12", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/cassandra/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/cassandra/config.sh" - ], - "start": [ - "sh/cassandra/start.sh" - ], - "stop": [ - "sh/cassandra/stop.sh" - ] - } - } - }, - "daemon001": { - "host_name": "sampledeployment004-daemon001", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.101", - "flavor": "a4-ram8-disk20-perf1", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "private_keys": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "private_key_path": "~/.ssh/sampledeployment004_sftp" - } - ], - "service": { - "env": { - "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": 
"'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\"]'", - "DAEMON_DB_WRITERS": "15", - "DAEMON_THREAD_POOL_SIZE": "15", - "INTERNAL_BASTION_IP": "10.5.0.10", - "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "SFTP_USER": "{CAPIDEPLOY_SFTP_USER}", - "SSH_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/nfs/install_client.sh", - "sh/daemon/install.sh", - "sh/prometheus/install_node_exporter.sh" - ], - "config": [ - "sh/nfs/config_client.sh", - "sh/logrotate/config_capidaemon_logrotate.sh", - "sh/rsyslog/config_capidaemon_log_sender.sh", - "sh/prometheus/config_node_exporter.sh", - "sh/daemon/config.sh" - ], - "start": [ - "sh/daemon/start.sh" - ], - "stop": [ - "sh/daemon/stop.sh" - ] - } - }, - "applicable_file_groups": [ - "up_daemon_binary", - "up_daemon_env_config" - ] - }, - "daemon002": { - "host_name": "sampledeployment004-daemon002", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.102", - "flavor": "a4-ram8-disk20-perf1", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "private_keys": [ - { - "name": "{CAPIDEPLOY_SFTP_USER}", - "private_key_path": "~/.ssh/sampledeployment004_sftp" - } - ], - "service": { - "env": { - "AMQP_URL": "amqp://{CAPIDEPLOY_RABBITMQ_USER_NAME}:{CAPIDEPLOY_RABBITMQ_USER_PASS}@10.5.0.5/", - "CASSANDRA_HOSTS": "'[\"10.5.0.11\",\"10.5.0.12\",\"10.5.0.13\",\"10.5.0.14\"]'", - "DAEMON_DB_WRITERS": "15", - "DAEMON_THREAD_POOL_SIZE": "15", - "INTERNAL_BASTION_IP": "10.5.0.10", - "NFS_DIRS": "/mnt/capi_cfg,/mnt/capi_in,/mnt/capi_out", - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "SFTP_USER": "{CAPIDEPLOY_SFTP_USER}", - "SSH_USER": "{CAPIDEPLOY_SSH_USER}" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/nfs/install_client.sh", - "sh/daemon/install.sh", - 
"sh/prometheus/install_node_exporter.sh" - ], - "config": [ - "sh/nfs/config_client.sh", - "sh/logrotate/config_capidaemon_logrotate.sh", - "sh/rsyslog/config_capidaemon_log_sender.sh", - "sh/prometheus/config_node_exporter.sh", - "sh/daemon/config.sh" - ], - "start": [ - "sh/daemon/start.sh" - ], - "stop": [ - "sh/daemon/stop.sh" - ] - } - }, - "applicable_file_groups": [ - "up_daemon_binary", - "up_daemon_env_config" - ] - }, - "prometheus": { - "host_name": "sampledeployment004-prometheus", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.4", - "flavor": "a1-ram2-disk20-perf1", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "service": { - "env": { - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "PROMETHEUS_TARGETS": "\\'localhost:9100\\',\\'10.5.0.10:9100\\',\\'10.5.0.5:9100\\',\\'10.5.0.11:9100\\',\\'10.5.0.12:9100\\',\\'10.5.0.13:9100\\',\\'10.5.0.14:9100\\',\\'10.5.0.11:9500\\',\\'10.5.0.12:9500\\',\\'10.5.0.13:9500\\',\\'10.5.0.14:9500\\',\\'10.5.0.101:9100\\',\\'10.5.0.102:9100\\'", - "PROMETHEUS_VERSION": "2.45.0" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_server.sh", - "sh/prometheus/install_node_exporter.sh" - ], - "config": [ - "sh/prometheus/config_server.sh", - "sh/prometheus/config_node_exporter.sh" - ], - "start": [ - "sh/prometheus/start_server.sh" - ], - "stop": [ - "sh/prometheus/stop_server.sh" - ] - } - } - }, - "rabbitmq": { - "host_name": "sampledeployment004-rabbitmq", - "security_group": "internal", - "root_key_name": "sampledeployment004-root-key", - "ip_address": "10.5.0.5", - "flavor": "a1-ram2-disk20-perf1", - "image": "Ubuntu 22.04 LTS Jammy Jellyfish", - "availability_zone": "dc3-a-09", - "id": "", - "service": { - "env": { - "PROMETHEUS_NODE_EXPORTER_VERSION": "1.6.0", - "RABBITMQ_ADMIN_NAME": "{CAPIDEPLOY_RABBITMQ_ADMIN_NAME}", - "RABBITMQ_ADMIN_PASS": 
"{CAPIDEPLOY_RABBITMQ_ADMIN_PASS}", - "RABBITMQ_USER_NAME": "{CAPIDEPLOY_RABBITMQ_USER_NAME}", - "RABBITMQ_USER_PASS": "{CAPIDEPLOY_RABBITMQ_USER_PASS}" - }, - "cmd": { - "install": [ - "sh/common/replace_nameserver.sh", - "sh/prometheus/install_node_exporter.sh", - "sh/rabbitmq/install.sh" - ], - "config": [ - "sh/prometheus/config_node_exporter.sh", - "sh/rabbitmq/config.sh" - ], - "start": [ - "sh/rabbitmq/start.sh" - ], - "stop": [ - "sh/rabbitmq/stop.sh" - ] - } - } - } - }, - "OpenstackVars": null -} \ No newline at end of file diff --git a/test/deploy/sh/capiscripts/adjust_cfg_in_out.sh b/test/deploy/sh/capiscripts/adjust_cfg_in_out.sh index ba802ce..7ac3773 100755 --- a/test/deploy/sh/capiscripts/adjust_cfg_in_out.sh +++ b/test/deploy/sh/capiscripts/adjust_cfg_in_out.sh @@ -29,6 +29,7 @@ if [ "$MOUNT_POINT_OUT" = "" ]; then exit 1 fi +# In all script params files, replace /tmp/... with /mnt/... for filepath in $(find $LOCAL_CFG_LOCATION -name 'script_params*.json' -type f -print); do sudo sed -i -e 's~/tmp/capi_cfg~'$MOUNT_POINT_CFG'~g' $filepath sudo sed -i -e 's~/tmp/capi_in~'$MOUNT_POINT_IN'~g' $filepath diff --git a/test/deploy/sh/capiscripts/unpack_capi_binary.sh b/test/deploy/sh/capiscripts/unpack_capi_binary.sh index c40a393..fc76241 100755 --- a/test/deploy/sh/capiscripts/unpack_capi_binary.sh +++ b/test/deploy/sh/capiscripts/unpack_capi_binary.sh @@ -1,6 +1,6 @@ if [ "$CAPI_BINARY" = "" ]; then echo Error, missing: CAPI_BINARY=/home/$SSH_USER/bin/capitoolbelt - exit 1 + exit 1 fi gzip -d -f $CAPI_BINARY.gz diff --git a/test/deploy/sh/capiscripts/unpack_lookup_big_in.sh b/test/deploy/sh/capiscripts/unpack_lookup_big_in.sh index 1928913..3139c6d 100755 --- a/test/deploy/sh/capiscripts/unpack_lookup_big_in.sh +++ b/test/deploy/sh/capiscripts/unpack_lookup_big_in.sh @@ -1,6 +1,6 @@ if [ "$OWNER_USER" = "" ]; then echo Error, missing: OWNER_USER=sftpuser or OWNER_USER=ubuntu - exit 1 + exit 1 fi rm -f /mnt/capi_in/lookup_bigtest/*.csv 
/mnt/capi_in/lookup_bigtest/*.parquet diff --git a/test/deploy/sh/capiscripts/unpack_lookup_big_out.sh b/test/deploy/sh/capiscripts/unpack_lookup_big_out.sh index b503966..9300708 100644 --- a/test/deploy/sh/capiscripts/unpack_lookup_big_out.sh +++ b/test/deploy/sh/capiscripts/unpack_lookup_big_out.sh @@ -1,6 +1,6 @@ if [ "$OWNER_USER" = "" ]; then echo Error, missing: OWNER_USER=sftpuser or OWNER_USER=ubuntu - exit 1 + exit 1 fi rm -f /mnt/capi_out/lookup_bigtest/*.csv /mnt/capi_out/lookup_bigtest/*.parquet diff --git a/test/deploy/sh/capiscripts/unpack_portfolio_big_in.sh b/test/deploy/sh/capiscripts/unpack_portfolio_big_in.sh new file mode 100755 index 0000000..123d81c --- /dev/null +++ b/test/deploy/sh/capiscripts/unpack_portfolio_big_in.sh @@ -0,0 +1,15 @@ +if [ "$OWNER_USER" = "" ]; then + echo Error, missing: OWNER_USER=sftpuser or OWNER_USER=ubuntu + exit 1 +fi + +rm -f /mnt/capi_in/portfolio_bigtest/*.csv /mnt/capi_in/portfolio_bigtest/*.parquet + +sudo tar -zxf /mnt/capi_in/portfolio_bigtest/all.tgz --directory /mnt/capi_in/portfolio_bigtest +rm /mnt/capi_in/portfolio_bigtest/all.tgz + +sudo chown $OWNER_USER /mnt/capi_in/portfolio_bigtest/*.csv +sudo chmod 644 /mnt/capi_in/portfolio_bigtest/*.csv +sudo chown $OWNER_USER /mnt/capi_in/portfolio_bigtest/*.parquet +sudo chmod 644 /mnt/capi_in/portfolio_bigtest/*.parquet + diff --git a/test/deploy/sh/capiscripts/unpack_portfolio_big_out.sh b/test/deploy/sh/capiscripts/unpack_portfolio_big_out.sh new file mode 100644 index 0000000..f873693 --- /dev/null +++ b/test/deploy/sh/capiscripts/unpack_portfolio_big_out.sh @@ -0,0 +1,15 @@ +if [ "$OWNER_USER" = "" ]; then + echo Error, missing: OWNER_USER=sftpuser or OWNER_USER=ubuntu + exit 1 +fi + +rm -f /mnt/capi_out/portfolio_bigtest/*.csv /mnt/capi_out/portfolio_bigtest/*.parquet + +sudo tar -zxf /mnt/capi_out/portfolio_bigtest/all.tgz --directory /mnt/capi_out/portfolio_bigtest +rm /mnt/capi_out/portfolio_bigtest/all.tgz + +sudo chown $OWNER_USER 
/mnt/capi_out/portfolio_bigtest/*.csv +sudo chmod 644 /mnt/capi_out/portfolio_bigtest/*.csv +sudo chown $OWNER_USER /mnt/capi_out/portfolio_bigtest/*.parquet +sudo chmod 644 /mnt/capi_out/portfolio_bigtest/*.parquet + diff --git a/test/deploy/sh/cassandra/config.sh b/test/deploy/sh/cassandra/config.sh index 931df55..c4c663d 100755 --- a/test/deploy/sh/cassandra/config.sh +++ b/test/deploy/sh/cassandra/config.sh @@ -21,9 +21,11 @@ sudo sed -i -e "s~endpoint_snitch:[\: \"a-zA-Z0-9\.]*~endpoint_snitch: SimpleSni # Data on attached volume. Comment out to store data on the ephemeral instance volume at /var/lib/cassandra/data. #sudo sed -i -e "s~- /var/lib/cassandra/data~- /mnt/data~g" /etc/cassandra/cassandra.yaml +sudo sed -i -e "s~- /var/lib/cassandra/data~- /mnt/ramdisk/data~g" /etc/cassandra/cassandra.yaml # Commitlog on attached volume. Comment out to store commitlog on the ephemeral instance volume at /var/lib/cassandra/commitlog. #sudo sed -i -e "s~/var/lib/cassandra/commitlog~/mnt/commitlog~g" /etc/cassandra/cassandra.yaml +sudo sed -i -e "s~/var/lib/cassandra/commitlog~/mnt/ramdisk/commitlog~g" /etc/cassandra/cassandra.yaml # Minimal number of vnodes, we do not need elasticity sudo sed -i -e "s~num_tokens:[ 0-9]*~num_tokens: 1~g" /etc/cassandra/cassandra.yaml diff --git a/test/deploy/sh/cassandra/install.sh b/test/deploy/sh/cassandra/install.sh index 01f17ff..65fc77b 100755 --- a/test/deploy/sh/cassandra/install.sh +++ b/test/deploy/sh/cassandra/install.sh @@ -19,4 +19,17 @@ if [ "$?" -ne "0" ]; then exit $? 
fi -sudo mv cassandra-exporter-agent-${PROMETHEUS_CASSANDRA_EXPORTER_VERSION}-SNAPSHOT.jar /usr/share/cassandra/lib/ \ No newline at end of file +sudo mv cassandra-exporter-agent-${PROMETHEUS_CASSANDRA_EXPORTER_VERSION}-SNAPSHOT.jar /usr/share/cassandra/lib/ + +# RAM disk size in GB +export RAM_DISK_SIZE=$(awk '/MemFree/ { printf "%.0f\n", $2/1024/2 }' /proc/meminfo) +echo $RAM_DISK_SIZE +sudo mkdir /mnt/ramdisk +sudo chmod 777 /mnt/ramdisk +sudo mount -t tmpfs -o size="$RAM_DISK_SIZE"m myramdisk /mnt/ramdisk +if [ "$?" -ne "0" ]; then + echo Cannot mount ramdisk, exiting + exit $? +fi + + diff --git a/test/deploy/sh/local/prepare_demo_data.sh b/test/deploy/sh/local/prepare_demo_data.sh index f3f792d..402570c 100755 --- a/test/deploy/sh/local/prepare_demo_data.sh +++ b/test/deploy/sh/local/prepare_demo_data.sh @@ -20,8 +20,15 @@ cp -r ./test/data/cfg/* /tmp/capi_cfg cp -r ./test/data/in/* /tmp/capi_in cp -r ./test/data/out/* /tmp/capi_out -cd ./test/code/lookup/bigtest +# Generate big data + +pushd ./test/code/lookup/bigtest +./1_create_data.sh +popd + +pushd ./test/code/portfolio/bigtest ./1_create_data.sh +popd diff --git a/test/deploy/start_cluster.sh b/test/deploy/start_cluster.sh index c300df1..1bf9843 100755 --- a/test/deploy/start_cluster.sh +++ b/test/deploy/start_cluster.sh @@ -3,6 +3,17 @@ # This script configures and starts Cassandra cluster configured in $1. # It is not called by capideploy, it should be executed manually. See test/deploy/README.md for details. +# Troubleshooting. Sometimes, a node cannot start because of schema version mismatch. Then: +# on the failing node: +# sudo systemctl stop cassandra +# sudo rm -fR /var/lib/cassandra/data/* +# sudo rm -fR /var/lib/cassandra/commitlog/* +# on a working node - remove failing node from cluster: +# nodetool removenode +# on the failing node: +# sudo systemctl start cassandra +# and wait until the node actually starts (1-2 min) + if ! 
command -v jq &> /dev/null then echo "jq command could not be found, please install it before running this script, e.g. 'apt-get install -y jq'" @@ -90,13 +101,3 @@ do done done ssh -o StrictHostKeyChecking=no -i $sshKeyFile -J $externalIpAddress $CAPIDEPLOY_SSH_USER@${cassIpArray[0]} 'nodetool describecluster;nodetool status' -echo 'Troubleshoot:' -echo 'On the failing node:' -echo sudo systemctl stop cassandra -echo sudo rm -fR /var/lib/cassandra/data/* -echo sudo rm -fR /var/lib/cassandra/commitlog/* -echo 'On a working node:' -echo 'nodetool removenode ' -echo 'On the failing node:' -echo sudo systemctl start cassandra -echo 'and wait!'