Skip to content

Commit

Permalink
COD-3782 - Terraform extend create database parameters (#108)
Browse files Browse the repository at this point in the history
Co-authored-by: Richard Antal <richard.antal@cloudera.com>
  • Loading branch information
richardantal and Richard Antal authored Apr 29, 2024
1 parent 743f03a commit f3364fc
Show file tree
Hide file tree
Showing 7 changed files with 560 additions and 8 deletions.
71 changes: 67 additions & 4 deletions docs/resources/operational_database.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@ resource "cdp_operational_database" "opdb" {
// scale_type = "MICRO" // valid options are "MICRO","LIGHT","HEAVY"
// storage_type = "HDFS" // valid options are "CLOUD_WITH_EPHEMERAL","CLOUD","HDFS"
disable_multi_az = false
// num_edge_nodes = 1
}
```
Expand All @@ -58,20 +57,75 @@ resource "cdp_operational_database" "opdb" {

### Optional

- `disable_external_db` (Boolean) Disable external database creation or not, it is only available in the BETA cdpcli
- `attached_storage_for_workers` (Attributes) Attached storage for the worker nodes for AWS, Azure, and GCP cloud providers. (see [below for nested schema](#nestedatt--attached_storage_for_workers))
- `auto_scaling_parameters` (Attributes) (see [below for nested schema](#nestedatt--auto_scaling_parameters))
- `custom_user_tags` (Attributes Set) Optional tags to apply to launched infrastructure resources (see [below for nested schema](#nestedatt--custom_user_tags))
- `disable_external_db` (Boolean) Disable external database creation or not. It is only available in the BETA cdpcli.
- `disable_jwt_auth` (Boolean) Disable OAuth Bearer (JWT) authentication scheme.
- `disable_kerberos` (Boolean) Disable Kerberos authentication.
- `disable_multi_az` (Boolean) Disable deployment to multiple availability zones or not
- `java_version` (Number) Java version, it is only available in the BETA cdpcli
- `enable_grafana` (Boolean) To enable grafana server for the database.
- `enable_region_canary` (Boolean) To enable the region canary for the database.
- `image` (Attributes) Details of an Image. (see [below for nested schema](#nestedatt--image))
- `java_version` (Number) Java version. It is only available in the BETA cdpcli.
- `num_edge_nodes` (Number) Number of edge nodes
- `polling_options` (Attributes) Polling related configuration options that could specify various values that will be used during CDP resource creation. (see [below for nested schema](#nestedatt--polling_options))
- `recipes` (Attributes Set) Custom recipes for the database. (see [below for nested schema](#nestedatt--recipes))
- `scale_type` (String) Scale type, MICRO, LIGHT or HEAVY
- `storage_location` (String) Storage Location for OPDB. It is only available in the BETA cdpcli.
- `storage_type` (String) Storage type for clusters, CLOUD_WITH_EPHEMERAL, CLOUD or HDFS
- `subnet_id` (String) ID of the subnet to deploy to

### Read-Only

- `crn` (String) The CRN of the cluster.
- `status` (String) The last known state of the cluster
- `storage_location` (String) Storage Location for OPDB

<a id="nestedatt--attached_storage_for_workers"></a>
### Nested Schema for `attached_storage_for_workers`

Optional:

- `volume_count` (Number) The number of Volumes. Default is 4. Valid Range: Minimum value of 1, maximum value 8.
- `volume_size` (Number) The target size of the volume, in GiB. Default is 2048.
- `volume_type` (String) Volume Type. HDD - Hard disk drives (HDD) volume type. Default is HDD. SSD - Solid disk drives (SSD) volume type. LOCAL_SSD - Local SSD volume type.


<a id="nestedatt--auto_scaling_parameters"></a>
### Nested Schema for `auto_scaling_parameters`

Optional:

- `evaluation_period` (Number) The period of metrics (in seconds) that needs to be considered.
- `max_compute_nodes_for_database` (Number) The maximum number of compute nodes, as per these metrics, that can be scaled up to. It is only available in the BETA cdpcli.
- `max_cpu_utilization` (Number) The maximum percentage threshold for the CPU utilization of the worker nodes. The CPU utilization is obtained from the Cloudera Manager metric ‘cpu_percent’ across worker nodes. Set 100 or more to disable the CPU metrics. It is only available in the BETA cdpcli.
- `max_hdfs_usage_percentage` (Number) The maximum percentage of HDFS utilization for the database before we trigger the scaling. It is only available in the BETA cdpcli.
- `max_regions_per_region_server` (Number) The maximum number of regions per region server. It is only available in the BETA cdpcli.
- `max_workers_for_database` (Number) The maximum number of worker nodes that the cluster can be scaled up to, as per these metrics.
- `max_workers_per_batch` (Number) The maximum number of worker nodes that can be added in a single scale-up batch, as per these metrics.
- `min_compute_nodes_for_database` (Number) The minimum number of compute nodes, as per these metrics, that can be scaled down to. It is only available in the BETA cdpcli.
- `min_workers_for_database` (Number) The minimum number of worker nodes that the cluster can be scaled down to, as per these metrics.
- `minimum_block_cache_gb` (Number) The amount of block cache, in Gigabytes, which the database should have.
- `targeted_value_for_metric` (Number) The target value of the metric a user expects to maintain for the cluster.


<a id="nestedatt--custom_user_tags"></a>
### Nested Schema for `custom_user_tags`

Required:

- `key` (String)
- `value` (String)


<a id="nestedatt--image"></a>
### Nested Schema for `image`

Required:

- `catalog` (String) Catalog name for the image.
- `id` (String) Image ID for the database.


<a id="nestedatt--polling_options"></a>
### Nested Schema for `polling_options`
Expand All @@ -81,3 +135,12 @@ Optional:
- `polling_timeout` (Number) Timeout value in minutes that specifies for how long should the polling go for resource creation/deletion.


<a id="nestedatt--recipes"></a>
### Nested Schema for `recipes`

Required:

- `instance_group` (String) The name of the designated instance group.
- `names` (Set of String) The set of recipe names that are going to be applied on the given instance group.


1 change: 0 additions & 1 deletion examples/resources/cdp_operational_database/resource.tf
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,5 @@ resource "cdp_operational_database" "opdb" {
// scale_type = "MICRO" // valid options are "MICRO","LIGHT","HEAVY"
// storage_type = "HDFS" // valid options are "CLOUD_WITH_EPHEMERAL","CLOUD","HDFS"

disable_multi_az = false
// num_edge_nodes = 1
}
95 changes: 95 additions & 0 deletions resources/opdb/converter.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ package opdb
import (
"context"
"fmt"
"github.com/cloudera/terraform-provider-cdp/utils"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"

Expand All @@ -34,10 +35,104 @@ func fromModelToDatabaseRequest(model databaseResourceModel, ctx context.Context
req.JavaVersion = int64To32(model.JavaVersion)
req.NumEdgeNodes = int64To32(model.NumEdgeNodes)

if model.AutoScalingParameters != nil {
tflog.Info(ctx, fmt.Sprintf("Autoscaling parameters %+v.", model.AutoScalingParameters))
req.AutoScalingParameters = createAutoScalingParameters(*model.AutoScalingParameters, ctx)
}

if model.AttachedStorageForWorkers != nil {
req.AttachedStorageForWorkers = createAttachedStorageForWorkers(*model.AttachedStorageForWorkers, ctx)
}

req.DisableKerberos = model.DisableKerberos.ValueBool()
req.DisableJwtAuth = model.DisableJwtAuth.ValueBool()

if model.Image != nil {
req.Image = createImage(*model.Image, ctx)
}

req.EnableGrafana = model.EnableGrafana.ValueBool()

req.CustomUserTags = createCustomUserTags(ctx, model.CustomUserTags)
req.EnableRegionCanary = model.EnableRegionCanary.ValueBool()

req.Recipes = createRecipes(ctx, model.Recipes)
req.StorageLocation = model.StorageLocation.ValueString()

tflog.Debug(ctx, fmt.Sprintf("Conversion from databaseResourceModel to CreateDatabaseRequest has finished with request: %+v.", req))
return &req
}

// createAutoScalingParameters maps the Terraform auto-scaling configuration onto
// the CDP API request model. Plain int64 metrics are passed through, while the
// worker/node counts are narrowed to int32 via the file's conversion helpers.
// The ctx parameter is currently unused but kept for signature consistency with
// the sibling converter functions.
func createAutoScalingParameters(params AutoScalingParametersStruct, ctx context.Context) *opdbmodels.AutoScalingParameters {
	converted := opdbmodels.AutoScalingParameters{
		EvaluationPeriod:       params.EvaluationPeriod.ValueInt64(),
		TargetedValueForMetric: params.TargetedValueForMetric.ValueInt64(),

		MinWorkersForDatabase: int64To32(params.MinWorkersForDatabase),
		MaxWorkersForDatabase: int64To32(params.MaxWorkersForDatabase),
		MaxWorkersPerBatch:    int64To32(params.MaxWorkersPerBatch),
		MinimumBlockCacheGb:   int64To32(params.MinimumBlockCacheGb),

		// BETA-only knobs; the compute-node bounds are optional pointers.
		MaxCPUUtilization:          int64To32(params.MaxCPUUtilization),
		MaxHdfsUsagePercentage:     int64To32(params.MaxHdfsUsagePercentage),
		MaxRegionsPerRegionServer:  int64To32(params.MaxRegionsPerRegionServer),
		MaxComputeNodesForDatabase: int64To32Pointer(params.MaxComputeNodesForDatabase),
		MinComputeNodesForDatabase: int64To32Pointer(params.MinComputeNodesForDatabase),
	}
	return &converted
}

// createAttachedStorageForWorkers converts the Terraform worker-storage settings
// into the CDP API model: count and size are narrowed to int32, and the volume
// type string is cast to the API's VolumeType enum. The ctx parameter is unused
// but kept for signature consistency with the sibling converters.
func createAttachedStorageForWorkers(storage AttachedStorageForWorkersStruct, ctx context.Context) *opdbmodels.AttachedStorageForWorkers {
	converted := opdbmodels.AttachedStorageForWorkers{}
	converted.VolumeCount = int64To32(storage.VolumeCount)
	converted.VolumeSize = int64To32(storage.VolumeSize)
	converted.VolumeType = opdbmodels.VolumeType(storage.VolumeType.ValueString())
	return &converted
}

// createImage converts the Terraform image block (catalog name + image ID) into
// the CDP API Image model. Both fields are passed as string pointers, matching
// the generated API model. The ctx parameter is unused but kept for signature
// consistency with the sibling converters.
func createImage(img Image, ctx context.Context) *opdbmodels.Image {
	converted := opdbmodels.Image{
		Catalog: img.Catalog.ValueStringPointer(),
		ID:      img.ID.ValueStringPointer(),
	}
	return &converted
}

// createCustomUserTags converts the Terraform list of custom user tags into the
// CDP API KeyValuePair list. Returns nil for empty input so an absent tag set
// stays absent in the request rather than becoming an empty list.
func createCustomUserTags(ctx context.Context, keyValuePairs []KeyValuePair) []*opdbmodels.KeyValuePair {
	if len(keyValuePairs) == 0 {
		return nil
	}
	// Pre-size the result to avoid repeated slice growth.
	kvList := make([]*opdbmodels.KeyValuePair, 0, len(keyValuePairs))
	for _, vrs := range keyValuePairs {
		tflog.Debug(ctx, fmt.Sprintf("Converting KeyValuePair: %+v.", vrs))
		kvList = append(kvList, createKeyValuePair(vrs))
	}
	return kvList
}

// createKeyValuePair converts a single Terraform key/value tag into the CDP API
// KeyValuePair model.
func createKeyValuePair(pair KeyValuePair) *opdbmodels.KeyValuePair {
	converted := opdbmodels.KeyValuePair{}
	converted.Key = pair.Key.ValueString()
	converted.Value = pair.Value.ValueString()
	return &converted
}

// createRecipes converts the Terraform list of custom recipes into the CDP API
// CustomRecipe list. Returns nil for empty input so an absent recipe set stays
// absent in the request.
func createRecipes(ctx context.Context, recipes []Recipe) []*opdbmodels.CustomRecipe {
	if len(recipes) == 0 {
		return nil
	}
	// Pre-size the result to avoid repeated slice growth.
	recipeList := make([]*opdbmodels.CustomRecipe, 0, len(recipes))
	for _, vrs := range recipes {
		// Fixed copy-paste in the log message: this loop converts Recipes,
		// not KeyValuePairs.
		tflog.Debug(ctx, fmt.Sprintf("Converting Recipe: %+v.", vrs))
		recipeList = append(recipeList, createRecipe(vrs))
	}
	return recipeList
}

// createRecipe converts a single Terraform recipe block into the CDP API
// CustomRecipe model: the instance-group string is cast to the API's
// InstanceGroupType enum pointer, and the recipe-name set is flattened to a
// string list.
func createRecipe(recipe Recipe) *opdbmodels.CustomRecipe {
	groupType := opdbmodels.InstanceGroupType(recipe.InstanceGroup.ValueString())
	converted := opdbmodels.CustomRecipe{
		InstanceGroup: opdbmodels.NewInstanceGroupType(groupType),
		Names:         utils.FromSetValueToStringList(recipe.Names),
	}
	return &converted
}

// int64To32Pointer converts a Terraform Int64 value into an *int32 for optional
// API request fields. Null/unknown plan values map to nil so the field is
// omitted from the request, instead of being sent as a pointer to 0 (which the
// original did, silently turning "not configured" into an explicit zero).
// NOTE(review): narrowing int64 -> int32 can still overflow for out-of-range
// values; schema-level validation is assumed to bound these inputs.
func int64To32Pointer(in types.Int64) *int32 {
	if in.IsNull() || in.IsUnknown() {
		return nil
	}
	n := int32(in.ValueInt64())
	return &n
}

func int64To32(in types.Int64) int32 {
n64 := in.ValueInt64()
return int32(n64)
Expand Down
Loading

0 comments on commit f3364fc

Please sign in to comment.