diff --git a/docs/resources/operational_database.md b/docs/resources/operational_database.md
index ac31fc45..52ad96b4 100644
--- a/docs/resources/operational_database.md
+++ b/docs/resources/operational_database.md
@@ -43,7 +43,6 @@ resource "cdp_operational_database" "opdb" {
// scale_type = "MICRO" // valid options are "MICRO","LIGHT","HEAVY"
// storage_type = "HDFS" // valid options are "CLOUD_WITH_EPHEMERAL","CLOUD","HDFS"
- disable_multi_az = false
// num_edge_nodes = 1
}
```
@@ -58,12 +57,22 @@ resource "cdp_operational_database" "opdb" {
### Optional
-- `disable_external_db` (Boolean) Disable external database creation or not, it is only available in the BETA cdpcli
+- `attached_storage_for_workers` (Attributes) Attached storage for the worker nodes for AWS, Azure, and GCP cloud providers. (see [below for nested schema](#nestedatt--attached_storage_for_workers))
+- `auto_scaling_parameters` (Attributes) (see [below for nested schema](#nestedatt--auto_scaling_parameters))
+- `custom_user_tags` (Attributes Set) Optional tags to apply to launched infrastructure resources. (see [below for nested schema](#nestedatt--custom_user_tags))
+- `disable_external_db` (Boolean) Disable external database creation or not. It is only available in the BETA cdpcli.
+- `disable_jwt_auth` (Boolean) Disable the OAuth Bearer (JWT) authentication scheme.
+- `disable_kerberos` (Boolean) Disable Kerberos authentication.
- `disable_multi_az` (Boolean) Disable deployment to multiple availability zones or not
-- `java_version` (Number) Java version, it is only available in the BETA cdpcli
+- `enable_grafana` (Boolean) Whether to enable a Grafana server for the database.
+- `enable_region_canary` (Boolean) Whether to enable the region canary for the database.
+- `image` (Attributes) Details of an Image. (see [below for nested schema](#nestedatt--image))
+- `java_version` (Number) Java version. It is only available in the BETA cdpcli.
- `num_edge_nodes` (Number) Number of edge nodes
- `polling_options` (Attributes) Polling related configuration options that could specify various values that will be used during CDP resource creation. (see [below for nested schema](#nestedatt--polling_options))
+- `recipes` (Attributes Set) Custom recipes for the database. (see [below for nested schema](#nestedatt--recipes))
- `scale_type` (String) Scale type, MICRO, LIGHT or HEAVY
+- `storage_location` (String) Storage Location for OPDB. It is only available in the BETA cdpcli.
- `storage_type` (String) Storage type for clusters, CLOUD_WITH_EPHEMERAL, CLOUD or HDFS
- `subnet_id` (String) ID of the subnet to deploy to
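+
+For illustration, the new top-level boolean options introduced here can be set together like this (the values are placeholders and the required arguments are elided):
+
+```terraform
+resource "cdp_operational_database" "opdb" {
+  // ... required arguments ...
+
+  disable_kerberos     = true
+  disable_jwt_auth     = false
+  enable_grafana       = true
+  enable_region_canary = false
+}
+```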
@@ -71,7 +80,52 @@ resource "cdp_operational_database" "opdb" {
- `crn` (String) The CRN of the cluster.
- `status` (String) The last known state of the cluster
-- `storage_location` (String) Storage Location for OPDB
+
+
+### Nested Schema for `attached_storage_for_workers`
+
+Optional:
+
+- `volume_count` (Number) The number of volumes. Default is 4. Valid range: minimum 1, maximum 8.
+- `volume_size` (Number) The target size of the volume, in GiB. Default is 2048.
+- `volume_type` (String) Volume type. HDD - hard disk drive (HDD) volume type. SSD - solid state drive (SSD) volume type. LOCAL_SSD - local SSD volume type. Default is HDD.
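+
+A minimal sketch of this nested attribute in a resource configuration; the values shown are the documented defaults:
+
+```terraform
+attached_storage_for_workers = {
+  volume_count = 4      // valid range: 1 to 8
+  volume_size  = 2048   // in GiB
+  volume_type  = "HDD"  // "HDD", "SSD", or "LOCAL_SSD"
+}
+```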
+
+
+
+### Nested Schema for `auto_scaling_parameters`
+
+Optional:
+
+- `evaluation_period` (Number) The period of metrics (in seconds) that needs to be considered.
+- `max_compute_nodes_for_database` (Number) The maximum number of compute nodes that the database can be scaled up to, as per these metrics. It is only available in the BETA cdpcli.
+- `max_cpu_utilization` (Number) The maximum percentage threshold for the CPU utilization of the worker nodes. The CPU utilization is obtained from the Cloudera Manager metric `cpu_percent` across worker nodes. Set it to 100 or more to disable the CPU metrics. It is only available in the BETA cdpcli.
+- `max_hdfs_usage_percentage` (Number) The maximum percentage of HDFS utilization for the database before scaling is triggered. It is only available in the BETA cdpcli.
+- `max_regions_per_region_server` (Number) The maximum number of regions per region server. It is only available in the BETA cdpcli.
+- `max_workers_for_database` (Number) The maximum number of worker nodes that the database can be scaled up to, as per these metrics.
+- `max_workers_per_batch` (Number) The maximum number of worker nodes that can be scaled up in one batch, as per these metrics.
+- `min_compute_nodes_for_database` (Number) The minimum number of compute nodes that the database can be scaled down to, as per these metrics. It is only available in the BETA cdpcli.
+- `min_workers_for_database` (Number) The minimum number of worker nodes that the database can be scaled down to, as per these metrics.
+- `minimum_block_cache_gb` (Number) The amount of block cache, in gigabytes, that the database should have.
+- `targeted_value_for_metric` (Number) The target value of the metric a user expects to maintain for the cluster.
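+
+An illustrative sketch of this nested attribute (the values are examples, not defaults; set only the parameters you need):
+
+```terraform
+auto_scaling_parameters = {
+  targeted_value_for_metric = 234
+  min_workers_for_database  = 3
+  max_workers_for_database  = 10
+  max_workers_per_batch     = 2
+  evaluation_period         = 2400 // seconds
+  minimum_block_cache_gb    = 1
+}
+```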
+
+
+
+### Nested Schema for `custom_user_tags`
+
+Required:
+
+- `key` (String)
+- `value` (String)
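+
+For example (keys and values are placeholders):
+
+```terraform
+custom_user_tags = [
+  { key = "owner", value = "opdb-team" },
+  { key = "env", value = "dev" },
+]
+```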
+
+
+
+### Nested Schema for `image`
+
+Required:
+
+- `catalog` (String) Catalog name for the image.
+- `id` (String) Image ID for the database.
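+
+For example (the catalog name and image ID are placeholders):
+
+```terraform
+image = {
+  catalog = "my-catalog"
+  id      = "my-image-id"
+}
+```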
+
### Nested Schema for `polling_options`
@@ -81,3 +135,12 @@ Optional:
- `polling_timeout` (Number) Timeout value in minutes that specifies for how long should the polling go for resource creation/deletion.
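+
+For example, to allow more time for resource creation (the value is illustrative):
+
+```terraform
+polling_options = {
+  polling_timeout = 120 // minutes
+}
+```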
+
+### Nested Schema for `recipes`
+
+Required:
+
+- `instance_group` (String) The name of the designated instance group.
+- `names` (Set of String) The set of recipe names that are going to be applied on the given instance group.
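+
+For example (the instance group and recipe names are placeholders):
+
+```terraform
+recipes = [
+  {
+    instance_group = "WORKER"
+    names          = ["my-recipe-1", "my-recipe-2"]
+  },
+]
+```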
+
+
diff --git a/examples/resources/cdp_operational_database/resource.tf b/examples/resources/cdp_operational_database/resource.tf
index 027813a6..3a26248f 100644
--- a/examples/resources/cdp_operational_database/resource.tf
+++ b/examples/resources/cdp_operational_database/resource.tf
@@ -28,6 +28,5 @@ resource "cdp_operational_database" "opdb" {
// scale_type = "MICRO" // valid options are "MICRO","LIGHT","HEAVY"
// storage_type = "HDFS" // valid options are "CLOUD_WITH_EPHEMERAL","CLOUD","HDFS"
- disable_multi_az = false
// num_edge_nodes = 1
}
\ No newline at end of file
diff --git a/resources/opdb/converter.go b/resources/opdb/converter.go
index 96a29892..e96188ce 100644
--- a/resources/opdb/converter.go
+++ b/resources/opdb/converter.go
@@ -13,6 +13,7 @@ package opdb
import (
"context"
"fmt"
+ "github.com/cloudera/terraform-provider-cdp/utils"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
@@ -34,10 +35,104 @@ func fromModelToDatabaseRequest(model databaseResourceModel, ctx context.Context
req.JavaVersion = int64To32(model.JavaVersion)
req.NumEdgeNodes = int64To32(model.NumEdgeNodes)
+ if model.AutoScalingParameters != nil {
+ tflog.Info(ctx, fmt.Sprintf("Autoscaling parameters %+v.", model.AutoScalingParameters))
+ req.AutoScalingParameters = createAutoScalingParameters(*model.AutoScalingParameters, ctx)
+ }
+
+ if model.AttachedStorageForWorkers != nil {
+ req.AttachedStorageForWorkers = createAttachedStorageForWorkers(*model.AttachedStorageForWorkers, ctx)
+ }
+
+ req.DisableKerberos = model.DisableKerberos.ValueBool()
+ req.DisableJwtAuth = model.DisableJwtAuth.ValueBool()
+
+ if model.Image != nil {
+ req.Image = createImage(*model.Image, ctx)
+ }
+
+ req.EnableGrafana = model.EnableGrafana.ValueBool()
+
+ req.CustomUserTags = createCustomUserTags(ctx, model.CustomUserTags)
+ req.EnableRegionCanary = model.EnableRegionCanary.ValueBool()
+
+ req.Recipes = createRecipes(ctx, model.Recipes)
+ req.StorageLocation = model.StorageLocation.ValueString()
+
tflog.Debug(ctx, fmt.Sprintf("Conversion from databaseResourceModel to CreateDatabaseRequest has finished with request: %+v.", req))
return &req
}
+func createAutoScalingParameters(autoScalingParameters AutoScalingParametersStruct, ctx context.Context) *opdbmodels.AutoScalingParameters {
+ return &opdbmodels.AutoScalingParameters{
+ TargetedValueForMetric: autoScalingParameters.TargetedValueForMetric.ValueInt64(),
+ MaxWorkersForDatabase: int64To32(autoScalingParameters.MaxWorkersForDatabase),
+ MaxWorkersPerBatch: int64To32(autoScalingParameters.MaxWorkersPerBatch),
+ MinWorkersForDatabase: int64To32(autoScalingParameters.MinWorkersForDatabase),
+ EvaluationPeriod: autoScalingParameters.EvaluationPeriod.ValueInt64(),
+ MinimumBlockCacheGb: int64To32(autoScalingParameters.MinimumBlockCacheGb),
+
+ MaxCPUUtilization: int64To32(autoScalingParameters.MaxCPUUtilization),
+ MaxComputeNodesForDatabase: int64To32Pointer(autoScalingParameters.MaxComputeNodesForDatabase),
+ MinComputeNodesForDatabase: int64To32Pointer(autoScalingParameters.MinComputeNodesForDatabase),
+ MaxHdfsUsagePercentage: int64To32(autoScalingParameters.MaxHdfsUsagePercentage),
+ MaxRegionsPerRegionServer: int64To32(autoScalingParameters.MaxRegionsPerRegionServer),
+ }
+}
+
+func createAttachedStorageForWorkers(attachedStorageForWorkers AttachedStorageForWorkersStruct, ctx context.Context) *opdbmodels.AttachedStorageForWorkers {
+ return &opdbmodels.AttachedStorageForWorkers{
+ VolumeCount: int64To32(attachedStorageForWorkers.VolumeCount),
+ VolumeSize: int64To32(attachedStorageForWorkers.VolumeSize),
+ VolumeType: opdbmodels.VolumeType(attachedStorageForWorkers.VolumeType.ValueString()),
+ }
+}
+
+func createImage(image Image, ctx context.Context) *opdbmodels.Image {
+ return &opdbmodels.Image{
+ ID: image.ID.ValueStringPointer(),
+ Catalog: image.Catalog.ValueStringPointer(),
+ }
+}
+
+func createCustomUserTags(ctx context.Context, keyValuePairs []KeyValuePair) []*opdbmodels.KeyValuePair {
+ var kvList []*opdbmodels.KeyValuePair
+ for _, vrs := range keyValuePairs {
+ tflog.Debug(ctx, fmt.Sprintf("Converting KeyValuePair: %+v.", vrs))
+ kvList = append(kvList, createKeyValuePair(vrs))
+ }
+ return kvList
+}
+
+func createKeyValuePair(keyValuePair KeyValuePair) *opdbmodels.KeyValuePair {
+ return &opdbmodels.KeyValuePair{
+ Key: keyValuePair.Key.ValueString(),
+ Value: keyValuePair.Value.ValueString(),
+ }
+}
+
+func createRecipes(ctx context.Context, recipes []Recipe) []*opdbmodels.CustomRecipe {
+ var recipeList []*opdbmodels.CustomRecipe
+ for _, vrs := range recipes {
+ tflog.Debug(ctx, fmt.Sprintf("Converting Recipe: %+v.", vrs))
+ recipeList = append(recipeList, createRecipe(vrs))
+ }
+ return recipeList
+}
+
+func createRecipe(customRecipe Recipe) *opdbmodels.CustomRecipe {
+ return &opdbmodels.CustomRecipe{
+ InstanceGroup: opdbmodels.NewInstanceGroupType(opdbmodels.InstanceGroupType(customRecipe.InstanceGroup.ValueString())),
+ Names: utils.FromSetValueToStringList(customRecipe.Names),
+ }
+}
+
+func int64To32Pointer(in types.Int64) *int32 {
+ // Preserve "unset" semantics: a null value maps to a nil pointer rather than a pointer to zero.
+ if in.IsNull() {
+  return nil
+ }
+ n64 := in.ValueInt64()
+ n2 := int32(n64)
+ return &n2
+}
+
func int64To32(in types.Int64) int32 {
n64 := in.ValueInt64()
return int32(n64)
diff --git a/resources/opdb/converter_test.go b/resources/opdb/converter_test.go
new file mode 100644
index 00000000..c86dc3df
--- /dev/null
+++ b/resources/opdb/converter_test.go
@@ -0,0 +1,191 @@
+// Copyright 2024 Cloudera. All Rights Reserved.
+//
+// This file is licensed under the Apache License Version 2.0 (the "License").
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
+//
+// This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+// OF ANY KIND, either express or implied. Refer to the License for the specific
+// permissions and limitations governing your use of the file.
+
+package opdb
+
+import (
+ "context"
+ "testing"
+
+ "github.com/cloudera/terraform-provider-cdp/utils/test"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func TestFromSimplestModelToRequestBasicFields(t *testing.T) {
+ input := databaseResourceModel{
+ DatabaseName: types.StringValue("someName"),
+ Environment: types.StringValue("someEnvironment"),
+ }
+ got := fromModelToDatabaseRequest(input, context.TODO())
+
+ test.CompareStrings(*got.DatabaseName, input.DatabaseName.ValueString(), t)
+ test.CompareStrings(*got.EnvironmentName, input.Environment.ValueString(), t)
+
+ // Default false values
+ test.CompareBools(got.DisableExternalDB, false, t)
+ test.CompareBools(got.DisableMultiAz, false, t)
+ test.CompareBools(got.DisableKerberos, false, t)
+ test.CompareBools(got.DisableJwtAuth, false, t)
+ test.CompareBools(got.EnableGrafana, false, t)
+ test.CompareBools(got.EnableRegionCanary, false, t)
+}
+
+func TestFromModelToRequestMoreFields(t *testing.T) {
+ input := databaseResourceModel{
+ DatabaseName: types.StringValue("someName"),
+ Environment: types.StringValue("someEnvironment"),
+ ScaleType: types.StringValue("MICRO"),
+ StorageType: types.StringValue("SSD"),
+ DisableExternalDB: types.BoolValue(true),
+ DisableMultiAz: types.BoolValue(true),
+ SubnetID: types.StringValue("someSubnetID"),
+ JavaVersion: types.Int64Value(11),
+ NumEdgeNodes: types.Int64Value(2),
+
+ DisableKerberos: types.BoolValue(true),
+ DisableJwtAuth: types.BoolValue(true),
+ EnableGrafana: types.BoolValue(true),
+ EnableRegionCanary: types.BoolValue(true),
+ StorageLocation: types.StringValue("someStorageLocation"),
+ }
+ got := fromModelToDatabaseRequest(input, context.TODO())
+
+ test.CompareStrings(*got.DatabaseName, input.DatabaseName.ValueString(), t)
+ test.CompareStrings(*got.EnvironmentName, input.Environment.ValueString(), t)
+ test.CompareStrings(string(got.ScaleType), input.ScaleType.ValueString(), t)
+ test.CompareStrings(string(got.StorageType), input.StorageType.ValueString(), t)
+
+ test.CompareBools(got.DisableExternalDB, input.DisableExternalDB.ValueBool(), t)
+ test.CompareBools(got.DisableMultiAz, input.DisableMultiAz.ValueBool(), t)
+
+ test.CompareStrings(got.SubnetID, input.SubnetID.ValueString(), t)
+
+ test.CompareInt32PointerToTypesInt64(&got.JavaVersion, input.JavaVersion, t)
+ test.CompareInt32PointerToTypesInt64(&got.NumEdgeNodes, input.NumEdgeNodes, t)
+
+ test.CompareBools(got.DisableKerberos, input.DisableKerberos.ValueBool(), t)
+ test.CompareBools(got.DisableJwtAuth, input.DisableJwtAuth.ValueBool(), t)
+ test.CompareBools(got.EnableGrafana, input.EnableGrafana.ValueBool(), t)
+ test.CompareBools(got.EnableRegionCanary, input.EnableRegionCanary.ValueBool(), t)
+
+ test.CompareStrings(got.StorageLocation, input.StorageLocation.ValueString(), t)
+
+}
+
+func TestCreateAutoScalingParams(t *testing.T) {
+ autoScalingParameters := AutoScalingParametersStruct{
+ TargetedValueForMetric: types.Int64Value(234),
+ MaxWorkersForDatabase: types.Int64Value(4),
+ MaxWorkersPerBatch: types.Int64Value(2),
+ MinWorkersForDatabase: types.Int64Value(3),
+ EvaluationPeriod: types.Int64Value(2400),
+ MinimumBlockCacheGb: types.Int64Value(1),
+
+ MaxCPUUtilization: types.Int64Value(-1),
+ MaxComputeNodesForDatabase: types.Int64Value(-1),
+ MinComputeNodesForDatabase: types.Int64Value(-1),
+ MaxHdfsUsagePercentage: types.Int64Value(80),
+ MaxRegionsPerRegionServer: types.Int64Value(200),
+ }
+
+ got := createAutoScalingParameters(autoScalingParameters, context.TODO())
+
+ test.CompareInt32PointerToTypesInt64(Int64PointerTo32Pointer(&got.TargetedValueForMetric), autoScalingParameters.TargetedValueForMetric, t)
+ test.CompareInt32PointerToTypesInt64(&got.MaxWorkersForDatabase, autoScalingParameters.MaxWorkersForDatabase, t)
+ test.CompareInt32PointerToTypesInt64(&got.MaxWorkersPerBatch, autoScalingParameters.MaxWorkersPerBatch, t)
+ test.CompareInt32PointerToTypesInt64(&got.MinWorkersForDatabase, autoScalingParameters.MinWorkersForDatabase, t)
+ test.CompareInt32PointerToTypesInt64(Int64PointerTo32Pointer(&got.EvaluationPeriod), autoScalingParameters.EvaluationPeriod, t)
+ test.CompareInt32PointerToTypesInt64(&got.MinimumBlockCacheGb, autoScalingParameters.MinimumBlockCacheGb, t)
+
+ test.CompareInt32PointerToTypesInt64(&got.MaxCPUUtilization, autoScalingParameters.MaxCPUUtilization, t)
+ test.CompareInt32PointerToTypesInt64(got.MaxComputeNodesForDatabase, autoScalingParameters.MaxComputeNodesForDatabase, t)
+ test.CompareInt32PointerToTypesInt64(got.MinComputeNodesForDatabase, autoScalingParameters.MinComputeNodesForDatabase, t)
+ test.CompareInt32PointerToTypesInt64(&got.MaxHdfsUsagePercentage, autoScalingParameters.MaxHdfsUsagePercentage, t)
+ test.CompareInt32PointerToTypesInt64(&got.MaxRegionsPerRegionServer, autoScalingParameters.MaxRegionsPerRegionServer, t)
+}
+
+func Int64PointerTo32Pointer(in *int64) *int32 {
+ var n2 = int32(*in)
+ return &n2
+}
+
+func TestCreateAttachedStorageForWorkers(t *testing.T) {
+ attachedStorageForWorkers := AttachedStorageForWorkersStruct{
+ VolumeCount: types.Int64Value(2),
+ VolumeSize: types.Int64Value(2024),
+ VolumeType: types.StringValue("LOCAL_SSD"),
+ }
+
+ got := createAttachedStorageForWorkers(attachedStorageForWorkers, context.TODO())
+
+ test.CompareInt32PointerToTypesInt64(&got.VolumeCount, attachedStorageForWorkers.VolumeCount, t)
+ test.CompareInt32PointerToTypesInt64(&got.VolumeSize, attachedStorageForWorkers.VolumeSize, t)
+ test.CompareStrings(string(got.VolumeType), attachedStorageForWorkers.VolumeType.ValueString(), t)
+}
+
+func TestCreateImage(t *testing.T) {
+ image := Image{
+ ID: types.StringValue("FOO"),
+ Catalog: types.StringValue("BAR"),
+ }
+
+ got := createImage(image, context.TODO())
+
+ test.CompareStrings(*got.ID, image.ID.ValueString(), t)
+ test.CompareStrings(*got.Catalog, image.Catalog.ValueString(), t)
+}
+
+func TestCreateCustomUserTags(t *testing.T) {
+ var tags []KeyValuePair
+
+ a := KeyValuePair{
+ Key: types.StringValue("k1"),
+ Value: types.StringValue("v1"),
+ }
+ b := KeyValuePair{
+ Key: types.StringValue("k2"),
+ Value: types.StringValue("v2"),
+ }
+ tags = append(tags, a)
+ tags = append(tags, b)
+
+ got := createCustomUserTags(context.TODO(), tags)
+
+ test.CompareStrings(got[0].Key, a.Key.ValueString(), t)
+ test.CompareStrings(got[0].Value, a.Value.ValueString(), t)
+
+ test.CompareStrings(got[1].Key, b.Key.ValueString(), t)
+ test.CompareStrings(got[1].Value, b.Value.ValueString(), t)
+}
+
+func TestCreateRecipes(t *testing.T) {
+ var recipes []Recipe
+ recipesA, _ := types.SetValue(types.StringType, []attr.Value{types.StringValue("recipeA1"), types.StringValue("recipeA2")})
+ recipesB, _ := types.SetValue(types.StringType, []attr.Value{types.StringValue("recipeB1"), types.StringValue("recipeB2")})
+
+ a := Recipe{
+ Names: recipesA,
+ InstanceGroup: types.StringValue("i1"),
+ }
+ b := Recipe{
+ Names: recipesB,
+ InstanceGroup: types.StringValue("i2"),
+ }
+ recipes = append(recipes, a)
+ recipes = append(recipes, b)
+
+ got := createRecipes(context.TODO(), recipes)
+
+ test.CompareStrings(string(*got[0].InstanceGroup), a.InstanceGroup.ValueString(), t)
+ test.CompareStringValueSlices(got[0].Names, a.Names.Elements(), t)
+ test.CompareStrings(string(*got[1].InstanceGroup), b.InstanceGroup.ValueString(), t)
+ test.CompareStringValueSlices(got[1].Names, b.Names.Elements(), t)
+}
diff --git a/resources/opdb/model_database.go b/resources/opdb/model_database.go
index 5495eeb4..7dc953fe 100644
--- a/resources/opdb/model_database.go
+++ b/resources/opdb/model_database.go
@@ -32,4 +32,52 @@ type databaseResourceModel struct {
NumEdgeNodes types.Int64 `tfsdk:"num_edge_nodes"`
JavaVersion types.Int64 `tfsdk:"java_version"`
SubnetID types.String `tfsdk:"subnet_id"`
+
+ AutoScalingParameters *AutoScalingParametersStruct `tfsdk:"auto_scaling_parameters"`
+ AttachedStorageForWorkers *AttachedStorageForWorkersStruct `tfsdk:"attached_storage_for_workers"`
+
+ DisableKerberos types.Bool `tfsdk:"disable_kerberos"`
+ DisableJwtAuth types.Bool `tfsdk:"disable_jwt_auth"`
+ Image *Image `tfsdk:"image"`
+ EnableGrafana types.Bool `tfsdk:"enable_grafana"`
+
+ CustomUserTags []KeyValuePair `tfsdk:"custom_user_tags"`
+ EnableRegionCanary types.Bool `tfsdk:"enable_region_canary"`
+ Recipes []Recipe `tfsdk:"recipes"`
+}
+
+type AutoScalingParametersStruct struct {
+ TargetedValueForMetric types.Int64 `tfsdk:"targeted_value_for_metric"`
+ MaxWorkersForDatabase types.Int64 `tfsdk:"max_workers_for_database"`
+ MaxWorkersPerBatch types.Int64 `tfsdk:"max_workers_per_batch"`
+ MinWorkersForDatabase types.Int64 `tfsdk:"min_workers_for_database"`
+ EvaluationPeriod types.Int64 `tfsdk:"evaluation_period"`
+ MinimumBlockCacheGb types.Int64 `tfsdk:"minimum_block_cache_gb"`
+
+ MaxHdfsUsagePercentage types.Int64 `tfsdk:"max_hdfs_usage_percentage"`
+ MaxRegionsPerRegionServer types.Int64 `tfsdk:"max_regions_per_region_server"`
+ MaxCPUUtilization types.Int64 `tfsdk:"max_cpu_utilization"`
+ MaxComputeNodesForDatabase types.Int64 `tfsdk:"max_compute_nodes_for_database"`
+ MinComputeNodesForDatabase types.Int64 `tfsdk:"min_compute_nodes_for_database"`
+}
+
+type AttachedStorageForWorkersStruct struct {
+ VolumeCount types.Int64 `tfsdk:"volume_count"`
+ VolumeSize types.Int64 `tfsdk:"volume_size"`
+ VolumeType types.String `tfsdk:"volume_type"`
+}
+
+type Image struct {
+ ID types.String `tfsdk:"id"`
+ Catalog types.String `tfsdk:"catalog"`
+}
+
+type KeyValuePair struct {
+ Key types.String `tfsdk:"key"`
+ Value types.String `tfsdk:"value"`
+}
+
+type Recipe struct {
+ Names types.Set `tfsdk:"names"`
+ InstanceGroup types.String `tfsdk:"instance_group"`
}
diff --git a/resources/opdb/resource_database.go b/resources/opdb/resource_database.go
index 71a796c7..19f839fd 100644
--- a/resources/opdb/resource_database.go
+++ b/resources/opdb/resource_database.go
@@ -138,6 +138,18 @@ func getCommonDatabaseDetails(data *databaseResourceModel, databaseDetails *opdb
data.StorageLocation = types.StringValue(databaseDetails.StorageLocation)
data.NumEdgeNodes = types.Int64Value(int64(databaseDetails.DbEdgeNodeCount))
+
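+ // Only the first storage details entry returned by the service is mapped back into state.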
+ if len(databaseDetails.StorageDetailsForWorkers) >= 1 {
+ data.AttachedStorageForWorkers = createStorageDetailsForWorkers(databaseDetails.StorageDetailsForWorkers[0])
+ }
+}
+
+func createStorageDetailsForWorkers(storageDetailsForWorker *opdbmodels.StorageDetailsForWorker) *AttachedStorageForWorkersStruct {
+ return &AttachedStorageForWorkersStruct{
+ VolumeCount: types.Int64Value(int64(storageDetailsForWorker.VolumeCount)),
+ VolumeSize: types.Int64Value(int64(storageDetailsForWorker.VolumeSize)),
+ VolumeType: types.StringValue(string(storageDetailsForWorker.VolumeType)),
+ }
}
func (r *databaseResource) Update(ctx context.Context, _ resource.UpdateRequest, _ *resource.UpdateResponse) {
diff --git a/resources/opdb/schema_database.go b/resources/opdb/schema_database.go
index f8a76391..2188b88b 100644
--- a/resources/opdb/schema_database.go
+++ b/resources/opdb/schema_database.go
@@ -15,9 +15,14 @@ import (
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+
"github.com/cloudera/terraform-provider-cdp/utils"
)
@@ -44,7 +49,7 @@ func (r *databaseResource) Schema(_ context.Context, _ resource.SchemaRequest, r
Optional: true,
},
"disable_external_db": schema.BoolAttribute{
- MarkdownDescription: "Disable external database creation or not, it is only available in the BETA cdpcli",
+ MarkdownDescription: "Disable external database creation or not. It is only available in the BETA cdpcli.",
Optional: true,
},
"disable_multi_az": schema.BoolAttribute{
@@ -62,13 +67,152 @@ func (r *databaseResource) Schema(_ context.Context, _ resource.SchemaRequest, r
Default: int64default.StaticInt64(0),
},
"java_version": schema.Int64Attribute{
- MarkdownDescription: "Java version, it is only available in the BETA cdpcli",
+ MarkdownDescription: "Java version. It is only available in the BETA cdpcli.",
Optional: true,
},
"storage_location": schema.StringAttribute{
- MarkdownDescription: "Storage Location for OPDB",
+ MarkdownDescription: "Storage Location for OPDB. It is only available in the BETA cdpcli.",
Computed: true,
+ Optional: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "auto_scaling_parameters": schema.SingleNestedAttribute{
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "targeted_value_for_metric": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The target value of the metric a user expect to maintain for the cluster",
+ },
+ "max_workers_for_database": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "Maximum number of worker nodes as per this metrics can be scaled up to.",
+ },
+ "max_workers_per_batch": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "Maximum number of worker nodes as per this metrics can be scaled up to in one batch.",
+ },
+ "min_workers_for_database": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "Minimum number of worker nodes as per this metrics can be scaled down to.",
+ },
+ "evaluation_period": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "Period of metrics(in seconds) needs to be considered.",
+ },
+ "minimum_block_cache_gb": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The amount of block cache, in Gigabytes, which the database should have.",
+ },
+ "max_hdfs_usage_percentage": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The maximum percentage of HDFS utilization for the database before we trigger the scaling. It is only available in the BETA cdpcli.",
+ },
+ "max_regions_per_region_server": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The maximum number of regions per region server. It is only available in the BETA cdpcli.",
+ },
+ "max_cpu_utilization": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The maximum percentage threshold for the CPU utilization of the worker nodes. The CPU utilization is obtained from the Cloudera Manager metric ‘cpu_percent’ across worker nodes. Set 100 or more to disable the CPU metrics. It is only available in the BETA cdpcli.",
+ },
+ "max_compute_nodes_for_database": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The maximum number of compute nodes, as per these metrics, that can be scaled up to. It is only available in the BETA cdpcli.",
+ },
+ "min_compute_nodes_for_database": schema.Int64Attribute{
+ Optional: true,
+ MarkdownDescription: "The minimum number of compute nodes, as per these metrics, that can be scaled down to. It is only available in the BETA cdpcli.",
+ },
+ },
+ },
+ "attached_storage_for_workers": schema.SingleNestedAttribute{
+ Optional: true,
+ MarkdownDescription: "Attached storage for the worker nodes for AWS, Azure, and GCP cloud providers.",
+ Attributes: map[string]schema.Attribute{
+ "volume_count": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ MarkdownDescription: "The number of Volumes. Default is 4. Valid Range: Minimum value of 1, maximum value 8.",
+ Default: int64default.StaticInt64(4),
+ },
+ "volume_size": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ MarkdownDescription: "The target size of the volume, in GiB. Default is 2048.",
+ Default: int64default.StaticInt64(2048),
+ },
+ "volume_type": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ MarkdownDescription: "Volume Type. HDD - Hard disk drives (HDD) volume type. Default is HDD. SSD - Solid disk drives (SSD) volume type. LOCAL_SSD - Local SSD volume type.",
+ Default: stringdefault.StaticString("HDD"),
+ },
+ },
+ },
+ "disable_kerberos": schema.BoolAttribute{
+ MarkdownDescription: "Disable Kerberos authentication. ",
+ Optional: true,
+ },
+ "disable_jwt_auth": schema.BoolAttribute{
+ MarkdownDescription: "Disable OAuth Bearer (JWT) authentication scheme. ",
+ Optional: true,
+ },
+ "image": schema.SingleNestedAttribute{
+ Optional: true,
+ MarkdownDescription: "Details of an Image.",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Required: true,
+ MarkdownDescription: "Image ID for the database.",
+ },
+ "catalog": schema.StringAttribute{
+ Required: true,
+ MarkdownDescription: "Catalog name for the image.",
+ },
+ },
+ },
+
+ "enable_grafana": schema.BoolAttribute{
+ MarkdownDescription: "To enable grafana server for the database.",
+ Optional: true,
+ },
+ "custom_user_tags": schema.SetNestedAttribute{
+ Optional: true,
+ MarkdownDescription: "Optional tags to apply to launched infrastructure resources",
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "key": schema.StringAttribute{
+ Required: true,
+ },
+ "value": schema.StringAttribute{
+ Required: true,
+ },
+ },
+ },
+ },
+ "enable_region_canary": schema.BoolAttribute{
+ MarkdownDescription: "To enable the region canary for the database.",
+ Optional: true,
+ },
+ "recipes": schema.SetNestedAttribute{
+ Optional: true,
+ MarkdownDescription: "Custom recipes for the database.",
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "names": schema.SetAttribute{
+ MarkdownDescription: "The set of recipe names that are going to be applied on the given instance group.",
+ ElementType: types.StringType,
+ Required: true,
+ },
+ "instance_group": schema.StringAttribute{
+ MarkdownDescription: "The name of the designated instance group.",
+ Required: true,
+ },
+ },
+ },
},
})
resp.Schema = schema.Schema{