From 292f74cf440aa527b71c5dc7f367f1d690e339f4 Mon Sep 17 00:00:00 2001 From: Attila Laszlo Tabori Date: Fri, 13 May 2022 11:35:17 +0200 Subject: [PATCH 1/5] added kms_instance_id and crk to vpc cluster, vpc workerpool resources and to vpc workerpool datasource, for boot volume encryption --- go.mod | 2 +- go.sum | 7 +++ ibm/acctest/acctest.go | 18 ++++++ ...ta_source_ibm_container_vpc_worker_pool.go | 13 ++++- ...urce_ibm_container_vpc_worker_pool_test.go | 27 +++++++++ .../resource_ibm_container_vpc_cluster.go | 27 +++++++-- ...resource_ibm_container_vpc_cluster_test.go | 58 +++++++++++++++++++ .../resource_ibm_container_vpc_worker_pool.go | 37 ++++++++---- ...urce_ibm_container_vpc_worker_pool_test.go | 55 +++++++++++++++++- .../d/container_vpc_cluster.html.markdown | 2 + .../d/container_worker_pool.html.markdown | 2 + 11 files changed, 230 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index ef90dabf46..49925b2cb4 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/IBM-Cloud/terraform-provider-ibm go 1.16 require ( - github.com/IBM-Cloud/bluemix-go v0.0.0-20220407050707-b4cd0d4da813 + github.com/IBM-Cloud/bluemix-go v0.0.0-20220512101601-701829d22dbc github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20210705152127-41ca00fc9a62 github.com/IBM-Cloud/power-go-client v1.1.4 github.com/IBM/apigateway-go-sdk v0.0.0-20210714141226-a5d5d49caaca diff --git a/go.sum b/go.sum index d2e8bd4c1f..7d8436e1e4 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/IBM-Cloud/bluemix-go v0.0.0-20220407050707-b4cd0d4da813 h1:UgPApMMM6SglqB+U/EaFHyaoyaEM16RzxyiVah70g4o= github.com/IBM-Cloud/bluemix-go v0.0.0-20220407050707-b4cd0d4da813/go.mod h1:UOhxo7T8CdX6sdTY9Dn7rJSgyoTlz1KM9641XcPraH0= +github.com/IBM-Cloud/bluemix-go v0.0.0-20220506091544-f67089f8d5d9 h1:zxeiJfE5hRMUH/h4hd+g9M98a4DRfUrD8nNbcteSlrc= +github.com/IBM-Cloud/bluemix-go v0.0.0-20220506091544-f67089f8d5d9/go.mod h1:UOhxo7T8CdX6sdTY9Dn7rJSgyoTlz1KM9641XcPraH0= +github.com/IBM-Cloud/bluemix-go v0.0.0-20220510085630-12dc05e70f29 h1:42c4MfIYppIOBymVz0WHNN3WO9zNPH+9aFNDP2ektYg= +github.com/IBM-Cloud/bluemix-go v0.0.0-20220510085630-12dc05e70f29/go.mod h1:tfNN3lCKuA2+SQvndt0+5CjPr2qn/wdNLjrue1GrOhY= +github.com/IBM-Cloud/bluemix-go v0.0.0-20220512101601-701829d22dbc h1:X3zq0LZyqBzsiX5KJvwzxH7Btl6M/oHqskS/2tL4B5A= +github.com/IBM-Cloud/bluemix-go v0.0.0-20220512101601-701829d22dbc/go.mod h1:tfNN3lCKuA2+SQvndt0+5CjPr2qn/wdNLjrue1GrOhY= github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20210705152127-41ca00fc9a62 h1:MOkcr6qQGk4tY542ZJ1DggVh2WUP72EEyLB79llFVH8= github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20210705152127-41ca00fc9a62/go.mod h1:xUQL9SGAjoZFd4GNjrjjtEpjpkgU7RFXRyHesbKTjiY= github.com/IBM-Cloud/ibm-cloud-cli-sdk v0.5.3/go.mod h1:RiUvKuHKTBmBApDMUQzBL14pQUGKcx/IioKQPIcRQjs= @@ -660,6 +666,7 @@ github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= 
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= diff --git a/ibm/acctest/acctest.go b/ibm/acctest/acctest.go index a89e19aa5d..a0fe5d0fe9 100644 --- a/ibm/acctest/acctest.go +++ b/ibm/acctest/acctest.go @@ -90,10 +90,13 @@ var HpcsAdmin2 string var HpcsToken2 string var RealmName string var IksSa string +var IksClusterID string var IksClusterVpcID string var IksClusterSubnetID string var IksClusterResourceGroupID string var IcdDbRegion string +var KmsInstanceID string +var CrkID string // For Power Colo @@ -877,6 +880,21 @@ func init() { if ClusterName == "" { fmt.Println("[INFO] Set the environment variable IBM_CONTAINER_CLUSTER_NAME for ibm_container_nlb_dns resource or datasource else tests will fail if this is not set correctly") } + + KmsInstanceID = os.Getenv("IBM_KMS_INSTANCE_ID") + if KmsInstanceID == "" { + fmt.Println("[INFO] Set the environment variable IBM_KMS_INSTANCE_ID for ibm_container_vpc_cluster resource or datasource else tests will fail if this is not set correctly") + } + + CrkID = os.Getenv("IBM_CRK_ID") + if CrkID == "" { + fmt.Println("[INFO] Set the environment variable IBM_CRK_ID for ibm_container_vpc_cluster resource or datasource else tests will fail if this is not set correctly") + } + + IksClusterID = os.Getenv("IBM_CLUSTER_ID") + if IksClusterID == "" { + fmt.Println("[INFO] Set the environment variable IBM_CLUSTER_ID for ibm_container_vpc_worker_pool resource or datasource else tests will fail if this is not set correctly") + } } var TestAccProviders map[string]*schema.Provider diff --git a/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool.go index 95ba423f11..ce44fae1b7 100644 --- a/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool.go @@ -64,6 +64,14 @@ func DataSourceIBMContainerVpcClusterWorkerPool() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "kms_instance_id": { + Type: schema.TypeString, + Computed: true, + }, + "crk": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -98,13 +106,16 @@ func dataSourceIBMContainerVpcClusterWorkerPoolRead(d *schema.ResourceData, meta d.Set("worker_pool_name", workerPool.PoolName) d.Set("flavor", workerPool.Flavor) d.Set("worker_count", workerPool.WorkerCount) - d.Set("provider", workerPool.Provider) d.Set("labels", workerPool.Labels) d.Set("zones", zones) d.Set("cluster", clusterName) d.Set("vpc_id", workerPool.VpcID) d.Set("isolation", workerPool.Isolation) d.Set("resource_group_id", targetEnv.ResourceGroup) + if workerPool.WorkerVolumeEncryption != nil { + d.Set("kms_instance_id", workerPool.WorkerVolumeEncryption.KmsInstanceID) + d.Set("crk", workerPool.WorkerVolumeEncryption.WorkerVolumeCRKID) + } d.SetId(workerPool.ID) return nil } diff --git a/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool_test.go b/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool_test.go index 6e23da9cab..b3ec53ebc1 100644 --- a/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool_test.go +++ b/ibm/service/kubernetes/data_source_ibm_container_vpc_worker_pool_test.go @@ -37,3 +37,30 @@ func testAccCheckIBMContainerVPCClusterWorkerPoolDataSourceConfig(name string) s } ` } + +func TestAccIBMContainerVPCClusterWorkerPoolDataSourceEnvvar(t *testing.T) { + name := 
fmt.Sprintf("tf-vpc-wp-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerVPCClusterWorkerPoolDataSourceEnvvar(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_container_vpc_cluster_worker_pool.testacc_ds_worker_pool", "id"), + resource.TestCheckResourceAttr("data.ibm_container_vpc_cluster_worker_pool.testacc_ds_worker_pool", "crk", acc.CrkID), + resource.TestCheckResourceAttr("data.ibm_container_vpc_cluster_worker_pool.testacc_ds_worker_pool", "kms_instance_id", acc.KmsInstanceID), + ), + }, + }, + }) +} + +func testAccCheckIBMContainerVPCClusterWorkerPoolDataSourceEnvvar(name string) string { + return testAccCheckIBMVpcContainerWorkerPoolEnvvar(name) + ` + data "ibm_container_vpc_cluster_worker_pool" "testacc_ds_worker_pool" { + cluster = "${ibm_container_vpc_worker_pool.test_pool.cluster}" + worker_pool_name = "${ibm_container_vpc_worker_pool.test_pool.worker_pool_name}" + } +` +} diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go index 5dcb015e76..774350e393 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go @@ -276,6 +276,16 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { Computed: true, Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", }, + "kms_instance_id": { + Type: schema.TypeString, + Optional: true, + Description: "Instance ID for boot volume encryption", + }, + "crk": { + Type: schema.TypeString, + Optional: true, + Description: "Root Key ID for boot volume encryption", + }, //Get Cluster info Request "state": { @@ -472,11 +482,20 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface } } + kmsid := d.Get("kms_instance_id").(string) + crk := d.Get("crk").(string) + + wve := v2.WorkerVolumeEncryption{ + KmsInstanceID: kmsid, + WorkerVolumeCRKID: crk, + } + workerpool := v2.WorkerPoolConfig{ - VpcID: vpcID, - Flavor: flavor, - WorkerCount: workerCount, - Zones: zonesList, + VpcID: vpcID, + Flavor: flavor, + WorkerCount: workerCount, + Zones: zonesList, + WorkerVolumeEncryption: &wve, } if l, ok := d.GetOk("worker_labels"); ok { diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go index c1a038c08b..979ef046a8 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go @@ -365,3 +365,61 @@ func testAccCheckIBMContainerVpcClusterImageSecuritySetting(name, setting string image_security_enforcement = %s }`, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.SubnetID, setting) } + +// This test is here to help to focus on given resources, but requires everything else existing already +func TestAccIBMContainerVpcClusterEnvvar(t *testing.T) { + name := fmt.Sprintf("tf-vpc-cluster-%d", acctest.RandIntRange(10, 100)) + var conf *v2.ClusterInfo + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMContainerVpcClusterDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccCheckIBMContainerVpcClusterEnvvar(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMContainerVpcExists("ibm_container_vpc_cluster.cluster", conf), + resource.TestCheckResourceAttr( + "ibm_container_vpc_cluster.cluster", "name", name), + resource.TestCheckResourceAttr( + "ibm_container_vpc_cluster.cluster", "worker_count", "1"), + ), + }, + { + ResourceName: "ibm_container_vpc_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "wait_till", "update_all_workers", "kms_config", "force_delete_storage", "wait_for_worker_update", "kms_instance_id", "crk"}, + }, + }, + }) +} + +// You need to set up env vars: +// export IBM_CLUSTER_VPC_ID +// export IBM_CLUSTER_VPC_SUBNET_ID +// export IBM_CLUSTER_VPC_RESOURCE_GROUP_ID +// export IBM_KMS_INSTANCE_ID +// export IBM_CRK_ID +// for acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID, acc.KmsInstanceID, acc.CrkID +func testAccCheckIBMContainerVpcClusterEnvvar(name string) string { + config := fmt.Sprintf(` + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 1 + resource_group_id = "%[3]s" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + kms_instance_id = "%[5]s" + crk = "%[6]s" + } + `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID, acc.KmsInstanceID, acc.CrkID) + fmt.Println(config) + return config +} diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index 2097faff33..eab24b8685 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -151,6 +151,16 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Computed: true, Description: "Resource Controller URL", }, + "kms_instance_id": { + Type: schema.TypeString, + Optional: true, + Description: "Instance ID for boot volume encryption", + }, + "crk": { + Type: schema.TypeString, + Optional: true, + Description: "Root Key ID for boot volume encryption", + }, }, } } @@ -194,19 +204,20 @@ func resourceIBMContainerVpcWorkerPoolCreate(d *schema.ResourceData, meta interf } - // for _, e := range d.Get("zones").(*schema.Set).List() { - // value := e.(map[string]interface{}) - // id := value["id"].(string) - // subnetid := value["subnet_id"].(string) - - // } + kmsid := d.Get("kms_instance_id").(string) + crk := d.Get("crk").(string) + wve := v2.WorkerVolumeEncryption{ + KmsInstanceID: kmsid, + WorkerVolumeCRKID: crk, + } workerPoolConfig := v2.WorkerPoolConfig{ - Name: d.Get("worker_pool_name").(string), - VpcID: d.Get("vpc_id").(string), - Flavor: d.Get("flavor").(string), - WorkerCount: d.Get("worker_count").(int), - Zones: zone, + Name: d.Get("worker_pool_name").(string), + VpcID: d.Get("vpc_id").(string), + Flavor: d.Get("flavor").(string), + WorkerCount: d.Get("worker_count").(int), + Zones: zone, + WorkerVolumeEncryption: &wve, } if l, ok := d.GetOk("labels"); ok { @@ -463,6 +474,10 @@ func resourceIBMContainerVpcWorkerPoolRead(d *schema.ResourceData, meta interfac if workerPool.Taints != nil { d.Set("taints", flattenWorkerPoolTaints(workerPool)) } + if workerPool.WorkerVolumeEncryption != nil { + d.Set("kms_instance_id", workerPool.WorkerVolumeEncryption.KmsInstanceID) + d.Set("crk", workerPool.WorkerVolumeEncryption.WorkerVolumeCRKID) + } controller, err := flex.GetBaseController(meta) if err 
!= nil { return err diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go index 5980bfe32a..606aaedb9d 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go @@ -80,9 +80,12 @@ func testAccCheckIBMVpcContainerWorkerPoolDestroy(s *terraform.State) error { target := v2.ClusterTargetHeader{} // Try to find the key - _, err = wpClient.WorkerPools().GetWorkerPool(cluster, workerPoolID, target) + wp, err := wpClient.WorkerPools().GetWorkerPool(cluster, workerPoolID, target) if err == nil { + if wp.ActualState == "deleted" && wp.DesiredState == "deleted" { + return nil + } return fmt.Errorf("Worker pool still exists: %s", rs.Primary.ID) } else if !strings.Contains(err.Error(), "404") { return fmt.Errorf("[ERROR] Error waiting for worker pool (%s) to be destroyed: %s", rs.Primary.ID, err) @@ -207,3 +210,53 @@ func testAccCheckIBMVpcContainerWorkerPoolUpdate(name string) string { } `, name) } + +func TestAccIBMContainerVpcClusterWorkerPoolEnvvar(t *testing.T) { + + name := fmt.Sprintf("tf-vpc-worker-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMVpcContainerWorkerPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMVpcContainerWorkerPoolEnvvar(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "flavor", "bx2.4x16"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "zones.#", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "kms_instance_id", acc.KmsInstanceID), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "crk", acc.CrkID), + ), + }, + { + ResourceName: "ibm_container_vpc_worker_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "kms_instance_id", "crk"}, + }, + }, + }) +} + +func testAccCheckIBMVpcContainerWorkerPoolEnvvar(name string) string { + return fmt.Sprintf(` + resource "ibm_container_vpc_worker_pool" "test_pool" { + cluster = "%[2]s" + worker_pool_name = "%[1]s" + flavor = "bx2.4x16" + vpc_id = "%[3]s" + worker_count = 1 + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + kms_instance_id = "%[5]s" + crk = "%[6]s" + } + `, name, acc.IksClusterID, acc.IksClusterVpcID, acc.IksClusterSubnetID, acc.KmsInstanceID, acc.CrkID) +} diff --git a/website/docs/d/container_vpc_cluster.html.markdown b/website/docs/d/container_vpc_cluster.html.markdown index b9ec549c90..7133b6c76e 100644 --- a/website/docs/d/container_vpc_cluster.html.markdown +++ b/website/docs/d/container_vpc_cluster.html.markdown @@ -78,3 +78,5 @@ In addition to all argument reference list, you can access the following attribu - `id` - (String) The ID of the subnet that the worker pool is attached to in the zone. - `primary` - (Bool) If set to **true**, the subnet is used as the primary subnet. - `worker_count` - (Integer) The number of worker nodes in this worker pool. +- `crk` - Root Key ID for boot volume encryption. +- `kms_instance_id` - Instance ID for boot volume encryption. 
diff --git a/website/docs/d/container_worker_pool.html.markdown b/website/docs/d/container_worker_pool.html.markdown index adb2968f33..783b517b3d 100644 --- a/website/docs/d/container_worker_pool.html.markdown +++ b/website/docs/d/container_worker_pool.html.markdown @@ -40,3 +40,5 @@ Review the attribute references that are exported. - `public_vlan` - (String) The ID of the public VLAN. - `worker_count` - (String) Number of workers attached to this zone. - `zone` - (String) Zone name. +- `crk` - Root Key ID for boot volume encryption. +- `kms_instance_id` - Instance ID for boot volume encryption. From 0216a10f6018e2a3014a5dc495d76c861f8a7f24 Mon Sep 17 00:00:00 2001 From: Attila Laszlo Tabori Date: Fri, 20 May 2022 08:21:07 +0200 Subject: [PATCH 2/5] resolving review comments --- .../resource_ibm_container_vpc_cluster.go | 23 +++++++++++------- .../resource_ibm_container_vpc_worker_pool.go | 23 +++++++++++------- .../d/container_vpc_cluster.html.markdown | 2 -- .../r/container_vpc_cluster.html.markdown | 23 ++++++++++++++++++ .../r/container_vpc_worker_pool.html.markdown | 24 ++++++++++++++++++- 5 files changed, 76 insertions(+), 19 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go index 774350e393..61ed64ba34 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go @@ -277,14 +277,16 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", }, "kms_instance_id": { - Type: schema.TypeString, - Optional: true, - Description: "Instance ID for boot volume encryption", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Instance ID for boot volume encryption", }, "crk": { - Type: schema.TypeString, - Optional: true, - Description: "Root Key ID for boot volume encryption", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Root Key ID for boot volume encryption", }, //Get Cluster info Request @@ -482,8 +484,13 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface } } - kmsid := d.Get("kms_instance_id").(string) - crk := d.Get("crk").(string) + var kmsid, crk string + if v, ok := d.GetOk("kms_instance_id"); ok { + kmsid = v.(string) + } + if v, ok := d.GetOk("crk"); ok { + crk = v.(string) + } wve := v2.WorkerVolumeEncryption{ KmsInstanceID: kmsid, diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index eab24b8685..252f23e222 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -152,14 +152,16 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Description: "Resource Controller URL", }, "kms_instance_id": { - Type: schema.TypeString, - Optional: true, - Description: "Instance ID for boot volume encryption", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Instance ID for boot volume encryption", }, "crk": { - Type: schema.TypeString, - Optional: true, - Description: "Root Key ID for boot volume encryption", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Root Key ID for boot volume 
encryption", }, }, } @@ -204,8 +206,13 @@ func resourceIBMContainerVpcWorkerPoolCreate(d *schema.ResourceData, meta interf } - kmsid := d.Get("kms_instance_id").(string) - crk := d.Get("crk").(string) + var kmsid, crk string + if v, ok := d.GetOk("kms_instance_id"); ok { + kmsid = v.(string) + } + if v, ok := d.GetOk("crk"); ok { + crk = v.(string) + } wve := v2.WorkerVolumeEncryption{ KmsInstanceID: kmsid, diff --git a/website/docs/d/container_vpc_cluster.html.markdown b/website/docs/d/container_vpc_cluster.html.markdown index 7133b6c76e..b9ec549c90 100644 --- a/website/docs/d/container_vpc_cluster.html.markdown +++ b/website/docs/d/container_vpc_cluster.html.markdown @@ -78,5 +78,3 @@ In addition to all argument reference list, you can access the following attribu - `id` - (String) The ID of the subnet that the worker pool is attached to in the zone. - `primary` - (Bool) If set to **true**, the subnet is used as the primary subnet. - `worker_count` - (Integer) The number of worker nodes in this worker pool. -- `crk` - Root Key ID for boot volume encryption. -- `kms_instance_id` - Instance ID for boot volume encryption. diff --git a/website/docs/r/container_vpc_cluster.html.markdown b/website/docs/r/container_vpc_cluster.html.markdown index 0d5bf2a792..494bf3ac0f 100644 --- a/website/docs/r/container_vpc_cluster.html.markdown +++ b/website/docs/r/container_vpc_cluster.html.markdown @@ -34,6 +34,26 @@ resource "ibm_container_vpc_cluster" "cluster" { } ``` +## Example with boot volume encryption +In the following example, you can create a Gen-2 VPC cluster with a default worker pool with one worker with boot volume encryption: + +```terraform +resource "ibm_container_vpc_cluster" "cluster" { + name = "my_vpc_cluster" + vpc_id = "r006-abb7c7ea-aadf-41bd-94c5-b8521736fadf" + kube_version = "1.17.5" + flavor = "bx2.2x8" + worker_count = "1" + resource_group_id = data.ibm_resource_group.resource_group.id + zones { + subnet_id = "0717-0c0899ce-48ac-4eb6-892d-4e2e1ff8c9478" + name = "us-south-1" + } + kms_instance_id = "8e9056e6-1936-4dd9-a0a1-51d824765e11" + crk = "804cb251-fa0a-46f5-a442-fe42cfb0ed5f" +} +``` + ### VPC Generation 2 Red Hat OpenShift on IBM Cloud cluster with existing OpenShift entitlement Create the Openshift Cluster with default worker pool entitlement with one worker node: @@ -190,6 +210,9 @@ Review the argument references that you can specify for your resource. - `name` - (Required, Forces new resource, String) The zone name for the default worker pool in a multizone cluster. - `subnet_id` - (Required, Forces new resource, String) The VPC subnet to assign the cluster's default worker pool. +- `crk` - Root Key ID for boot volume encryption. +- `kms_instance_id` - Instance ID for boot volume encryption. + **Note** 1. For users on account to add tags to a resource, you need to assign the right access. For more information, about tags, see [Tags permission](https://cloud.ibm.com/docs/account?topic=account-access). 
diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index 73a4f91cfd..423260d5af 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -30,6 +30,26 @@ resource "ibm_container_vpc_worker_pool" "test_pool" { } ``` +In the following example, you can create a worker pool for a vpc cluster with boot volume encryption enabled: + +```terraform +resource "ibm_container_vpc_worker_pool" "test_pool" { + cluster = "my_vpc_cluster" + worker_pool_name = "my_vpc_pool" + flavor = "c2.2x4" + vpc_id = "6015365a-9d93-4bb4-8248-79ae0db2dc21" + worker_count = "1" + + zones { + name = "us-south-1" + subnet_id = "015ffb8b-efb1-4c03-8757-29335a07493b" + } + + kms_instance_id = "8e9056e6-1936-4dd9-a0a1-51d824765e11" + crk = "804cb251-fa0a-46f5-a442-fe42cfb0ed5f" +} +``` + In the follwoing example, you can create a worker pool for openshift cluster type with entitlement. ```terraform resource "ibm_container_vpc_worker_pool" "test_pool" { @@ -77,7 +97,9 @@ Review the argument references that you can specify for your resource. Nested scheme for `zones`: - `name` - (Required, String) The name of the zone. - `subnet_id` - (Required, String) The subnet that you want to use for your worker pool. - + +- `crk` - Root Key ID for boot volume encryption. +- `kms_instance_id` - Instance ID for boot volume encryption. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. From 3bbabc5b2a8027975de06b6e18a6ed053af09f54 Mon Sep 17 00:00:00 2001 From: Attila Laszlo Tabori Date: Tue, 24 May 2022 11:38:26 +0200 Subject: [PATCH 3/5] updates --- .../resource_ibm_container_vpc_cluster.go | 36 ++++++++++--------- .../resource_ibm_container_vpc_worker_pool.go | 32 ++++++++--------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go index 61ed64ba34..48f9088bc3 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go @@ -281,12 +281,14 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { Optional: true, DiffSuppressFunc: flex.ApplyOnce, Description: "Instance ID for boot volume encryption", + RequiredWith: []string{"crk"}, }, "crk": { Type: schema.TypeString, Optional: true, DiffSuppressFunc: flex.ApplyOnce, Description: "Root Key ID for boot volume encryption", + RequiredWith: []string{"kms_instance_id"}, }, //Get Cluster info Request @@ -484,25 +486,20 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface } } - var kmsid, crk string - if v, ok := d.GetOk("kms_instance_id"); ok { - kmsid = v.(string) - } - if v, ok := d.GetOk("crk"); ok { - crk = v.(string) - } - - wve := v2.WorkerVolumeEncryption{ - KmsInstanceID: kmsid, - WorkerVolumeCRKID: crk, + workerpool := v2.WorkerPoolConfig{ + VpcID: vpcID, + Flavor: flavor, + WorkerCount: workerCount, + Zones: zonesList, } - workerpool := v2.WorkerPoolConfig{ - VpcID: vpcID, - Flavor: flavor, - WorkerCount: workerCount, - Zones: zonesList, - WorkerVolumeEncryption: &wve, + if v, ok := d.GetOk("kms_instance_id"); ok { + crk := d.Get("crk").(string) + wve := v2.WorkerVolumeEncryption{ + KmsInstanceID: v.(string), + WorkerVolumeCRKID: crk, + } + workerpool.WorkerVolumeEncryption = &wve } if l, ok := 
d.GetOk("worker_labels"); ok { @@ -1014,6 +1011,11 @@ func resourceIBMContainerVpcClusterRead(d *schema.ResourceData, meta interface{} d.Set(flex.ResourceStatus, cls.State) d.Set(flex.ResourceGroupName, cls.ResourceGroupName) + if workerPool.WorkerVolumeEncryption != nil { + d.Set("crk", workerPool.WorkerVolumeEncryption.WorkerVolumeCRKID) + d.Set("kms_instance_id", workerPool.WorkerVolumeEncryption.KmsInstanceID) + } + return nil } diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index 252f23e222..032708927c 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -156,12 +156,14 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { Optional: true, DiffSuppressFunc: flex.ApplyOnce, Description: "Instance ID for boot volume encryption", + RequiredWith: []string{"crk"}, }, "crk": { Type: schema.TypeString, Optional: true, DiffSuppressFunc: flex.ApplyOnce, Description: "Root Key ID for boot volume encryption", + RequiredWith: []string{"kms_instance_id"}, }, }, } @@ -206,25 +208,21 @@ func resourceIBMContainerVpcWorkerPoolCreate(d *schema.ResourceData, meta interf } - var kmsid, crk string - if v, ok := d.GetOk("kms_instance_id"); ok { - kmsid = v.(string) - } - if v, ok := d.GetOk("crk"); ok { - crk = v.(string) + workerPoolConfig := v2.WorkerPoolConfig{ + Name: d.Get("worker_pool_name").(string), + VpcID: d.Get("vpc_id").(string), + Flavor: d.Get("flavor").(string), + WorkerCount: d.Get("worker_count").(int), + Zones: zone, } - wve := v2.WorkerVolumeEncryption{ - KmsInstanceID: kmsid, - WorkerVolumeCRKID: crk, - } - workerPoolConfig := v2.WorkerPoolConfig{ - Name: d.Get("worker_pool_name").(string), - VpcID: d.Get("vpc_id").(string), - Flavor: d.Get("flavor").(string), - WorkerCount: d.Get("worker_count").(int), - Zones: zone, - WorkerVolumeEncryption: &wve, + if v, ok := d.GetOk("kms_instance_id"); ok { + crk := d.Get("crk").(string) + wve := v2.WorkerVolumeEncryption{ + KmsInstanceID: v.(string), + WorkerVolumeCRKID: crk, + } + workerPoolConfig.WorkerVolumeEncryption = &wve } if l, ok := d.GetOk("labels"); ok { From e76d61cd7e40f8cb8a5ba2b95cb6386ff711ecc4 Mon Sep 17 00:00:00 2001 From: Attila Laszlo Tabori Date: Wed, 25 May 2022 09:17:19 +0200 Subject: [PATCH 4/5] removed verifyignore of crk and kms in test, format update in reousrce --- .../kubernetes/resource_ibm_container_vpc_cluster_test.go | 2 +- website/docs/r/container_vpc_cluster.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go index 979ef046a8..d2f8d7129b 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go @@ -391,7 +391,7 @@ func TestAccIBMContainerVpcClusterEnvvar(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "wait_till", "update_all_workers", "kms_config", "force_delete_storage", "wait_for_worker_update", "kms_instance_id", "crk"}, + "wait_till", "update_all_workers", "kms_config", "force_delete_storage", "wait_for_worker_update"}, }, }, }) diff --git a/website/docs/r/container_vpc_cluster.html.markdown b/website/docs/r/container_vpc_cluster.html.markdown index 494bf3ac0f..7d5743035f 100644 --- 
a/website/docs/r/container_vpc_cluster.html.markdown +++ b/website/docs/r/container_vpc_cluster.html.markdown @@ -50,7 +50,7 @@ resource "ibm_container_vpc_cluster" "cluster" { name = "us-south-1" } kms_instance_id = "8e9056e6-1936-4dd9-a0a1-51d824765e11" - crk = "804cb251-fa0a-46f5-a442-fe42cfb0ed5f" + crk = "804cb251-fa0a-46f5-a442-fe42cfb0ed5f" } ``` From e6ddcca2989803b11410b252f3075f9d8254ab16 Mon Sep 17 00:00:00 2001 From: Attila Laszlo Tabori Date: Wed, 25 May 2022 09:21:40 +0200 Subject: [PATCH 5/5] format worker pool example --- website/docs/r/container_vpc_worker_pool.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index 423260d5af..474ea58158 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -46,7 +46,7 @@ resource "ibm_container_vpc_worker_pool" "test_pool" { } kms_instance_id = "8e9056e6-1936-4dd9-a0a1-51d824765e11" - crk = "804cb251-fa0a-46f5-a442-fe42cfb0ed5f" + crk = "804cb251-fa0a-46f5-a442-fe42cfb0ed5f" } ```
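
The documentation examples in this series pass literal GUIDs to `kms_instance_id` and `crk`. Below is a minimal sketch of wiring the same arguments from provider data sources and a managed root key instead of hard-coded IDs. It assumes a pre-existing Key Protect instance named `my-kms-instance` and a resource group named `Default`, and it reuses the VPC and subnet IDs from the example above; the `ibm_resource_instance`, `ibm_resource_group`, and `ibm_kms_key` resources are not touched by this patch and appear here only for illustration.

```terraform
# Assumed pre-existing resource group and Key Protect (KMS) instance;
# replace the names with your own.
data "ibm_resource_group" "resource_group" {
  name = "Default"
}

data "ibm_resource_instance" "kms_instance" {
  name    = "my-kms-instance"
  service = "kms"
}

# A root key (standard_key = false) in that instance; its ID is what the
# new `crk` argument expects.
resource "ibm_kms_key" "boot_volume_crk" {
  instance_id  = data.ibm_resource_instance.kms_instance.guid
  key_name     = "boot-volume-root-key"
  standard_key = false
}

resource "ibm_container_vpc_cluster" "cluster" {
  name              = "my_vpc_cluster"
  vpc_id            = "r006-abb7c7ea-aadf-41bd-94c5-b8521736fadf"
  flavor            = "bx2.4x16"
  worker_count      = 1
  resource_group_id = data.ibm_resource_group.resource_group.id

  zones {
    subnet_id = "0717-0c0899ce-48ac-4eb6-892d-4e2e1ff8c9478"
    name      = "us-south-1"
  }

  # Boot volume encryption arguments introduced by this patch:
  # the KMS instance GUID and the root key (CRK) ID.
  kms_instance_id = data.ibm_resource_instance.kms_instance.guid
  crk             = ibm_kms_key.boot_volume_crk.key_id
}
```

The same `kms_instance_id`/`crk` pair can be passed to `ibm_container_vpc_worker_pool` in the same way, since both resources forward the values through `WorkerVolumeEncryption` in the worker pool configuration.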