upgrade dcl version to v1.73.0 (#11804) (#8276)
[upstream:0e7c75ad99a88819bc4b2b397717e69e1c61d3cb]

Signed-off-by: Modular Magician <magic-modules@google.com>
modular-magician authored Sep 24, 2024
1 parent cf4810b commit 46ceff1
Showing 8 changed files with 152 additions and 3 deletions.
6 changes: 6 additions & 0 deletions .changelog/11804.txt
@@ -0,0 +1,6 @@
```release-note:enhancement
containeraws: added `kubelet_config` field group to the `google_container_aws_node_pool` resource
```
```release-note:enhancement
assuredworkloads: added `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS` and `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT` enum values to `compliance_regime` in the `google_assured_workloads_workload` resource
```
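
For context, a minimal sketch of adopting one of the new compliance regimes. The resource and argument names follow the assured_workloads_workload documentation further down in this diff; the display name, organization, billing account, and location values are placeholders, not part of this commit:

```hcl
# Illustrative placeholder values; substitute your own organization,
# billing account, and location.
resource "google_assured_workloads_workload" "healthcare" {
  display_name      = "healthcare-workload"
  compliance_regime = "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS"
  location          = "us-central1"
  organization      = "123456789"
  billing_account   = "billingAccounts/000000-000000-000000"
}
```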
2 changes: 1 addition & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.21

require (
cloud.google.com/go/bigtable v1.30.0
-github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0
+github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0
github.com/apparentlymart/go-cidr v1.1.0
github.com/davecgh/go-spew v1.1.1
github.com/dnaeon/go-vcr v1.0.1
2 changes: 2 additions & 0 deletions go.sum
@@ -440,3 +440,5 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0 h1:mVsrkdw7rJbmay3EE/KjHx7WbQcrfwLmxmzCFDXIl90=
github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k=
resource_assured_workloads_workload.go
@@ -60,7 +60,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS",
Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT",
},

"display_name": {
resource_container_aws_node_pool.go
@@ -124,6 +124,15 @@ func ResourceContainerAwsNodePool() *schema.Resource {
Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.",
},

"kubelet_config": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Description: "The kubelet configuration for the node pool.",
MaxItems: 1,
Elem: ContainerAwsNodePoolKubeletConfigSchema(),
},

"management": {
Type: schema.TypeList,
Computed: true,
@@ -503,6 +512,42 @@ func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource {
}
}

func ContainerAwsNodePoolKubeletConfigSchema() *schema.Resource {
return &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu_cfs_quota": {
Type: schema.TypeBool,
Computed: true,
Optional: true,
ForceNew: true,
Description: "Whether or not to enable CPU CFS quota. Defaults to true.",
},

"cpu_cfs_quota_period": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".",
},

"cpu_manager_policy": {
Type: schema.TypeString,
Computed: true,
Optional: true,
ForceNew: true,
Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".",
},

"pod_pids_limit": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.",
},
},
}
}

func ContainerAwsNodePoolManagementSchema() *schema.Resource {
return &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -568,6 +613,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{}
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -627,6 +673,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{})
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -681,6 +728,9 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{})
if err = d.Set("effective_annotations", res.Annotations); err != nil {
return fmt.Errorf("error setting effective_annotations in state: %s", err)
}
if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil {
return fmt.Errorf("error setting kubelet_config in state: %s", err)
}
if err = d.Set("management", tpgresource.FlattenContainerAwsNodePoolManagement(res.Management, d, config)); err != nil {
return fmt.Errorf("error setting management in state: %s", err)
}
@@ -731,6 +781,7 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{}
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -785,6 +836,7 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{}
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -1195,6 +1247,38 @@ func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxP

}

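// expandContainerAwsNodePoolKubeletConfig converts the Terraform list-of-one
// block representation of kubelet_config into its DCL struct. The DCL "empty"
// sentinel (rather than nil) marks a block that is absent from configuration.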
func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePoolKubeletConfig {
if o == nil {
return containeraws.EmptyNodePoolKubeletConfig
}
objArr := o.([]interface{})
if len(objArr) == 0 || objArr[0] == nil {
return containeraws.EmptyNodePoolKubeletConfig
}
obj := objArr[0].(map[string]interface{})
return &containeraws.NodePoolKubeletConfig{
CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)),
CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)),
CpuManagerPolicy: containeraws.NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)),
PodPidsLimit: dcl.Int64(int64(obj["pod_pids_limit"].(int))),
}
}

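// flattenContainerAwsNodePoolKubeletConfig converts the DCL struct back into
// the single-element list form that Terraform stores in state.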
func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletConfig) interface{} {
if obj == nil || obj.Empty() {
return nil
}
transformed := map[string]interface{}{
"cpu_cfs_quota": obj.CpuCfsQuota,
"cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod,
"cpu_manager_policy": obj.CpuManagerPolicy,
"pod_pids_limit": obj.PodPidsLimit,
}

return []interface{}{transformed}

}

func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolManagement {
if o == nil {
return nil
resource_container_aws_node_pool_generated_test.go
@@ -366,6 +366,13 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = true
}
kubelet_config {
cpu_manager_policy = "none"
cpu_cfs_quota = true
cpu_cfs_quota_period = "100ms"
pod_pids_limit = 1024
}
project = "%{project_name}"
}
@@ -526,6 +533,13 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = false
}
kubelet_config {
cpu_manager_policy = "none"
cpu_cfs_quota = true
cpu_cfs_quota_period = "100ms"
pod_pids_limit = 1024
}
project = "%{project_name}"
}
@@ -1004,6 +1018,13 @@ resource "google_container_aws_node_pool" "primary" {
management {
auto_repair = true
}
kubelet_config {
cpu_manager_policy = "none"
cpu_cfs_quota = true
cpu_cfs_quota_period = "100ms"
pod_pids_limit = 1024
}
annotations = {
label-one = "value-one"
@@ -1181,6 +1202,13 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = false
}
kubelet_config {
cpu_manager_policy = "none"
cpu_cfs_quota = true
cpu_cfs_quota_period = "100ms"
pod_pids_limit = 1024
}
annotations = {
label-two = "value-two"
}
2 changes: 1 addition & 1 deletion website/docs/r/assured_workloads_workload.html.markdown
@@ -146,7 +146,7 @@ The following arguments are supported:

* `compliance_regime` -
(Required)
-Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS
+Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT

* `display_name` -
(Required)
29 changes: 29 additions & 0 deletions website/docs/r/container_aws_node_pool.html.markdown
@@ -179,6 +179,13 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = true
}
kubelet_config {
cpu_manager_policy = "none"
cpu_cfs_quota = true
cpu_cfs_quota_period = "100ms"
pod_pids_limit = 1024
}
project = "my-project-name"
}
@@ -633,6 +640,10 @@ The `max_pods_constraint` block supports:
**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.
Please refer to the field `effective_annotations` for all of the annotations present on the resource.

* `kubelet_config` -
(Optional)
The kubelet configuration for the node pool.

* `management` -
(Optional)
The Management configuration for this node pool.
@@ -721,6 +732,24 @@ The `taints` block supports:
(Required)
Value for the taint.

The `kubelet_config` block supports:

* `cpu_cfs_quota` -
(Optional)
Whether or not to enable CPU CFS quota. Defaults to true.

* `cpu_cfs_quota_period` -
(Optional)
Optional. The CPU CFS quota period to use for the node. Defaults to "100ms".

* `cpu_manager_policy` -
(Optional)
The CpuManagerPolicy to use for the node. Defaults to "none".

* `pod_pids_limit` -
(Optional)
Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.
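
Taken together, a sketch of the block with its documented defaults spelled out. This goes inside a `google_container_aws_node_pool` resource; all four fields are immutable (ForceNew), so changing any of them replaces the node pool. The inline values mirror the defaults documented above, not additional behavior from this commit:

```hcl
kubelet_config {
  cpu_manager_policy   = "none"    # documented default CPU manager policy
  cpu_cfs_quota        = true      # enforce CPU CFS quota (the documented default)
  cpu_cfs_quota_period = "100ms"   # documented default quota period
  pod_pids_limit       = 1024      # cap PIDs per pod; left unset, the limit scales with machine size
}
```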

The `management` block supports:

* `auto_repair` -
