Revert "upgrade dcl version to v1.73.0 (#11804)" (#11862) (#19673)
[upstream:abefe83b63a813b308e7c09db02036664924a331]

Signed-off-by: Modular Magician <magic-modules@google.com>
modular-magician authored and roaks3 committed Sep 30, 2024
1 parent 165d6ad commit d615f66
Showing 8 changed files with 6 additions and 132 deletions.
3 changes: 3 additions & 0 deletions .changelog/11862.txt
@@ -0,0 +1,3 @@
+ ```release-note:none
+
+ ```
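For reference, files in `.changelog/` follow the hashicorp/go-changelog convention: `release-note:none`, as added here, suppresses the user-facing changelog entry for this PR, while a user-visible change would carry a typed note. A hypothetical example of the latter:

```release-note:bug
containeraws: fixed an example-only issue in `google_container_aws_node_pool`
```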
2 changes: 1 addition & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.21

require (
cloud.google.com/go/bigtable v1.30.0
- github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0
+ github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0
github.com/apparentlymart/go-cidr v1.1.0
github.com/davecgh/go-spew v1.1.1
github.com/dnaeon/go-vcr v1.0.1
2 changes: 0 additions & 2 deletions go.sum
@@ -440,5 +440,3 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
- github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0 h1:mVsrkdw7rJbmay3EE/KjHx7WbQcrfwLmxmzCFDXIl90=
- github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k=
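For anyone reproducing this kind of pin locally: assuming the standard Go module workflow, the paired go.mod/go.sum change above is what `go get` at the older version followed by `go mod tidy` produces:

```
go get github.com/GoogleCloudPlatform/declarative-resource-client-library@v1.72.0
go mod tidy
```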
2 changes: 1 addition & 1 deletion google/services/assuredworkloads/resource_assured_workloads_workload.go
@@ -60,7 +60,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT",
Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS",
},

"display_name": {
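Only the `Description` string changes here because DCL-generated resources delegate enum validation to the API server. Purely as an illustrative sketch (not code from this provider), a hand-written schema could pin the v1.72.0 value list client-side with the SDK's `validation.StringInSlice`:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// Hypothetical client-side pin of the compliance_regime enum; the value
// list mirrors the v1.72.0 description above.
var complianceRegimeSchema = &schema.Schema{
	Type:     schema.TypeString,
	Required: true,
	ForceNew: true,
	ValidateFunc: validation.StringInSlice([]string{
		"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH",
		"FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", "HIPAA", "HITRUST",
		"EU_REGIONS_AND_SUPPORT", "CA_REGIONS_AND_SUPPORT", "ITAR",
		"AU_REGIONS_AND_US_SUPPORT", "ASSURED_WORKLOADS_FOR_PARTNERS",
		"ISR_REGIONS", "ISR_REGIONS_AND_SUPPORT", "CA_PROTECTED_B",
		"IL5", "IL2", "JP_REGIONS_AND_SUPPORT",
		"KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", "REGIONAL_CONTROLS",
	}, false),
}
```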
84 changes: 0 additions & 84 deletions google/services/containeraws/resource_container_aws_node_pool.go
@@ -123,15 +123,6 @@ func ResourceContainerAwsNodePool() *schema.Resource {
Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.",
},

"kubelet_config": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Description: "The kubelet configuration for the node pool.",
MaxItems: 1,
Elem: ContainerAwsNodePoolKubeletConfigSchema(),
},

"management": {
Type: schema.TypeList,
Computed: true,
@@ -455,42 +446,6 @@ func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource {
}
}

- func ContainerAwsNodePoolKubeletConfigSchema() *schema.Resource {
- return &schema.Resource{
- Schema: map[string]*schema.Schema{
- "cpu_cfs_quota": {
- Type: schema.TypeBool,
- Computed: true,
- Optional: true,
- ForceNew: true,
- Description: "Whether or not to enable CPU CFS quota. Defaults to true.",
- },

- "cpu_cfs_quota_period": {
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".",
- },

- "cpu_manager_policy": {
- Type: schema.TypeString,
- Computed: true,
- Optional: true,
- ForceNew: true,
- Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".",
- },

- "pod_pids_limit": {
- Type: schema.TypeInt,
- Optional: true,
- ForceNew: true,
- Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.",
- },
- },
- }
- }

func ContainerAwsNodePoolManagementSchema() *schema.Resource {
return &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -556,7 +511,6 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{}
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
- KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -616,7 +570,6 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{})
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
- KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -671,9 +624,6 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{})
if err = d.Set("effective_annotations", res.Annotations); err != nil {
return fmt.Errorf("error setting effective_annotations in state: %s", err)
}
if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil {
return fmt.Errorf("error setting kubelet_config in state: %s", err)
}
if err = d.Set("management", tpgresource.FlattenContainerAwsNodePoolManagement(res.Management, d, config)); err != nil {
return fmt.Errorf("error setting management in state: %s", err)
}
@@ -724,7 +674,6 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{}
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
- KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -779,7 +728,6 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{}
SubnetId: dcl.String(d.Get("subnet_id").(string)),
Version: dcl.String(d.Get("version").(string)),
Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")),
- KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")),
Management: expandContainerAwsNodePoolManagement(d.Get("management")),
Project: dcl.String(project),
UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -1132,38 +1080,6 @@ func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxP

}

- func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePoolKubeletConfig {
- if o == nil {
- return containeraws.EmptyNodePoolKubeletConfig
- }
- objArr := o.([]interface{})
- if len(objArr) == 0 || objArr[0] == nil {
- return containeraws.EmptyNodePoolKubeletConfig
- }
- obj := objArr[0].(map[string]interface{})
- return &containeraws.NodePoolKubeletConfig{
- CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)),
- CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)),
- CpuManagerPolicy: containeraws.NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)),
- PodPidsLimit: dcl.Int64(int64(obj["pod_pids_limit"].(int))),
- }
- }

- func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletConfig) interface{} {
- if obj == nil || obj.Empty() {
- return nil
- }
- transformed := map[string]interface{}{
- "cpu_cfs_quota": obj.CpuCfsQuota,
- "cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod,
- "cpu_manager_policy": obj.CpuManagerPolicy,
- "pod_pids_limit": obj.PodPidsLimit,
- }

- return []interface{}{transformed}

- }
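The deleted pair above is the expand/flatten pattern these generated resources use for every `MaxItems: 1` block: Terraform state hands the block to Go as a one-element `[]interface{}` holding a `map[string]interface{}`, which is expanded into a typed DCL struct for API calls and flattened back into a one-element list for `d.Set`. A self-contained sketch of the same shape, using a hypothetical `Config` type in place of `containeraws.NodePoolKubeletConfig`:

```go
package main

import "fmt"

// Config is a hypothetical stand-in for a DCL struct such as
// containeraws.NodePoolKubeletConfig.
type Config struct {
	CpuCfsQuotaPeriod string
	PodPidsLimit      int64
}

// expandConfig mirrors expandContainerAwsNodePoolKubeletConfig: a
// schema.TypeList with MaxItems: 1 arrives as a one-element []interface{}.
func expandConfig(o interface{}) *Config {
	objArr, ok := o.([]interface{})
	if !ok || len(objArr) == 0 || objArr[0] == nil {
		return nil
	}
	obj := objArr[0].(map[string]interface{})
	return &Config{
		CpuCfsQuotaPeriod: obj["cpu_cfs_quota_period"].(string),
		PodPidsLimit:      int64(obj["pod_pids_limit"].(int)),
	}
}

// flattenConfig mirrors flattenContainerAwsNodePoolKubeletConfig: the typed
// struct goes back into a one-element list suitable for d.Set.
func flattenConfig(obj *Config) interface{} {
	if obj == nil {
		return nil
	}
	return []interface{}{map[string]interface{}{
		"cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod,
		"pod_pids_limit":       obj.PodPidsLimit,
	}}
}

func main() {
	raw := []interface{}{map[string]interface{}{
		"cpu_cfs_quota_period": "100ms",
		"pod_pids_limit":       1024,
	}}
	fmt.Println(flattenConfig(expandConfig(raw))) // round-trips the block unchanged
}
```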

func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolManagement {
if o == nil {
return nil
14 changes: 0 additions & 14 deletions
@@ -276,13 +276,6 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = true
}
- kubelet_config {
- cpu_manager_policy = "none"
- cpu_cfs_quota = true
- cpu_cfs_quota_period = "100ms"
- pod_pids_limit = 1024
- }
project = "%{project_name}"
}
@@ -443,13 +436,6 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = false
}
- kubelet_config {
- cpu_manager_policy = "none"
- cpu_cfs_quota = true
- cpu_cfs_quota_period = "100ms"
- pod_pids_limit = 1024
- }
project = "%{project_name}"
}
2 changes: 1 addition & 1 deletion website/docs/r/assured_workloads_workload.html.markdown
@@ -146,7 +146,7 @@ The following arguments are supported:

* `compliance_regime` -
(Required)
- Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT
+ Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS

* `display_name` -
(Required)
29 changes: 0 additions & 29 deletions website/docs/r/container_aws_node_pool.html.markdown
@@ -179,13 +179,6 @@ resource "google_container_aws_node_pool" "primary" {
auto_repair = true
}
- kubelet_config {
- cpu_manager_policy = "none"
- cpu_cfs_quota = true
- cpu_cfs_quota_period = "100ms"
- pod_pids_limit = 1024
- }
project = "my-project-name"
}
@@ -640,10 +633,6 @@ The `max_pods_constraint` block supports:
**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.
Please refer to the field `effective_annotations` for all of the annotations present on the resource.

- * `kubelet_config` -
- (Optional)
- The kubelet configuration for the node pool.

* `management` -
(Optional)
The Management configuration for this node pool.
@@ -732,24 +721,6 @@ The `taints` block supports:
(Required)
Value for the taint.

- The `kubelet_config` block supports:

- * `cpu_cfs_quota` -
- (Optional)
- Whether or not to enable CPU CFS quota. Defaults to true.

- * `cpu_cfs_quota_period` -
- (Optional)
- Optional. The CPU CFS quota period to use for the node. Defaults to "100ms".

- * `cpu_manager_policy` -
- (Optional)
- The CpuManagerPolicy to use for the node. Defaults to "none".

- * `pod_pids_limit` -
- (Optional)
- Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.

The `management` block supports:

* `auto_repair` -
