Skip to content

Commit

Permalink
Add the ability to use the GKE recurring maintenance window in beta.
Browse files Browse the repository at this point in the history
Signed-off-by: Modular Magician <magic-modules@google.com>
  • Loading branch information
nat-henderson authored and modular-magician committed Oct 24, 2019
1 parent 92b14b4 commit 597c9dc
Show file tree
Hide file tree
Showing 3 changed files with 211 additions and 37 deletions.
145 changes: 115 additions & 30 deletions google-beta/resource_container_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,14 @@ var (
ipAllocationRangeFields = []string{"ip_allocation_policy.0.cluster_secondary_range_name", "ip_allocation_policy.0.services_secondary_range_name"}
)

// validateRFC3339Date is a schema validation function that checks the given
// attribute value parses as an RFC 3339 timestamp (e.g. "2019-01-01T00:00:00Z").
// It never emits warnings; a parse failure is reported as a single error.
func validateRFC3339Date(v interface{}, k string) (warnings []string, errors []error) {
	if _, err := time.Parse(time.RFC3339, v.(string)); err != nil {
		errors = append(errors, err)
	}
	return warnings, errors
}

func resourceContainerCluster() *schema.Resource {
return &schema.Resource{
Create: resourceContainerClusterCreate,
Expand Down Expand Up @@ -370,8 +378,10 @@ func resourceContainerCluster() *schema.Resource {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"daily_maintenance_window": {
Type: schema.TypeList,
Required: true,
Type: schema.TypeList,

Optional: true,

MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
Expand All @@ -388,6 +398,30 @@ func resourceContainerCluster() *schema.Resource {
},
},
},
"recurring_window": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
ConflictsWith: []string{"maintenance_policy.0.daily_maintenance_window"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"start_time": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateRFC3339Date,
},
"end_time": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateRFC3339Date,
},
"recurrence": {
Type: schema.TypeString,
Required: true,
},
},
},
},
},
},
},
Expand Down Expand Up @@ -927,7 +961,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster := &containerBeta.Cluster{
Name: clusterName,
InitialNodeCount: int64(d.Get("initial_node_count").(int)),
MaintenancePolicy: expandMaintenancePolicy(d.Get("maintenance_policy")),
MaintenancePolicy: expandMaintenancePolicy(d, meta),
MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config")),
InitialClusterVersion: d.Get("min_master_version").(string),
ClusterIpv4Cidr: d.Get("cluster_ipv4_cidr").(string),
Expand Down Expand Up @@ -1438,15 +1472,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
}

if d.HasChange("maintenance_policy") {
var req *containerBeta.SetMaintenancePolicyRequest
if mp, ok := d.GetOk("maintenance_policy"); ok {
req = &containerBeta.SetMaintenancePolicyRequest{
MaintenancePolicy: expandMaintenancePolicy(mp),
}
} else {
req = &containerBeta.SetMaintenancePolicyRequest{
NullFields: []string{"MaintenancePolicy"},
}
req := &containerBeta.SetMaintenancePolicyRequest{
MaintenancePolicy: expandMaintenancePolicy(d, meta),
}

updateF := func() error {
Expand Down Expand Up @@ -2172,22 +2199,63 @@ func expandIPAllocationPolicy(configured interface{}) *containerBeta.IPAllocatio
}
}

func expandMaintenancePolicy(configured interface{}) *containerBeta.MaintenancePolicy {
func expandMaintenancePolicy(d *schema.ResourceData, meta interface{}) *containerBeta.MaintenancePolicy {
config := meta.(*Config)
// We have to perform a full Get() as part of this, to get the fingerprint. We can't do this
// at any other time, because the fingerprint update might happen between plan and apply.
// We can omit error checks, since to have gotten this far, a project is definitely configured.
project, _ := getProject(d, config)
location, _ := getLocation(d, config)
clusterName := d.Get("name").(string)
name := containerClusterFullName(project, location, clusterName)
cluster, _ := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
resourceVersion := ""
// If the cluster doesn't exist or if there is a read error of any kind, we will pass in an empty
// resourceVersion. If there happens to be a change to maintenance policy, we will fail at that
// point. This is a compromise between code cleanliness and a slightly worse user experience in
// an unlikely error case - we choose code cleanliness.
if cluster != nil && cluster.MaintenancePolicy != nil {
resourceVersion = cluster.MaintenancePolicy.ResourceVersion
}

configured := d.Get("maintenance_policy")
l := configured.([]interface{})
if len(l) == 0 || l[0] == nil {
return nil
return &containerBeta.MaintenancePolicy{
ResourceVersion: resourceVersion,
}
}

maintenancePolicy := l[0].(map[string]interface{})
dailyMaintenanceWindow := maintenancePolicy["daily_maintenance_window"].([]interface{})[0].(map[string]interface{})
startTime := dailyMaintenanceWindow["start_time"].(string)
return &containerBeta.MaintenancePolicy{
Window: &containerBeta.MaintenanceWindow{
DailyMaintenanceWindow: &containerBeta.DailyMaintenanceWindow{
StartTime: startTime,

if dailyMaintenanceWindow, ok := maintenancePolicy["daily_maintenance_window"]; ok && len(dailyMaintenanceWindow.([]interface{})) > 0 {
dmw := dailyMaintenanceWindow.([]interface{})[0].(map[string]interface{})
startTime := dmw["start_time"].(string)
return &containerBeta.MaintenancePolicy{
Window: &containerBeta.MaintenanceWindow{
DailyMaintenanceWindow: &containerBeta.DailyMaintenanceWindow{
StartTime: startTime,
},
},
},
ResourceVersion: resourceVersion,
}
}
if recurringWindow, ok := maintenancePolicy["recurring_window"]; ok && len(recurringWindow.([]interface{})) > 0 {
rw := recurringWindow.([]interface{})[0].(map[string]interface{})
return &containerBeta.MaintenancePolicy{
Window: &containerBeta.MaintenanceWindow{
RecurringWindow: &containerBeta.RecurringTimeWindow{
Window: &containerBeta.TimeWindow{
StartTime: rw["start_time"].(string),
EndTime: rw["end_time"].(string),
},
Recurrence: rw["recurrence"].(string),
},
},
ResourceVersion: resourceVersion,
}
}

return nil
}

func expandClusterAutoscaling(configured interface{}, d *schema.ResourceData) *containerBeta.ClusterAutoscaling {
Expand Down Expand Up @@ -2598,19 +2666,36 @@ func flattenIPAllocationPolicy(c *containerBeta.Cluster, d *schema.ResourceData,
}

func flattenMaintenancePolicy(mp *containerBeta.MaintenancePolicy) []map[string]interface{} {
if mp == nil || mp.Window == nil || mp.Window.DailyMaintenanceWindow == nil {
if mp == nil || mp.Window == nil {
return nil
}
return []map[string]interface{}{
{
"daily_maintenance_window": []map[string]interface{}{
{
"start_time": mp.Window.DailyMaintenanceWindow.StartTime,
"duration": mp.Window.DailyMaintenanceWindow.Duration,
if mp.Window.DailyMaintenanceWindow != nil {
return []map[string]interface{}{
{
"daily_maintenance_window": []map[string]interface{}{
{
"start_time": mp.Window.DailyMaintenanceWindow.StartTime,
"duration": mp.Window.DailyMaintenanceWindow.Duration,
},
},
},
},
}
}
if mp.Window.RecurringWindow != nil {
return []map[string]interface{}{
{
"recurring_window": []map[string]interface{}{
{
"start_time": mp.Window.RecurringWindow.Window.StartTime,
"end_time": mp.Window.RecurringWindow.Window.EndTime,
"recurrence": mp.Window.RecurringWindow.Recurrence,
},
},
},
}
}

return nil
}

func flattenMasterAuth(ma *containerBeta.MasterAuth) []map[string]interface{} {
Expand Down
81 changes: 75 additions & 6 deletions google-beta/resource_container_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1199,6 +1199,51 @@ func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) {
})
}

// TestAccContainerCluster_withRecurringMaintenanceWindow creates a cluster with a
// recurring maintenance window, imports it, then updates it to remove the
// maintenance policy entirely and imports again.
func TestAccContainerCluster_withRecurringMaintenanceWindow(t *testing.T) {
t.Parallel()
clusterName := acctest.RandString(10)
resourceName := "google_container_cluster.with_recurring_maintenance_window"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
// Create with a recurring window; the conflicting daily window must not be set.
Config: testAccContainerCluster_withRecurringMaintenanceWindow(clusterName, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z"),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckNoResourceAttr(resourceName,
"maintenance_policy.0.daily_maintenance_window.0.start_time"),
),
},
{
ResourceName: resourceName,
ImportStateIdPrefix: "us-central1-a/",
ImportState: true,
ImportStateVerify: true,
},
{
// Empty start/end times make the config omit maintenance_policy entirely,
// which should clear both window styles on the cluster.
Config: testAccContainerCluster_withRecurringMaintenanceWindow(clusterName, "", ""),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckNoResourceAttr(resourceName,
"maintenance_policy.0.daily_maintenance_window.0.start_time"),
resource.TestCheckNoResourceAttr(resourceName,
"maintenance_policy.0.recurring_window.0.start_time"),
),
},
{
ResourceName: resourceName,
ImportStateIdPrefix: "us-central1-a/",
ImportState: true,
ImportStateVerify: true,
// maintenance_policy.# = 0 is equivalent to no maintenance policy at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"maintenance_policy.#"},
},
},
})
}

func TestAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -2012,7 +2057,7 @@ resource "google_container_cluster" "with_release_channel" {
initial_node_count = 1
release_channel {
channel = "%s"
channel = "%s"
}
}`, clusterName, channel)
}
Expand Down Expand Up @@ -2871,6 +2916,30 @@ resource "google_container_cluster" "with_maintenance_window" {
}`, clusterName, maintenancePolicy)
}

// testAccContainerCluster_withRecurringMaintenanceWindow renders the test config
// for a cluster with a recurring maintenance window. An empty startTime omits the
// maintenance_policy block entirely, so the same helper can exercise removal of
// an existing policy.
func testAccContainerCluster_withRecurringMaintenanceWindow(clusterName, startTime, endTime string) string {
	maintenancePolicy := ""
	if len(startTime) > 0 {
		maintenancePolicy = fmt.Sprintf(`
	maintenance_policy {
		recurring_window {
			start_time = "%s"
			end_time = "%s"
			recurrence = "FREQ=DAILY"
		}
	}`, startTime, endTime)
	}

	return fmt.Sprintf(`
resource "google_container_cluster" "with_recurring_maintenance_window" {
	name               = "cluster-test-%s"
	zone               = "us-central1-a"
	initial_node_count = 1
	%s
}`, clusterName, maintenancePolicy)
}

func testAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(cluster string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
Expand Down Expand Up @@ -3372,8 +3441,8 @@ resource "google_container_cluster" "cidr_error_overlap" {
initial_node_count = 1
ip_allocation_policy {
cluster_ipv4_cidr_block = "10.0.0.0/16"
services_ipv4_cidr_block = "10.1.0.0/16"
cluster_ipv4_cidr_block = "10.0.0.0/16"
services_ipv4_cidr_block = "10.1.0.0/16"
}
}
`, initConfig, secondCluster)
Expand All @@ -3395,11 +3464,11 @@ data "google_project" "project" {}
data "google_iam_policy" "test_kms_binding" {
binding {
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
members = [
members = [
"serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com",
]
]
}
}
Expand Down
22 changes: 21 additions & 1 deletion website/docs/r/container_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,7 @@ The `authenticator_groups_config` block supports:

The `maintenance_policy` block supports:

* `daily_maintenance_window` - (Required) Time window specified for daily maintenance operations.
* `daily_maintenance_window` - (Required in GA, Optional in Beta) Time window specified for daily maintenance operations.
Specify `start_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM",
where HH : \[00-23\] and MM : \[00-59\] GMT. For example:

Expand All @@ -414,6 +414,26 @@ maintenance_policy {
}
```

* `recurring_window` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html)) Time window for
recurring maintenance operations.

Specify `start_time` and `end_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) date format. The start time's date is
the initial date that the window starts, and the end time is used for calculating duration. Specify `recurrence` in
[RFC5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) RRULE format, to specify when this recurs.

For example:
```
maintenance_policy {
recurring_window {
start_time = "2019-01-01T03:00:00Z"
end_time = "2019-01-01T06:00:00Z"
recurrence = "FREQ=DAILY"
}
}
```

In beta, exactly one of `recurring_window` or `daily_maintenance_window` is required if a `maintenance_policy` block is supplied (the two are mutually exclusive).

The `ip_allocation_policy` block supports:

* `use_ip_aliases` - (Optional) Whether alias IPs will be used for pod IPs in
Expand Down

0 comments on commit 597c9dc

Please sign in to comment.