azurerm_batch_pool - Fixes a bug in `settings_json` of the `extension` block (hashicorp#24976)

* throw an error when parsing fails for `extension`

* fix empty `settings_json` in the read function

* add a new test case for empty `settings_json`

* Update batch_pool_resource.go

* optimize the expand vnetConfig flow

* Update batch_pool.html.markdown

---------

Co-authored-by: kt <kt@katbyte.me>
Co-authored-by: catriona-m <86247157+catriona-m@users.noreply.github.com>
3 people authored and rizkybiz committed Feb 29, 2024
1 parent fbfac79 commit 0254573
Showing 4 changed files with 110 additions and 33 deletions.
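The core of the `settings_json` fix is in the read function below: the extension settings returned by the API are now marshalled back to a JSON string with `encoding/json` rather than the previous flattening helper, which apparently did not preserve an empty settings map, so a configuration of `settings_json = jsonencode({})` did not round-trip cleanly into state. A minimal, standalone sketch of the new behaviour (illustrative only, not the provider code itself):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// If the API returns the extension's public settings as an empty map
	// (as the new acceptance test assumes), json.Marshal round-trips it to
	// "{}", which matches a config written as settings_json = jsonencode({}).
	settings := map[string]interface{}{}

	encoded, err := json.Marshal(settings)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(encoded)) // prints: {}
}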
64 changes: 35 additions & 29 deletions internal/services/batch/batch_pool.go
@@ -756,79 +756,85 @@ func expandBatchPoolVirtualMachineConfig(d *pluginsdk.ResourceData) (*pool.Virtu
return nil, fmt.Errorf("storage_image_reference either is empty or contains parsing errors")
}

if containerConfiguration, err := ExpandBatchPoolContainerConfiguration(d.Get("container_configuration").([]interface{})); err == nil {
result.ContainerConfiguration = containerConfiguration
} else {
return nil, fmt.Errorf("container_configuration either is empty or contains parsing errors")
if v, ok := d.GetOk("container_configuration"); ok {
if containerConfiguration, err := ExpandBatchPoolContainerConfiguration(v.([]interface{})); err == nil {
result.ContainerConfiguration = containerConfiguration
} else {
return nil, fmt.Errorf("container_configuration either is empty or contains parsing errors")
}
}

if dataDisk, diskErr := expandBatchPoolDataDisks(d.Get("data_disks").([]interface{})); diskErr == nil {
result.DataDisks = dataDisk
if v, ok := d.GetOk("data_disks"); ok {
result.DataDisks = expandBatchPoolDataDisks(v.([]interface{}))
}

if diskEncryptionConfig, diskEncryptionErr := expandBatchPoolDiskEncryptionConfiguration(d.Get("disk_encryption").([]interface{})); diskEncryptionErr == nil {
result.DiskEncryptionConfiguration = diskEncryptionConfig
} else {
return nil, diskEncryptionErr
}

if extensions, extErr := expandBatchPoolExtensions(d.Get("extensions").([]interface{})); extErr == nil {
result.Extensions = extensions
} else {
return nil, extErr
}

if licenseType, ok := d.GetOk("license_type"); ok {
result.LicenseType = utils.String(licenseType.(string))
}

if nodeReplacementConfig, nodeRepCfgErr := expandBatchPoolNodeReplacementConfig(d.Get("node_placement").([]interface{})); nodeRepCfgErr == nil {
result.NodePlacementConfiguration = nodeReplacementConfig
if v, ok := d.GetOk("node_placement"); ok {
result.NodePlacementConfiguration = expandBatchPoolNodeReplacementConfig(v.([]interface{}))
}

if osDisk, osDiskErr := expandBatchPoolOSDisk(d.Get("os_disk_placement")); osDiskErr == nil {
result.OsDisk = osDisk
if v, ok := d.GetOk("os_disk_placement"); ok {
result.OsDisk = expandBatchPoolOSDisk(v)
}

if windowsConfiguration, windowsConfigErr := expandBatchPoolWindowsConfiguration(d.Get("windows").([]interface{})); windowsConfigErr == nil {
result.WindowsConfiguration = windowsConfiguration
if v, ok := d.GetOk("windows"); ok {
result.WindowsConfiguration = expandBatchPoolWindowsConfiguration(v.([]interface{}))
}

return &result, nil
}

func expandBatchPoolOSDisk(ref interface{}) (*pool.OSDisk, error) {
func expandBatchPoolOSDisk(ref interface{}) *pool.OSDisk {
if ref == nil {
return nil, fmt.Errorf("os_disk_placement is empty")
return nil
}

return &pool.OSDisk{
EphemeralOSDiskSettings: &pool.DiffDiskSettings{
Placement: pointer.To(pool.DiffDiskPlacement(ref.(string))),
},
}, nil
}
}

func expandBatchPoolNodeReplacementConfig(list []interface{}) (*pool.NodePlacementConfiguration, error) {
func expandBatchPoolNodeReplacementConfig(list []interface{}) *pool.NodePlacementConfiguration {
if len(list) == 0 || list[0] == nil {
return nil, fmt.Errorf("node_placement is empty")
return nil
}
item := list[0].(map[string]interface{})["policy"].(string)
return &pool.NodePlacementConfiguration{
Policy: pointer.To(pool.NodePlacementPolicyType(item)),
}, nil
}
}

func expandBatchPoolWindowsConfiguration(list []interface{}) (*pool.WindowsConfiguration, error) {
if len(list) == 0 || list[0] == nil {
return nil, fmt.Errorf("windows is empty")
func expandBatchPoolWindowsConfiguration(list []interface{}) *pool.WindowsConfiguration {
if len(list) == 0 {
return nil
}

item := list[0].(map[string]interface{})["enable_automatic_updates"].(bool)
return &pool.WindowsConfiguration{
EnableAutomaticUpdates: utils.Bool(item),
}, nil
}
}

func expandBatchPoolExtensions(list []interface{}) (*[]pool.VmExtension, error) {
if len(list) == 0 || list[0] == nil {
return nil, fmt.Errorf("extensions is empty")
return nil, nil
}

var result []pool.VmExtension
@@ -838,7 +844,7 @@ func expandBatchPoolExtensions(list []interface{}) (*[]pool.VmExtension, error)
if batchPoolExtension, err := expandBatchPoolExtension(item); err == nil {
result = append(result, *batchPoolExtension)
} else {
return nil, fmt.Errorf("cloud_service_configuration either is empty or contains parsing errors")
return nil, err
}
}

@@ -847,7 +853,7 @@ func expandBatchPoolExtensions(list []interface{}) (*[]pool.VmExtension, error)

func expandBatchPoolExtension(ref map[string]interface{}) (*pool.VmExtension, error) {
if len(ref) == 0 {
return nil, fmt.Errorf("extension is empty")
return nil, nil
}

result := pool.VmExtension{
@@ -891,7 +897,7 @@ func expandBatchPoolExtension(ref map[string]interface{}) (*pool.VmExtension, er

func expandBatchPoolDiskEncryptionConfiguration(list []interface{}) (*pool.DiskEncryptionConfiguration, error) {
if len(list) == 0 || list[0] == nil {
return nil, fmt.Errorf("disk_encryption is empty")
return nil, nil
}
var result pool.DiskEncryptionConfiguration

@@ -910,9 +916,9 @@ func expandBatchPoolDiskEncryptionConfiguration(list []interface{}) (*pool.DiskE
return &result, nil
}

func expandBatchPoolDataDisks(list []interface{}) (*[]pool.DataDisk, error) {
func expandBatchPoolDataDisks(list []interface{}) *[]pool.DataDisk {
if len(list) == 0 || list[0] == nil {
return nil, fmt.Errorf("data_disk is empty")
return nil
}
var result []pool.DataDisk

@@ -921,7 +927,7 @@ func expandBatchPoolDataDisks(list []interface{}) (*[]pool.DataDisk, error) {
result = append(result, expandBatchPoolDataDisk(item))
}

return &result, nil
return &result
}

func expandBatchPoolDataDisk(ref map[string]interface{}) pool.DataDisk {
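The batch_pool.go changes above follow a single pattern: optional blocks are only expanded when `d.GetOk` reports them as set, and expand helpers that cannot actually fail drop their error return value instead of reporting "is empty" errors for unset blocks. A rough, self-contained sketch of that pattern with a made-up block name — not the provider's real schema or helper names:

package main

import "fmt"

// expandPlacementPolicy mirrors the infallible-expander shape used above:
// no error return, nil for an absent block. (Illustrative only.)
func expandPlacementPolicy(list []interface{}) *string {
	if len(list) == 0 || list[0] == nil {
		return nil
	}
	policy := list[0].(map[string]interface{})["policy"].(string)
	return &policy
}

func main() {
	// Unset block: the expander simply returns nil; no error is raised.
	fmt.Println(expandPlacementPolicy(nil) == nil) // true

	// Configured block: expand and use the value.
	block := []interface{}{map[string]interface{}{"policy": "Zonal"}}
	if p := expandPlacementPolicy(block); p != nil {
		fmt.Println(*p) // Zonal
	}
}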
8 changes: 6 additions & 2 deletions internal/services/batch/batch_pool_resource.go
@@ -5,6 +5,7 @@ package batch

import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
@@ -667,7 +668,7 @@ func resourceBatchPool() *pluginsdk.Resource {
Optional: true,
ValidateFunc: validation.StringIsJSON,
},
"protected_settings": {
"protected_settings": { // todo 4.0 - should this actually be a map of key value pairs?
Type: pluginsdk.TypeString,
Optional: true,
Sensitive: true,
@@ -904,6 +905,8 @@ func resourceBatchPoolCreate(d *pluginsdk.ResourceData, meta interface{}) error
parameters.Properties.DeploymentConfiguration = &pool.DeploymentConfiguration{
VirtualMachineConfiguration: vmDeploymentConfiguration,
}
} else {
return deploymentErr
}

certificates := d.Get("certificate").([]interface{})
@@ -1208,10 +1211,11 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error {
extension["automatic_upgrade_enabled"] = *item.EnableAutomaticUpgrade
}
if item.Settings != nil {
extension["settings_json"], err = pluginsdk.FlattenJsonToString((*item.Settings).(map[string]interface{}))
settingValue, err := json.Marshal((*item.Settings).(map[string]interface{}))
if err != nil {
return fmt.Errorf("flattening `settings_json`: %+v", err)
}
extension["settings_json"] = string(settingValue)
}

for i := 0; i < n; i++ {
Expand Down
67 changes: 67 additions & 0 deletions internal/services/batch/batch_pool_resource_test.go
@@ -703,6 +703,26 @@ func TestAccBatchPool_extensions(t *testing.T) {
})
}

func TestAccBatchPool_extensionsWithEmptySettings(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}
data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.extensionsWithEmptySettings(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
check.That(data.ResourceName).Key("extensions.0.name").HasValue("OmsAgentForLinux"),
check.That(data.ResourceName).Key("extensions.0.publisher").HasValue("Microsoft.EnterpriseCloud.Monitoring"),
check.That(data.ResourceName).Key("extensions.0.type").HasValue("OmsAgentForLinux"),
check.That(data.ResourceName).Key("extensions.0.type_handler_version").HasValue("1.17"),
check.That(data.ResourceName).Key("extensions.0.auto_upgrade_minor_version").HasValue("true"),
check.That(data.ResourceName).Key("extensions.0.automatic_upgrade_enabled").HasValue("true"),
check.That(data.ResourceName).Key("extensions.0.settings_json").HasValue("{}"),
),
},
})
}

func TestAccBatchPool_interNodeCommunicationWithTaskSchedulingPolicy(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_batch_pool", "test")
r := BatchPoolResource{}
@@ -2395,6 +2415,53 @@ resource "azurerm_batch_pool" "test" {
`, template, data.RandomString, data.RandomString, data.RandomString)
}

func (BatchPoolResource) extensionsWithEmptySettings(data acceptance.TestData) string {
template := BatchPoolResource{}.template(data)
return fmt.Sprintf(`
%s
resource "azurerm_batch_account" "test" {
name = "testaccbatch%s"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}
resource "azurerm_log_analytics_workspace" "test" {
name = "testaccloganalytics%s"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
sku = "PerGB2018"
retention_in_days = 30
}
resource "azurerm_batch_pool" "test" {
name = "testaccpool%s"
resource_group_name = azurerm_resource_group.test.name
account_name = azurerm_batch_account.test.name
node_agent_sku_id = "batch.node.ubuntu 22.04"
vm_size = "Standard_A1"
extensions {
name = "OmsAgentForLinux"
publisher = "Microsoft.EnterpriseCloud.Monitoring"
settings_json = jsonencode({})
protected_settings = jsonencode({ "workspaceKey" = "${azurerm_log_analytics_workspace.test.primary_shared_key}" })
type = "OmsAgentForLinux"
type_handler_version = "1.17"
auto_upgrade_minor_version = true
automatic_upgrade_enabled = true
}
fixed_scale {
target_dedicated_nodes = 1
}
storage_image_reference {
publisher = "Canonical"
offer = "0001-com-ubuntu-server-jammy"
sku = "22_04-lts"
version = "latest"
}
}
`, template, data.RandomString, data.RandomString, data.RandomString)
}

func (BatchPoolResource) diskSettings(data acceptance.TestData) string {
template := BatchPoolResource{}.template(data)
return fmt.Sprintf(`
4 changes: 2 additions & 2 deletions website/docs/r/batch_pool.html.markdown
@@ -226,9 +226,9 @@ If specified, the extensions mentioned in this configuration will be installed o

~> **NOTE:** When `automatic_upgrade_enabled` is set to `true`, the `type_handler_version` is automatically updated by the Azure platform when a new version is available and any change in `type_handler_version` should be manually ignored by user.

* `settings_json` - (Optional) JSON formatted public settings for the extension.
* `settings_json` - (Optional) JSON formatted public settings for the extension, the value should be encoded with [`jsonencode`](https://developer.hashicorp.com/terraform/language/functions/jsonencode) function.

* `protected_settings` - (Optional) The extension can contain either `protected_settings` or `provision_after_extensions` or no protected settings at all.
* `protected_settings` - (Optional) JSON formatted protected settings for the extension, the value should be encoded with [`jsonencode`](https://developer.hashicorp.com/terraform/language/functions/jsonencode) function. The extension can contain either `protected_settings` or `provision_after_extensions` or no protected settings at all.

* `provision_after_extensions` - (Optional) The collection of extension names. Collection of extension names after which this extension needs to be provisioned.

