diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 60d4d32..bb9cd81 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -17,3 +17,7 @@ updates: directory: "examples/private_cluster" # Location of package manifests schedule: interval: "weekly" + - package-ecosystem: "terraform" # See documentation for possible values + directory: "examples/complete" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/auto_assignee.yml b/.github/workflows/auto_assignee.yml index 9acc9b8..f8b8bcd 100644 --- a/.github/workflows/auto_assignee.yml +++ b/.github/workflows/auto_assignee.yml @@ -7,7 +7,7 @@ on: workflow_dispatch: jobs: assignee: - uses: clouddrove/github-shared-workflows/.github/workflows/auto_assignee.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/auto_assignee.yml@master secrets: GITHUB: ${{ secrets.GITHUB }} with: diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml index eb86ae3..26392e2 100644 --- a/.github/workflows/automerge.yml +++ b/.github/workflows/automerge.yml @@ -4,9 +4,9 @@ on: pull_request: jobs: auto-merge: - uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@master secrets: GITHUB: ${{ secrets.GITHUB }} with: - tfcheck: 'private_cluster-example / Check code format' + tfcheck: 'complete-example / Check code format' ... diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml index b34acec..1ee6f78 100644 --- a/.github/workflows/changelog.yaml +++ b/.github/workflows/changelog.yaml @@ -7,7 +7,7 @@ on: workflow_dispatch: jobs: changelog: - uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@master secrets: inherit with: branch: 'master' diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml index 444164d..c4a5793 100644 --- a/.github/workflows/readme.yml +++ b/.github/workflows/readme.yml @@ -12,4 +12,4 @@ jobs: uses: clouddrove/github-shared-workflows/.github/workflows/readme.yml@master secrets: TOKEN : ${{ secrets.GITHUB }} - SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} \ No newline at end of file + SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} diff --git a/.github/workflows/tf-checks.yml b/.github/workflows/tf-checks.yml index b287445..b23f1b4 100644 --- a/.github/workflows/tf-checks.yml +++ b/.github/workflows/tf-checks.yml @@ -6,22 +6,22 @@ on: workflow_dispatch: jobs: basic-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/basic/' complete-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/complete/' private_cluster-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/private_cluster/' public_cluster-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: 
'./examples/public_cluster/' aks_with_microsoft_entra_id-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: - working_directory: './examples/aks_with_microsoft_entra_id/' \ No newline at end of file + working_directory: './examples/aks_with_microsoft_entra_id/' diff --git a/.github/workflows/tflint.yml b/.github/workflows/tflint.yml index 04cca22..71a6fc4 100644 --- a/.github/workflows/tflint.yml +++ b/.github/workflows/tflint.yml @@ -6,6 +6,6 @@ on: workflow_dispatch: jobs: tf-lint: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@master secrets: GITHUB: ${{ secrets.GITHUB }} diff --git a/.github/workflows/tfsec.yml b/.github/workflows/tfsec.yml index 7f1003f..c203751 100644 --- a/.github/workflows/tfsec.yml +++ b/.github/workflows/tfsec.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: jobs: tfsec: - uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@master secrets: inherit with: working_directory: '.' diff --git a/README.yaml b/README.yaml index 3790dae..e36a63c 100644 --- a/README.yaml +++ b/README.yaml @@ -1,3 +1,4 @@ + --- # # This is the canonical configuration for the `README.md` @@ -126,5 +127,3 @@ usage: |- log_analytics_workspace_id = module.log-analytics.workspace_id # when diagnostic_setting_enable = true && oms_agent_enabled = true } ``` - - diff --git a/aks.tf b/aks.tf new file mode 100644 index 0000000..a30f113 --- /dev/null +++ b/aks.tf @@ -0,0 +1,429 @@ + +resource "azurerm_kubernetes_cluster" "aks" { + count = var.enabled ? 1 : 0 + name = format("%s-aks", module.labels.id) + location = local.location + resource_group_name = local.resource_group_name + dns_prefix = replace(module.labels.id, "/[\\W_]/", "-") + kubernetes_version = var.kubernetes_version + automatic_upgrade_channel = var.automatic_upgrade_channel + sku_tier = var.aks_sku_tier + node_resource_group = var.node_resource_group == null ? format("%s-aks-node-rg", module.labels.id) : var.node_resource_group + disk_encryption_set_id = var.key_vault_id != null ? azurerm_disk_encryption_set.main[0].id : null + private_cluster_enabled = var.private_cluster_enabled + private_dns_zone_id = var.private_cluster_enabled ? 
local.private_dns_zone : null + http_application_routing_enabled = var.enable_http_application_routing + azure_policy_enabled = var.azure_policy_enabled + edge_zone = var.edge_zone + image_cleaner_enabled = var.image_cleaner_enabled + image_cleaner_interval_hours = var.image_cleaner_interval_hours + role_based_access_control_enabled = var.role_based_access_control_enabled + local_account_disabled = var.local_account_disabled + workload_identity_enabled = var.workload_identity_enabled + oidc_issuer_enabled = var.oidc_issuer_enabled + + default_node_pool { + name = local.default_node_pool.agents_pool_name + node_count = local.default_node_pool.count + vm_size = local.default_node_pool.vm_size + auto_scaling_enabled = local.default_node_pool.auto_scaling_enabled + min_count = local.default_node_pool.min_count + max_count = local.default_node_pool.max_count + max_pods = local.default_node_pool.max_pods + os_disk_type = local.default_node_pool.os_disk_type + os_disk_size_gb = local.default_node_pool.os_disk_size_gb + type = local.default_node_pool.type + vnet_subnet_id = local.default_node_pool.vnet_subnet_id + host_encryption_enabled = local.default_node_pool.host_encryption_enabled + node_public_ip_enabled = local.default_node_pool.node_public_ip_enabled + fips_enabled = local.default_node_pool.fips_enabled + node_labels = local.default_node_pool.node_labels + only_critical_addons_enabled = local.default_node_pool.only_critical_addons_enabled + orchestrator_version = local.default_node_pool.orchestrator_version + proximity_placement_group_id = local.default_node_pool.proximity_placement_group_id + scale_down_mode = local.default_node_pool.scale_down_mode + snapshot_id = local.default_node_pool.snapshot_id + tags = local.default_node_pool.tags + temporary_name_for_rotation = local.default_node_pool.temporary_name_for_rotation + ultra_ssd_enabled = local.default_node_pool.ultra_ssd_enabled + zones = local.default_node_pool.zones + node_network_profile { + node_public_ip_tags = var.node_public_ip_tags + } + + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + + dynamic "aci_connector_linux" { + for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] + + content { + subnet_name = var.aci_connector_linux_subnet_name + } + } + + dynamic "ingress_application_gateway" { + for_each = toset(var.ingress_application_gateway != null ? [var.ingress_application_gateway] : []) + + content { + gateway_id = ingress_application_gateway.value.gateway_id + gateway_name = ingress_application_gateway.value.gateway_name + subnet_cidr = ingress_application_gateway.value.subnet_cidr + subnet_id = ingress_application_gateway.value.subnet_id + } + } + + dynamic "key_management_service" { + for_each = var.kms_enabled ? ["key_management_service"] : [] + + content { + key_vault_key_id = var.kms_key_vault_key_id + key_vault_network_access = var.kms_key_vault_network_access + } + } + + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider_enabled ? 
["key_vault_secrets_provider"] : [] + + content { + secret_rotation_enabled = var.secret_rotation_enabled + secret_rotation_interval = var.secret_rotation_interval + } + } + + dynamic "kubelet_identity" { + for_each = var.kubelet_identity == null ? [] : [var.kubelet_identity] + content { + client_id = kubelet_identity.value.client_id + object_id = kubelet_identity.value.object_id + user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id + } + } + + dynamic "http_proxy_config" { + for_each = var.enable_http_proxy ? [1] : [] + + content { + http_proxy = var.http_proxy_config.http_proxy + https_proxy = var.http_proxy_config.https_proxy + no_proxy = var.http_proxy_config.no_proxy + } + } + + + dynamic "http_proxy_config" { + for_each = var.http_proxy_config != null ? ["http_proxy_config"] : [] + + content { + http_proxy = http_proxy_config.value.http_proxy + https_proxy = http_proxy_config.value.https_proxy + no_proxy = http_proxy_config.value.no_proxy + trusted_ca = http_proxy_config.value.trusted_ca + } + } + + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? [] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? [] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + + dynamic "api_server_access_profile" { + for_each = var.api_server_access_profile != null ? [1] : [] + content { + authorized_ip_ranges = var.api_server_access_profile.authorized_ip_ranges + } + } + + dynamic "auto_scaler_profile" { + for_each = var.auto_scaler_profile_enabled ? [var.auto_scaler_profile] : [] + + content { + balance_similar_node_groups = auto_scaler_profile.value.balance_similar_node_groups + empty_bulk_delete_max = auto_scaler_profile.value.empty_bulk_delete_max + expander = auto_scaler_profile.value.expander + max_graceful_termination_sec = auto_scaler_profile.value.max_graceful_termination_sec + max_node_provisioning_time = auto_scaler_profile.value.max_node_provisioning_time + max_unready_nodes = auto_scaler_profile.value.max_unready_nodes + max_unready_percentage = auto_scaler_profile.value.max_unready_percentage + new_pod_scale_up_delay = auto_scaler_profile.value.new_pod_scale_up_delay + scale_down_delay_after_add = auto_scaler_profile.value.scale_down_delay_after_add + scale_down_delay_after_delete = auto_scaler_profile.value.scale_down_delay_after_delete + scale_down_delay_after_failure = auto_scaler_profile.value.scale_down_delay_after_failure + scale_down_unneeded = auto_scaler_profile.value.scale_down_unneeded + scale_down_unready = auto_scaler_profile.value.scale_down_unready + scale_down_utilization_threshold = auto_scaler_profile.value.scale_down_utilization_threshold + scan_interval = auto_scaler_profile.value.scan_interval + skip_nodes_with_local_storage = auto_scaler_profile.value.skip_nodes_with_local_storage + skip_nodes_with_system_pods = auto_scaler_profile.value.skip_nodes_with_system_pods + } + } + + dynamic "maintenance_window_auto_upgrade" { + for_each = var.maintenance_window_auto_upgrade == null ? 
[] : [var.maintenance_window_auto_upgrade] + content { + frequency = maintenance_window_auto_upgrade.value.frequency + interval = maintenance_window_auto_upgrade.value.interval + duration = maintenance_window_auto_upgrade.value.duration + day_of_week = maintenance_window_auto_upgrade.value.day_of_week + day_of_month = maintenance_window_auto_upgrade.value.day_of_month + week_index = maintenance_window_auto_upgrade.value.week_index + start_time = maintenance_window_auto_upgrade.value.start_time + utc_offset = maintenance_window_auto_upgrade.value.utc_offset + start_date = maintenance_window_auto_upgrade.value.start_date + + dynamic "not_allowed" { + for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? [] : maintenance_window_auto_upgrade.value.not_allowed + content { + start = not_allowed.value.start + end = not_allowed.value.end + } + } + } + } + + dynamic "maintenance_window_node_os" { + for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] + content { + duration = maintenance_window_node_os.value.duration + frequency = maintenance_window_node_os.value.frequency + interval = maintenance_window_node_os.value.interval + day_of_month = maintenance_window_node_os.value.day_of_month + day_of_week = maintenance_window_node_os.value.day_of_week + start_date = maintenance_window_node_os.value.start_date + start_time = maintenance_window_node_os.value.start_time + utc_offset = maintenance_window_node_os.value.utc_offset + week_index = maintenance_window_node_os.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control == null ? [] : var.role_based_access_control + content { + tenant_id = azure_active_directory_role_based_access_control.value.tenant_id + admin_group_object_ids = !azure_active_directory_role_based_access_control.value.azure_rbac_enabled ? var.admin_group_id : null + azure_rbac_enabled = azure_active_directory_role_based_access_control.value.azure_rbac_enabled + } + } + + dynamic "microsoft_defender" { + for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] + + content { + log_analytics_workspace_id = var.log_analytics_workspace_id + } + } + + dynamic "oms_agent" { + for_each = var.oms_agent_enabled ? ["oms_agent"] : [] + + content { + log_analytics_workspace_id = var.log_analytics_workspace_id + msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled + } + } + + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + content { + mode = var.service_mesh_profile.mode + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + revisions = var.service_mesh_profile.revisions + } + } + dynamic "service_principal" { + for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] + + content { + client_id = var.client_id + client_secret = var.client_secret + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? 
["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile.blob_driver_enabled + disk_driver_enabled = var.storage_profile.disk_driver_enabled + # disk_driver_version = var.storage_profile.disk_driver_version + file_driver_enabled = var.storage_profile.file_driver_enabled + snapshot_controller_enabled = var.storage_profile.snapshot_controller_enabled + } + } + + identity { + type = var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? "UserAssigned" : "SystemAssigned" + } + + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = var.web_app_routing.dns_zone_id + } + } + + dynamic "linux_profile" { + for_each = var.linux_profile != null ? [true] : [] + iterator = lp + content { + admin_username = var.linux_profile.username + + ssh_key { + key_data = var.linux_profile.ssh_key + } + } + } + + dynamic "workload_autoscaler_profile" { + for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile] + + content { + keda_enabled = workload_autoscaler_profile.value.keda_enabled + vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled + } + } + + + + + dynamic "windows_profile" { + for_each = var.windows_profile != null ? [var.windows_profile] : [] + + content { + admin_username = windows_profile.value.admin_username + admin_password = windows_profile.value.admin_password + license = windows_profile.value.license + + dynamic "gmsa" { + for_each = windows_profile.value.gmsa != null ? [windows_profile.value.gmsa] : [] + + content { + dns_server = gmsa.value.dns_server + root_domain = gmsa.value.root_domain + } + } + } + } + + network_profile { + network_plugin = var.network_plugin + network_policy = var.network_policy + network_data_plane = var.network_data_plane + dns_service_ip = cidrhost(var.service_cidr, 10) + service_cidr = var.service_cidr + load_balancer_sku = var.load_balancer_sku + network_plugin_mode = var.network_plugin_mode + outbound_type = var.outbound_type + pod_cidr = var.net_profile_pod_cidr + + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [1] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + } + depends_on = [ + azurerm_role_assignment.aks_uai_private_dns_zone_contributor, + ] + tags = module.labels.tags +} + diff --git a/diagnostic.tf b/diagnostic.tf new file mode 100644 index 0000000..468dfea --- /dev/null +++ b/diagnostic.tf @@ -0,0 +1,110 @@ + +resource "azurerm_monitor_diagnostic_setting" "aks_diag" { + depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 
1 : 0 + name = format("%s-aks-diag-log", module.labels.id) + target_resource_id = azurerm_kubernetes_cluster.aks[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "metric" { + for_each = var.metric_enabled ? ["AllMetrics"] : [] + content { + category = metric.value + enabled = true + } + } + dynamic "enabled_log" { + for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] + content { + category = var.kv_logs.category != null ? enabled_log.value : null + category_group = var.kv_logs.category == null ? enabled_log.value : null + } + } + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} + +resource "azurerm_monitor_diagnostic_setting" "pip_aks" { + depends_on = [data.azurerm_resources.aks_pip, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + name = format("%s-aks-pip-diag-log", module.labels.id) + target_resource_id = data.azurerm_resources.aks_pip[count.index].resources[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "metric" { + for_each = var.metric_enabled ? ["AllMetrics"] : [] + content { + category = metric.value + enabled = true + } + } + dynamic "enabled_log" { + for_each = var.pip_logs.enabled ? var.pip_logs.category != null ? var.pip_logs.category : var.pip_logs.category_group : [] + content { + category = var.pip_logs.category != null ? enabled_log.value : null + category_group = var.pip_logs.category == null ? enabled_log.value : null + } + } + + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} + + +resource "azurerm_monitor_diagnostic_setting" "aks-nsg" { + depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + name = format("%s-aks-nsg-diag-log", module.labels.id) + target_resource_id = data.azurerm_resources.aks_nsg[count.index].resources[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "enabled_log" { + for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] + content { + category = var.kv_logs.category != null ? enabled_log.value : null + category_group = var.kv_logs.category == null ? enabled_log.value : null + } + } + + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} + +resource "azurerm_monitor_diagnostic_setting" "aks-nic" { + depends_on = [data.azurerm_resources.aks_nic, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 
1 : 0 + name = format("%s-aks-nic-dia-log", module.labels.id) + target_resource_id = data.azurerm_resources.aks_nic[count.index].resources[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "metric" { + for_each = var.metric_enabled ? ["AllMetrics"] : [] + content { + category = metric.value + enabled = true + } + } + + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} diff --git a/examples/aks_with_microsoft_entra_id/example.tf b/examples/aks_with_microsoft_entra_id/example.tf index a8e0bf0..6c06200 100644 --- a/examples/aks_with_microsoft_entra_id/example.tf +++ b/examples/aks_with_microsoft_entra_id/example.tf @@ -1,5 +1,11 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } data "azurerm_client_config" "current_client_config" {} @@ -52,7 +58,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -64,8 +70,12 @@ module "log-analytics" { module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" + version = "1.2.0" name = "apptestwvshaks" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. + azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location diff --git a/examples/aks_with_microsoft_entra_id/versions.tf b/examples/aks_with_microsoft_entra_id/versions.tf index 18fc9ba..33578d1 100644 --- a/examples/aks_with_microsoft_entra_id/versions.tf +++ b/examples/aks_with_microsoft_entra_id/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 4.0.1" } } } diff --git a/examples/basic/versions.tf b/examples/basic/versions.tf index f3fa032..0619d27 100644 --- a/examples/basic/versions.tf +++ b/examples/basic/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 4.0.1" } } } \ No newline at end of file diff --git a/examples/complete/example.tf b/examples/complete/example.tf index 8a848d4..2545ba9 100644 --- a/examples/complete/example.tf +++ b/examples/complete/example.tf @@ -1,14 +1,21 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} + data "azurerm_client_config" "current_client_config" {} module "resource_group" { source = "clouddrove/resource-group/azure" version = "1.0.2" - name = "Public-app" - environment = "test" + name = "Public-app1" + environment = "test2" label_order = ["name", "environment", ] location = "Canada Central" } @@ -17,8 +24,8 @@ module "vnet" { source = "clouddrove/vnet/azure" version = "1.0.4" - name = "app" - environment = "test" + name = "app1" + environment = "test2" label_order = ["name", "environment"] resource_group_name = module.resource_group.resource_group_name 
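The example changes above pin each azurerm provider to a subscription and introduce a second, aliased provider that is handed down to the key-vault module. A minimal sketch of that wiring, with placeholder subscription IDs and only the provider-related inputs shown (the remaining key-vault inputs follow the examples in this diff):

provider "azurerm" {
  features {}
  subscription_id = "00000000-0000-0000-0000-000000000000" # placeholder
}

# Aliased provider for the subscription that hosts the private DNS zone.
provider "azurerm" {
  features {}
  alias           = "peer"
  subscription_id = "00000000-0000-0000-0000-000000000000" # placeholder
}

module "vault" {
  source  = "clouddrove/key-vault/azure"
  version = "1.2.0"
  name    = "examplevault" # hypothetical name

  # Map the module's provider aliases onto the root configurations; point
  # dns_sub at a different alias if the DNS zone lives in another subscription.
  providers = {
    azurerm.dns_sub  = azurerm.peer
    azurerm.main_sub = azurerm
  }

  # ...remaining key-vault inputs as in the examples above...
}

Terraform only allows a child module to receive an aliased provider through a providers map when that alias is declared in the calling configuration, which is why each example now declares the "peer" provider at the root.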
location = module.resource_group.resource_group_location @@ -29,8 +36,8 @@ module "subnet" { source = "clouddrove/subnet/azure" version = "1.2.1" - name = "app" - environment = "test" + name = "app1" + environment = "test2" label_order = ["name", "environment"] resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -43,7 +50,7 @@ module "subnet" { # route_table routes = [ { - name = "rt-test" + name = "rt_test" address_prefix = "0.0.0.0/0" next_hop_type = "Internet" } @@ -52,7 +59,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -60,12 +67,17 @@ module "log-analytics" { log_analytics_workspace_sku = "PerGB2018" resource_group_name = module.resource_group.resource_group_name log_analytics_workspace_location = module.resource_group.resource_group_location + log_analytics_workspace_id = module.log-analytics.workspace_id } module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" - name = "appakstest" + version = "1.2.0" + name = "vjsn-738" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. + azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -85,41 +97,66 @@ module "vault" { reader_objects_ids = [data.azurerm_client_config.current_client_config.object_id] admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting - diagnostic_setting_enable = false - log_analytics_workspace_id = module.log-analytics.workspace_id ## when diagnostic_setting_enable = true, need to add log analytics workspace id + diagnostic_setting_enable = true + log_analytics_workspace_id = module.log-analytics.workspace_id } module "aks" { - source = "../.." - name = "app1" - environment = "test" - + source = "../../" + name = "app-yum" + environment = "test" resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location - kubernetes_version = "1.27.7" + kubernetes_version = "1.28.9" private_cluster_enabled = false + default_node_pool = { - name = "agentpool1" - max_pods = 200 - os_disk_size_gb = 64 - vm_size = "Standard_B4ms" - count = 1 - enable_node_public_ip = false - max_surge = "33%" + name = "default-nodepool" + max_pods = 200 + os_disk_size_gb = 64 + vm_size = "Standard_B4ms" + count = 3 + node_public_ip_enabled = false + auto_scaling_enabled = true + min_count = 3 + max_count = 5 } ##### if requred more than one node group. 
nodes_pools = [ { - name = "nodegroup2" - max_pods = 200 - os_disk_size_gb = 64 - vm_size = "Standard_B4ms" - count = 2 - enable_node_public_ip = false - mode = "User" - max_surge = "33%" + name = "nodepool2" + max_pods = 30 + os_disk_size_gb = 64 + vm_size = "Standard_B4ms" + count = 2 + node_public_ip_enabled = true + mode = "User" + auto_scaling_enabled = true + min_count = 3 + max_count = 5 + node_labels = { + "sfvfv" = "spot" + } + }, + { + name = "spotnodepool" + max_pods = null + os_disk_size_gb = null + vm_size = "Standard_D2_v3" + count = 1 + node_public_ip_enabled = false + mode = null + auto_scaling_enabled = true + min_count = 1 + max_count = 1 + node_labels = { + "dsvdv" = "spot" + } + priority = "Spot" + eviction_policy = "Delete" + spot_max_price = -1 }, ] @@ -132,7 +169,15 @@ module "aks" { admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting. - microsoft_defender_enabled = true + microsoft_defender_enabled = false diagnostic_setting_enable = true - log_analytics_workspace_id = module.log-analytics.workspace_id # when diagnostic_setting_enable = true && oms_agent_enabled = true + log_analytics_workspace_id = module.log-analytics.workspace_id +} + +output "test1" { + value = module.aks.nodes_pools_with_defaults +} + +output "test" { + value = module.aks.nodes_pools } diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 18fc9ba..33578d1 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 4.0.1" } } } diff --git a/examples/private_cluster/example.tf b/examples/private_cluster/example.tf index 73d6f4e..691daa0 100644 --- a/examples/private_cluster/example.tf +++ b/examples/private_cluster/example.tf @@ -1,14 +1,21 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} + data "azurerm_client_config" "current_client_config" {} module "resource_group" { source = "clouddrove/resource-group/azure" version = "1.0.2" - name = "app" - environment = "test" + name = "app-1" + environment = "test-2" label_order = ["name", "environment", ] location = "Canada Central" } @@ -27,7 +34,7 @@ module "vnet" { module "subnet" { source = "clouddrove/subnet/azure" - version = "1.2.0" + version = "1.2.1" name = "app" environment = "test" @@ -52,7 +59,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -60,12 +67,17 @@ module "log-analytics" { log_analytics_workspace_sku = "PerGB2018" resource_group_name = module.resource_group.resource_group_name log_analytics_workspace_location = module.resource_group.resource_group_location + log_analytics_workspace_id = module.log-analytics.workspace_id } module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" - name = "apptest5rds4556" + version = "1.2.0" + name = "apptest3428335" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. 
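The complete example now adds a Spot-priority entry to nodes_pools. As a hedged illustration only (the resource name and label values here are hypothetical), this is roughly what such an entry corresponds to at the azurerm provider level, using the v4-style attribute names applied throughout this diff:

resource "azurerm_kubernetes_cluster_node_pool" "spot_example" {
  name                  = "spotpool"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.aks[0].id
  vm_size               = "Standard_D2_v3"
  mode                  = "User" # Spot pools cannot be System pools

  priority        = "Spot"
  eviction_policy = "Delete"
  spot_max_price  = -1 # -1 means "pay up to the current on-demand price"

  auto_scaling_enabled = true
  min_count            = 1
  max_count            = 1

  # Azure attaches this label and taint to Spot pools; declaring them in the
  # configuration avoids plan drift.
  node_labels = {
    "kubernetes.azure.com/scalesetpriority" = "spot"
  }
  node_taints = [
    "kubernetes.azure.com/scalesetpriority=spot:NoSchedule",
  ]
}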
+ azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -85,7 +97,7 @@ module "vault" { reader_objects_ids = [data.azurerm_client_config.current_client_config.object_id] admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting - diagnostic_setting_enable = false + diagnostic_setting_enable = true log_analytics_workspace_id = module.log-analytics.workspace_id ## when diagnostic_setting_enable = true, need to add log analytics workspace id } @@ -97,12 +109,13 @@ module "aks" { resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location - kubernetes_version = "1.27" + kubernetes_version = "1.28.9" + default_node_pool = { - name = "agentpool" + name = "agentpool1" max_pods = 200 os_disk_size_gb = 64 - vm_size = "Standard_B2s" + vm_size = "Standard_B4ms" count = 1 enable_node_public_ip = false } diff --git a/examples/private_cluster/versions.tf b/examples/private_cluster/versions.tf index 18fc9ba..ace4aa4 100644 --- a/examples/private_cluster/versions.tf +++ b/examples/private_cluster/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 3.112.0" } } } diff --git a/examples/public_cluster/example.tf b/examples/public_cluster/example.tf index 8e41cf0..7523b6e 100644 --- a/examples/public_cluster/example.tf +++ b/examples/public_cluster/example.tf @@ -1,6 +1,14 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } + +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} + data "azurerm_client_config" "current_client_config" {} module "resource_group" { @@ -27,7 +35,7 @@ module "vnet" { module "subnet" { source = "clouddrove/subnet/azure" - version = "1.2.0" + version = "1.2.1" name = "app" environment = "test" @@ -52,7 +60,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -60,12 +68,17 @@ module "log-analytics" { log_analytics_workspace_sku = "PerGB2018" resource_group_name = module.resource_group.resource_group_name log_analytics_workspace_location = module.resource_group.resource_group_location + log_analytics_workspace_id = module.log-analytics.workspace_id } module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" + version = "1.2.0" name = "apptest5rds4556" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. + azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -85,20 +98,20 @@ module "vault" { reader_objects_ids = [data.azurerm_client_config.current_client_config.object_id] admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting - diagnostic_setting_enable = false + diagnostic_setting_enable = true log_analytics_workspace_id = module.log-analytics.workspace_id ## when diagnostic_setting_enable = true, need to add log analytics workspace id } module "aks" { - source = "../.." - name = "app" - environment = "test" - + source = "../.." 
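The private_cluster example keeps the module's default, system-managed private DNS zone. For reference, a hedged sketch of how a caller could supply a custom zone instead; the zone resource below is hypothetical, and per the module's locals the cluster identity switches to UserAssigned when private_dns_zone_type is "Custom":

resource "azurerm_private_dns_zone" "aks" {
  name                = "privatelink.canadacentral.azmk8s.io"
  resource_group_name = module.resource_group.resource_group_name
}

module "aks_private_custom_dns" {
  source = "../.."

  name        = "app"
  environment = "test"

  resource_group_name = module.resource_group.resource_group_name
  location            = module.resource_group.resource_group_location
  kubernetes_version  = "1.28.9"

  private_cluster_enabled = true
  private_dns_zone_type   = "Custom"
  private_dns_zone_id     = azurerm_private_dns_zone.aks.id

  # ...node pool, subnet and key-vault inputs as in the private_cluster example...
}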
+ name = "app" + environment = "test" resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location - kubernetes_version = "1.27.7" + kubernetes_version = "1.28.9" private_cluster_enabled = false + default_node_pool = { name = "agentpool1" max_pods = 200 @@ -108,7 +121,6 @@ module "aks" { enable_node_public_ip = false } - ##### if requred more than one node group. nodes_pools = [ { diff --git a/examples/public_cluster/versions.tf b/examples/public_cluster/versions.tf index 18fc9ba..93f5d2c 100644 --- a/examples/public_cluster/versions.tf +++ b/examples/public_cluster/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 3.108.0" } } -} +} \ No newline at end of file diff --git a/extensions.tf b/extensions.tf new file mode 100644 index 0000000..b45d197 --- /dev/null +++ b/extensions.tf @@ -0,0 +1,36 @@ + +resource "azurerm_kubernetes_cluster_extension" "flux" { + depends_on = [azurerm_kubernetes_cluster.aks] + count = var.flux_enable ? 1 : 0 + name = "flux-extension" + cluster_id = join("", azurerm_kubernetes_cluster.aks[0].id) + extension_type = "microsoft.flux" + configuration_settings = { + "image-automation-controller.ssh-host-key-args" = "--ssh-hostkey-algos=rsa-sha2-512,rsa-sha2-256" + "multiTenancy.enforce" = "false" + "source-controller.ssh-host-key-args" = "--ssh-hostkey-algos=rsa-sha2-512,rsa-sha2-256" + } +} + +resource "azurerm_kubernetes_flux_configuration" "flux" { + depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_extension.flux] + count = var.flux_enable ? 1 : 0 + name = "flux-conf" + cluster_id = join("", azurerm_kubernetes_cluster.aks[0].id) + namespace = "flux-system" + scope = "cluster" + + git_repository { + url = var.flux_git_repo_url != "" ? var.flux_git_repo_url : "" + reference_type = "branch" + reference_value = var.flux_git_repo_branch + ssh_private_key_base64 = var.ssh_private_key_base64 != "" ? var.ssh_private_key_base64 : "" + } + + kustomizations { + name = "flux-system-kustomization" + timeout_in_seconds = var.flux_timeout_in_seconds + sync_interval_in_seconds = var.flux_sync_interval_in_seconds + retry_interval_in_seconds = var.flux_retry_interval_in_seconds + } +} diff --git a/locals.tf b/locals.tf new file mode 100644 index 0000000..e8fa4b7 --- /dev/null +++ b/locals.tf @@ -0,0 +1,115 @@ + +data "azurerm_subscription" "current" {} +data "azurerm_client_config" "current" {} + + +locals { + private_dns_zone = var.private_dns_zone_type == "Custom" ? 
var.private_dns_zone_id : var.private_dns_zone_type + resource_group_name = var.resource_group_name + location = var.location + default_agent_profile = { + agents_pool_name = "agentpool" + count = 1 + vm_size = "Standard_D2_v3" + os_type = "Linux" + auto_scaling_enabled = false + host_encryption_enabled = false + min_count = null + max_count = null + type = "VirtualMachineScaleSets" + node_taints = null + vnet_subnet_id = var.nodes_subnet_id + max_pods = 30 + os_disk_type = "Managed" + os_disk_size_gb = 128 + host_group_id = null + orchestrator_version = null + node_public_ip_enabled = false + mode = "System" + fips_enabled = null + node_labels = null + only_critical_addons_enabled = null + proximity_placement_group_id = null + scale_down_mode = null + snapshot_id = null + tags = null + temporary_name_for_rotation = null + ultra_ssd_enabled = null + zones = null + priority = null + eviction_policy = null + spot_max_price = null + } + default_node_pool = merge(local.default_agent_profile, var.default_node_pool) + nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_agent_profile, ap)] + nodes_pools = [for ap in local.nodes_pools_with_defaults : ap.os_type == "Linux" ? merge(local.default_linux_node_profile, ap) : merge(local.default_windows_node_profile, ap)] + # Defaults for Linux profile + # Generally smaller images so can run more pods and require smaller HD + default_linux_node_profile = { + max_pods = 30 + os_disk_size_gb = 128 + } + # Defaults for Windows profile + # Do not want to run same number of pods and some images can be quite large + default_windows_node_profile = { + max_pods = 20 + os_disk_size_gb = 256 + } + +} + +output "nodes_pools_with_defaults" { + value = local.nodes_pools_with_defaults +} + +output "nodes_pools" { + value = local.nodes_pools +} + +module "labels" { + + source = "clouddrove/labels/azure" + version = "1.0.0" + name = var.name + environment = var.environment + managedby = var.managedby + label_order = var.label_order + repository = var.repository +} + +##----------------------------------------------------------------------------- +## DATA BLOCKS FOR DIAGNOSTIC.TF +##----------------------------------------------------------------------------- +data "azurerm_resources" "aks_pip" { + depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + type = "Microsoft.Network/publicIPAddresses" + required_tags = { + Environment = var.environment + Name = module.labels.id + Repository = var.repository + } +} + +data "azurerm_resources" "aks_nsg" { + depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + type = "Microsoft.Network/networkSecurityGroups" + required_tags = { + Environment = var.environment + Name = module.labels.id + Repository = var.repository + } +} + + +data "azurerm_resources" "aks_nic" { + depends_on = [azurerm_kubernetes_cluster.aks] + count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 + type = "Microsoft.Network/networkInterfaces" + required_tags = { + Environment = var.environment + Name = module.labels.id + Repository = var.repository + } +} diff --git a/main.tf b/main.tf deleted file mode 100644 index 2730b5c..0000000 --- a/main.tf +++ /dev/null @@ -1,922 +0,0 @@ -## Managed By : CloudDrove -## Copyright @ CloudDrove. All Right Reserved. 
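The new locals.tf builds every node pool by layering the caller's map over a default profile with merge(), then applies OS-specific defaults on top (which is why Windows pools end up with max_pods = 20 and a larger OS disk). A small, self-contained illustration of that defaulting behaviour, using hypothetical pool values:

locals {
  # Trimmed-down stand-in for default_agent_profile.
  default_agent_profile = {
    vm_size  = "Standard_D2_v3"
    max_pods = 30
    os_type  = "Linux"
  }

  # What a caller might pass in nodes_pools.
  pools_in = [
    { name = "general", vm_size = "Standard_B4ms" },      # overrides vm_size only
    { name = "win", os_type = "Windows", max_pods = 20 }, # overrides os_type and max_pods
  ]

  # Later arguments to merge() win, so any key the caller omits falls back
  # to the default profile.
  pools = [for p in local.pools_in : merge(local.default_agent_profile, p)]
  # pools[0] => { name = "general", vm_size = "Standard_B4ms", max_pods = 30, os_type = "Linux" }
}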
- -## Vritual Network and Subnet Creation - -data "azurerm_subscription" "current" {} -data "azurerm_client_config" "current" {} - - -locals { - resource_group_name = var.resource_group_name - location = var.location - default_agent_profile = { - name = "agentpool" - count = 1 - vm_size = "Standard_D2_v3" - os_type = "Linux" - enable_auto_scaling = false - enable_host_encryption = true - min_count = null - max_count = null - type = "VirtualMachineScaleSets" - node_taints = null - vnet_subnet_id = var.nodes_subnet_id - max_pods = 30 - os_disk_type = "Managed" - os_disk_size_gb = 128 - host_group_id = null - orchestrator_version = null - enable_node_public_ip = false - mode = "System" - node_soak_duration_in_minutes = null - max_surge = null - drain_timeout_in_minutes = null - } - - default_node_pool = merge(local.default_agent_profile, var.default_node_pool) - nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_agent_profile, ap)] - nodes_pools = [for ap in local.nodes_pools_with_defaults : ap.os_type == "Linux" ? merge(local.default_linux_node_profile, ap) : merge(local.default_windows_node_profile, ap)] - # Defaults for Linux profile - # Generally smaller images so can run more pods and require smaller HD - default_linux_node_profile = { - max_pods = 30 - os_disk_size_gb = 128 - } - - # Defaults for Windows profile - # Do not want to run same number of pods and some images can be quite large - default_windows_node_profile = { - max_pods = 20 - os_disk_size_gb = 256 - } -} - -module "labels" { - - source = "clouddrove/labels/azure" - version = "1.0.0" - name = var.name - environment = var.environment - managedby = var.managedby - label_order = var.label_order - repository = var.repository -} - -locals { - private_dns_zone = var.private_dns_zone_type == "Custom" ? var.private_dns_zone_id : var.private_dns_zone_type -} - -resource "azurerm_kubernetes_cluster" "aks" { - count = var.enabled ? 1 : 0 - name = format("%s-aks", module.labels.id) - location = local.location - resource_group_name = local.resource_group_name - dns_prefix = replace(module.labels.id, "/[\\W_]/", "-") - kubernetes_version = var.kubernetes_version - automatic_channel_upgrade = var.automatic_channel_upgrade - sku_tier = var.aks_sku_tier - node_resource_group = var.node_resource_group == null ? format("%s-aks-node-rg", module.labels.id) : var.node_resource_group - disk_encryption_set_id = var.key_vault_id != null ? azurerm_disk_encryption_set.main[0].id : null - private_cluster_enabled = var.private_cluster_enabled - private_dns_zone_id = var.private_cluster_enabled ? local.private_dns_zone : null - http_application_routing_enabled = var.enable_http_application_routing - azure_policy_enabled = var.azure_policy_enabled - edge_zone = var.edge_zone - image_cleaner_enabled = var.image_cleaner_enabled - image_cleaner_interval_hours = var.image_cleaner_interval_hours - role_based_access_control_enabled = var.role_based_access_control_enabled - local_account_disabled = var.local_account_disabled - - dynamic "default_node_pool" { - for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] - - content { - name = var.agents_pool_name - vm_size = var.agents_size - enable_auto_scaling = var.enable_auto_scaling - enable_host_encryption = var.enable_host_encryption - enable_node_public_ip = var.enable_node_public_ip - fips_enabled = var.default_node_pool_fips_enabled - max_count = var.agents_max_count - max_pods = var.agents_max_pods - min_count = var.agents_min_count - node_labels = var.agents_labels - only_critical_addons_enabled = var.only_critical_addons_enabled - orchestrator_version = var.orchestrator_version - os_disk_size_gb = var.os_disk_size_gb - os_disk_type = var.os_disk_type - os_sku = var.os_sku - pod_subnet_id = var.pod_subnet_id - proximity_placement_group_id = var.agents_proximity_placement_group_id - scale_down_mode = var.scale_down_mode - snapshot_id = var.snapshot_id - tags = merge(var.tags, var.agents_tags) - temporary_name_for_rotation = var.temporary_name_for_rotation - type = var.agents_type - ultra_ssd_enabled = var.ultra_ssd_enabled - vnet_subnet_id = var.vnet_subnet_id - zones = var.agents_availability_zones - - node_network_profile { - node_public_ip_tags = var.node_public_ip_tags - } - dynamic "kubelet_config" { - for_each = var.agents_pool_kubelet_configs - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - } - } - - dynamic "aci_connector_linux" { - for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] - - content { - subnet_name = var.aci_connector_linux_subnet_name - } - } - - - dynamic "ingress_application_gateway" { - for_each = toset(var.ingress_application_gateway != null ? [var.ingress_application_gateway] : []) - - content { - gateway_id = ingress_application_gateway.value.gateway_id - gateway_name = ingress_application_gateway.value.gateway_name - subnet_cidr = ingress_application_gateway.value.subnet_cidr - subnet_id = ingress_application_gateway.value.subnet_id - } - } - - dynamic "key_management_service" { - for_each = var.kms_enabled ? ["key_management_service"] : [] - - content { - key_vault_key_id = var.kms_key_vault_key_id - key_vault_network_access = var.kms_key_vault_network_access - } - } - - dynamic "key_vault_secrets_provider" { - for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] - - content { - secret_rotation_enabled = var.secret_rotation_enabled - secret_rotation_interval = var.secret_rotation_interval - } - } - - dynamic "kubelet_identity" { - for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] - content { - client_id = kubelet_identity.value.client_id - object_id = kubelet_identity.value.object_id - user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id - } - } - - dynamic "http_proxy_config" { - for_each = var.enable_http_proxy ? [1] : [] - - content { - http_proxy = var.http_proxy_config.http_proxy - https_proxy = var.http_proxy_config.https_proxy - no_proxy = var.http_proxy_config.no_proxy - } - } - - dynamic "confidential_computing" { - for_each = var.confidential_computing == null ? [] : [var.confidential_computing] - - content { - sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled - } - } - - dynamic "api_server_access_profile" { - for_each = var.api_server_access_profile != null ? [1] : [] - - content { - authorized_ip_ranges = var.api_server_access_profile.authorized_ip_ranges - vnet_integration_enabled = var.api_server_access_profile.vnet_integration_enabled - subnet_id = var.api_server_access_profile.subnet_id - } - } - - dynamic "auto_scaler_profile" { - for_each = var.auto_scaler_profile_enabled ? [var.auto_scaler_profile] : [] - - content { - balance_similar_node_groups = auto_scaler_profile.value.balance_similar_node_groups - empty_bulk_delete_max = auto_scaler_profile.value.empty_bulk_delete_max - expander = auto_scaler_profile.value.expander - max_graceful_termination_sec = auto_scaler_profile.value.max_graceful_termination_sec - max_node_provisioning_time = auto_scaler_profile.value.max_node_provisioning_time - max_unready_nodes = auto_scaler_profile.value.max_unready_nodes - max_unready_percentage = auto_scaler_profile.value.max_unready_percentage - new_pod_scale_up_delay = auto_scaler_profile.value.new_pod_scale_up_delay - scale_down_delay_after_add = auto_scaler_profile.value.scale_down_delay_after_add - scale_down_delay_after_delete = auto_scaler_profile.value.scale_down_delay_after_delete - scale_down_delay_after_failure = auto_scaler_profile.value.scale_down_delay_after_failure - scale_down_unneeded = auto_scaler_profile.value.scale_down_unneeded - scale_down_unready = auto_scaler_profile.value.scale_down_unready - scale_down_utilization_threshold = auto_scaler_profile.value.scale_down_utilization_threshold - scan_interval = auto_scaler_profile.value.scan_interval - skip_nodes_with_local_storage = auto_scaler_profile.value.skip_nodes_with_local_storage - skip_nodes_with_system_pods = auto_scaler_profile.value.skip_nodes_with_system_pods - } - } - - dynamic "maintenance_window_auto_upgrade" { - for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] - content { - frequency = maintenance_window_auto_upgrade.value.frequency - interval = maintenance_window_auto_upgrade.value.interval - duration = maintenance_window_auto_upgrade.value.duration - day_of_week = maintenance_window_auto_upgrade.value.day_of_week - day_of_month = maintenance_window_auto_upgrade.value.day_of_month - week_index = maintenance_window_auto_upgrade.value.week_index - start_time = maintenance_window_auto_upgrade.value.start_time - utc_offset = maintenance_window_auto_upgrade.value.utc_offset - start_date = maintenance_window_auto_upgrade.value.start_date - - dynamic "not_allowed" { - for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed - content { - start = not_allowed.value.start - end = not_allowed.value.end - } - } - } - } - - dynamic "maintenance_window_node_os" { - for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] - content { - duration = maintenance_window_node_os.value.duration - frequency = maintenance_window_node_os.value.frequency - interval = maintenance_window_node_os.value.interval - day_of_month = maintenance_window_node_os.value.day_of_month - day_of_week = maintenance_window_node_os.value.day_of_week - start_date = maintenance_window_node_os.value.start_date - start_time = maintenance_window_node_os.value.start_time - utc_offset = maintenance_window_node_os.value.utc_offset - week_index = maintenance_window_node_os.value.week_index - - dynamic "not_allowed" { - for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - - dynamic "azure_active_directory_role_based_access_control" { - for_each = var.role_based_access_control == null ? [] : var.role_based_access_control - content { - managed = azure_active_directory_role_based_access_control.value.managed - tenant_id = azure_active_directory_role_based_access_control.value.tenant_id - admin_group_object_ids = !azure_active_directory_role_based_access_control.value.azure_rbac_enabled ? var.admin_group_id : null - azure_rbac_enabled = azure_active_directory_role_based_access_control.value.azure_rbac_enabled - } - } - default_node_pool { - name = local.default_node_pool.name - node_count = local.default_node_pool.count - vm_size = local.default_node_pool.vm_size - enable_auto_scaling = local.default_node_pool.enable_auto_scaling - min_count = local.default_node_pool.min_count - max_count = local.default_node_pool.max_count - max_pods = local.default_node_pool.max_pods - os_disk_type = local.default_node_pool.os_disk_type - os_disk_size_gb = local.default_node_pool.os_disk_size_gb - type = local.default_node_pool.type - vnet_subnet_id = local.default_node_pool.vnet_subnet_id - temporary_name_for_rotation = var.temporary_name_for_rotation - enable_host_encryption = local.default_node_pool.enable_host_encryption - dynamic "upgrade_settings" { - for_each = local.default_node_pool.max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = local.default_node_pool.max_surge - node_soak_duration_in_minutes = local.default_node_pool.node_soak_duration_in_minutes - drain_timeout_in_minutes = local.default_node_pool.drain_timeout_in_minutes - } - } - } - - dynamic "microsoft_defender" { - for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] - - content { - log_analytics_workspace_id = var.log_analytics_workspace_id - } - } - - dynamic "oms_agent" { - for_each = var.oms_agent_enabled ? ["oms_agent"] : [] - - content { - log_analytics_workspace_id = var.log_analytics_workspace_id - msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled - } - } - - dynamic "service_mesh_profile" { - for_each = var.service_mesh_profile == null ? 
[] : ["service_mesh_profile"] - content { - mode = var.service_mesh_profile.mode - external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled - internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled - } - } - dynamic "service_principal" { - for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] - - content { - client_id = var.client_id - client_secret = var.client_secret - } - } - dynamic "storage_profile" { - for_each = var.storage_profile_enabled ? ["storage_profile"] : [] - - content { - blob_driver_enabled = var.storage_profile.blob_driver_enabled - disk_driver_enabled = var.storage_profile.disk_driver_enabled - disk_driver_version = var.storage_profile.disk_driver_version - file_driver_enabled = var.storage_profile.file_driver_enabled - snapshot_controller_enabled = var.storage_profile.snapshot_controller_enabled - } - } - - identity { - type = var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? "UserAssigned" : "SystemAssigned" - } - - dynamic "web_app_routing" { - for_each = var.web_app_routing == null ? [] : ["web_app_routing"] - - content { - dns_zone_ids = var.web_app_routing.dns_zone_ids - } - } - - dynamic "linux_profile" { - for_each = var.linux_profile != null ? [true] : [] - iterator = lp - content { - admin_username = var.linux_profile.username - - ssh_key { - key_data = var.linux_profile.ssh_key - } - } - } - - dynamic "workload_autoscaler_profile" { - for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile] - - content { - keda_enabled = workload_autoscaler_profile.value.keda_enabled - vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled - } - } - - - dynamic "http_proxy_config" { - for_each = var.http_proxy_config != null ? ["http_proxy_config"] : [] - - content { - http_proxy = http_proxy_config.value.http_proxy - https_proxy = http_proxy_config.value.https_proxy - no_proxy = http_proxy_config.value.no_proxy - trusted_ca = http_proxy_config.value.trusted_ca - } - } - - dynamic "windows_profile" { - for_each = var.windows_profile != null ? [var.windows_profile] : [] - - content { - admin_username = windows_profile.value.admin_username - admin_password = windows_profile.value.admin_password - license = windows_profile.value.license - - dynamic "gmsa" { - for_each = windows_profile.value.gmsa != null ? [windows_profile.value.gmsa] : [] - - content { - dns_server = gmsa.value.dns_server - root_domain = gmsa.value.root_domain - } - } - } - } - - network_profile { - network_plugin = var.network_plugin - network_policy = var.network_policy - network_data_plane = var.network_data_plane - dns_service_ip = cidrhost(var.service_cidr, 10) - service_cidr = var.service_cidr - load_balancer_sku = var.load_balancer_sku - network_plugin_mode = var.network_plugin_mode - outbound_type = var.outbound_type - pod_cidr = var.net_profile_pod_cidr - - - dynamic "load_balancer_profile" { - for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? 
[1] : [] - - content { - idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes - managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count - managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count - outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids - outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids - outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated - } - } - } - depends_on = [ - azurerm_role_assignment.aks_uai_private_dns_zone_contributor, - ] - tags = module.labels.tags -} -resource "azurerm_kubernetes_cluster_node_pool" "node_pools" { - count = var.enabled ? length(local.nodes_pools) : 0 - kubernetes_cluster_id = azurerm_kubernetes_cluster.aks[0].id - name = local.nodes_pools[count.index].name - vm_size = local.nodes_pools[count.index].vm_size - os_type = local.nodes_pools[count.index].os_type - os_disk_type = local.nodes_pools[count.index].os_disk_type - os_disk_size_gb = local.nodes_pools[count.index].os_disk_size_gb - vnet_subnet_id = local.nodes_pools[count.index].vnet_subnet_id - enable_auto_scaling = local.nodes_pools[count.index].enable_auto_scaling - enable_host_encryption = local.nodes_pools[count.index].enable_host_encryption - node_count = local.nodes_pools[count.index].count - min_count = local.nodes_pools[count.index].min_count - max_count = local.nodes_pools[count.index].max_count - max_pods = local.nodes_pools[count.index].max_pods - enable_node_public_ip = local.nodes_pools[count.index].enable_node_public_ip - mode = local.nodes_pools[count.index].mode - orchestrator_version = local.nodes_pools[count.index].orchestrator_version - node_taints = local.nodes_pools[count.index].node_taints - host_group_id = local.nodes_pools[count.index].host_group_id - capacity_reservation_group_id = var.capacity_reservation_group_id - workload_runtime = var.workload_runtime - zones = var.agents_availability_zones - - dynamic "kubelet_config" { - for_each = var.kubelet_config != null ? [var.kubelet_config] : [] - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - dynamic "upgrade_settings" { - for_each = local.nodes_pools[count.index].max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = local.nodes_pools[count.index].max_surge - node_soak_duration_in_minutes = local.nodes_pools[count.index].node_soak_duration_in_minutes - drain_timeout_in_minutes = local.nodes_pools[count.index].drain_timeout_in_minutes - } - } - - windows_profile { - outbound_nat_enabled = var.outbound_nat_enabled - } -} - -resource "azurerm_role_assignment" "aks_entra_id" { - count = var.enabled && var.role_based_access_control != null && try(var.role_based_access_control[0].azure_rbac_enabled, false) == true ? length(var.admin_group_id) : 0 - scope = azurerm_kubernetes_cluster.aks[0].id - role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin" - principal_id = var.admin_group_id[count.index] -} - -# Allow aks system indentiy access to encrpty disc -resource "azurerm_role_assignment" "aks_system_identity" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id - scope = azurerm_disk_encryption_set.main[0].id - role_definition_name = "Key Vault Crypto Service Encryption User" -} - -# Allow aks system indentiy access to ACR -resource "azurerm_role_assignment" "aks_acr_access_principal_id" { - count = var.enabled && var.acr_enabled ? 
1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id - scope = var.acr_id - role_definition_name = "AcrPull" -} - -resource "azurerm_role_assignment" "aks_acr_access_object_id" { - count = var.enabled && var.acr_enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id - scope = var.acr_id - role_definition_name = "AcrPull" -} - -# Allow user assigned identity to manage AKS items in MC_xxx RG -resource "azurerm_role_assignment" "aks_user_assigned" { - count = var.enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id - scope = format("/subscriptions/%s/resourceGroups/%s", data.azurerm_subscription.current.subscription_id, azurerm_kubernetes_cluster.aks[0].node_resource_group) - role_definition_name = "Network Contributor" -} - -resource "azurerm_user_assigned_identity" "aks_user_assigned_identity" { - count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 - - name = format("%s-aks-mid", module.labels.id) - resource_group_name = local.resource_group_name - location = local.location -} - -resource "azurerm_role_assignment" "aks_uai_private_dns_zone_contributor" { - count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 - - scope = var.private_dns_zone_id - role_definition_name = "Private DNS Zone Contributor" - principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id -} - -resource "azurerm_role_assignment" "aks_uai_vnet_network_contributor" { - count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 - scope = var.vnet_id - role_definition_name = "Network Contributor" - principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id -} - -resource "azurerm_role_assignment" "key_vault_secrets_provider" { - count = var.enabled && var.key_vault_secrets_provider_enabled ? 1 : 0 - scope = var.key_vault_id - role_definition_name = "Key Vault Administrator" - principal_id = azurerm_kubernetes_cluster.aks[0].key_vault_secrets_provider[0].secret_identity[0].object_id -} - -resource "azurerm_role_assignment" "rbac_keyvault_crypto_officer" { - for_each = toset(var.enabled && var.cmk_enabled ? var.admin_objects_ids : []) - scope = var.key_vault_id - role_definition_name = "Key Vault Crypto Officer" - principal_id = each.value -} - -resource "azurerm_key_vault_key" "example" { - depends_on = [azurerm_role_assignment.rbac_keyvault_crypto_officer] - count = var.enabled && var.cmk_enabled ? 1 : 0 - name = format("%s-aks-encrypted-key", module.labels.id) - expiration_date = var.expiration_date - key_vault_id = var.key_vault_id - key_type = "RSA" - key_size = 2048 - key_opts = [ - "decrypt", - "encrypt", - "sign", - "unwrapKey", - "verify", - "wrapKey", - ] - dynamic "rotation_policy" { - for_each = var.rotation_policy_enabled ? var.rotation_policy : {} - content { - automatic { - time_before_expiry = rotation_policy.value.time_before_expiry - } - - expire_after = rotation_policy.value.expire_after - notify_before_expiry = rotation_policy.value.notify_before_expiry - } - } -} - -resource "azurerm_disk_encryption_set" "main" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - name = format("%s-aks-dsk-encrpted", module.labels.id) - resource_group_name = local.resource_group_name - location = local.location - key_vault_key_id = var.key_vault_id != "" ? 
azurerm_key_vault_key.example[0].id : null - - identity { - type = "SystemAssigned" - } -} - -resource "azurerm_role_assignment" "azurerm_disk_encryption_set_key_vault_access" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - principal_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id - scope = var.key_vault_id - role_definition_name = "Key Vault Crypto Service Encryption User" -} - -resource "azurerm_key_vault_access_policy" "main" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - - key_vault_id = var.key_vault_id - - tenant_id = azurerm_disk_encryption_set.main[0].identity[0].tenant_id - object_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id - key_permissions = [ - "Get", - "WrapKey", - "UnwrapKey" - ] - certificate_permissions = [ - "Get" - ] -} - -resource "azurerm_key_vault_access_policy" "key_vault" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - - key_vault_id = var.key_vault_id - - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id - - key_permissions = ["Get"] - certificate_permissions = ["Get"] - secret_permissions = ["Get"] -} - -resource "azurerm_key_vault_access_policy" "kubelet_identity" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - - key_vault_id = var.key_vault_id - - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id - - key_permissions = ["Get"] - certificate_permissions = ["Get"] - secret_permissions = ["Get"] -} - -resource "azurerm_monitor_diagnostic_setting" "aks_diag" { - depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 - name = format("%s-aks-diag-log", module.labels.id) - target_resource_id = azurerm_kubernetes_cluster.aks[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "metric" { - for_each = var.metric_enabled ? ["AllMetrics"] : [] - content { - category = metric.value - enabled = true - } - } - dynamic "enabled_log" { - for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] - content { - category = var.kv_logs.category != null ? enabled_log.value : null - category_group = var.kv_logs.category == null ? enabled_log.value : null - } - } - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -data "azurerm_resources" "aks_pip" { - depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 - type = "Microsoft.Network/publicIPAddresses" - required_tags = { - Environment = var.environment - Name = module.labels.id - Repository = var.repository - } -} - -resource "azurerm_monitor_diagnostic_setting" "pip_aks" { - depends_on = [data.azurerm_resources.aks_pip, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable ? 
1 : 0 - name = format("%s-aks-pip-diag-log", module.labels.id) - target_resource_id = data.azurerm_resources.aks_pip[count.index].resources[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "metric" { - for_each = var.metric_enabled ? ["AllMetrics"] : [] - content { - category = metric.value - enabled = true - } - } - dynamic "enabled_log" { - for_each = var.pip_logs.enabled ? var.pip_logs.category != null ? var.pip_logs.category : var.pip_logs.category_group : [] - content { - category = var.pip_logs.category != null ? enabled_log.value : null - category_group = var.pip_logs.category == null ? enabled_log.value : null - } - } - - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -data "azurerm_resources" "aks_nsg" { - depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 - type = "Microsoft.Network/networkSecurityGroups" - required_tags = { - Environment = var.environment - Name = module.labels.id - Repository = var.repository - } -} - -resource "azurerm_monitor_diagnostic_setting" "aks-nsg" { - depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks] - count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 - name = format("%s-aks-nsg-diag-log", module.labels.id) - target_resource_id = data.azurerm_resources.aks_nsg[count.index].resources[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "enabled_log" { - for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] - content { - category = var.kv_logs.category != null ? enabled_log.value : null - category_group = var.kv_logs.category == null ? enabled_log.value : null - } - } - - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -data "azurerm_resources" "aks_nic" { - depends_on = [azurerm_kubernetes_cluster.aks] - count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 - type = "Microsoft.Network/networkInterfaces" - required_tags = { - Environment = var.environment - Name = module.labels.id - Repository = var.repository - } -} - -resource "azurerm_monitor_diagnostic_setting" "aks-nic" { - depends_on = [data.azurerm_resources.aks_nic, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 - name = format("%s-aks-nic-dia-log", module.labels.id) - target_resource_id = data.azurerm_resources.aks_nic[count.index].resources[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "metric" { - for_each = var.metric_enabled ? 
["AllMetrics"] : [] - content { - category = metric.value - enabled = true - } - } - - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -## AKS user authentication with Azure Rbac. -resource "azurerm_role_assignment" "example" { - for_each = var.enabled && var.aks_user_auth_role != null ? { for k in var.aks_user_auth_role : k.principal_id => k } : null - # scope = - scope = each.value.scope - role_definition_name = each.value.role_definition_name - principal_id = each.value.principal_id -} \ No newline at end of file diff --git a/node.tf b/node.tf new file mode 100644 index 0000000..cc605f2 --- /dev/null +++ b/node.tf @@ -0,0 +1,101 @@ + +resource "azurerm_kubernetes_cluster_node_pool" "node_pools" { + count = var.enabled ? length(var.nodes_pools) : 0 + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks[0].id + name = local.nodes_pools[count.index].name + vm_size = local.nodes_pools[count.index].vm_size + os_type = local.nodes_pools[count.index].os_type + os_disk_type = local.nodes_pools[count.index].os_disk_type + os_disk_size_gb = local.nodes_pools[count.index].os_disk_size_gb + vnet_subnet_id = local.nodes_pools[count.index].vnet_subnet_id + auto_scaling_enabled = local.nodes_pools[count.index].auto_scaling_enabled + host_encryption_enabled = local.nodes_pools[count.index].host_encryption_enabled + node_count = local.nodes_pools[count.index].count + min_count = local.nodes_pools[count.index].min_count + max_count = local.nodes_pools[count.index].max_count + max_pods = local.nodes_pools[count.index].max_pods + node_public_ip_enabled = local.nodes_pools[count.index].node_public_ip_enabled + mode = local.nodes_pools[count.index].mode + orchestrator_version = local.nodes_pools[count.index].orchestrator_version + node_taints = local.nodes_pools[count.index].node_taints + host_group_id = local.nodes_pools[count.index].host_group_id + node_labels = local.nodes_pools[count.index].node_labels + capacity_reservation_group_id = var.capacity_reservation_group_id + workload_runtime = var.workload_runtime + zones = var.agents_availability_zones + priority = local.nodes_pools[count.index].priority + eviction_policy = local.nodes_pools[count.index].eviction_policy + spot_max_price = local.nodes_pools[count.index].spot_max_price + + dynamic "kubelet_config" { + for_each = var.kubelet_config != null ? [var.kubelet_config] : [] + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + content { + max_surge = var.agents_pool_max_surge + } + } + + windows_profile { + outbound_nat_enabled = var.outbound_nat_enabled + } +} diff --git a/outputs.tf b/outputs.tf index 230b5d6..b1e382c 100644 --- a/outputs.tf +++ b/outputs.tf @@ -26,5 +26,4 @@ output "node_resource_group" { output "key_vault_secrets_provider" { value = var.enabled && var.key_vault_secrets_provider_enabled ? azurerm_kubernetes_cluster.aks[0].key_vault_secrets_provider[0].secret_identity[0].object_id : null description = "Specifies the obejct id of key vault secrets provider " -} - +} \ No newline at end of file diff --git a/role.tf b/role.tf new file mode 100644 index 0000000..65827aa --- /dev/null +++ b/role.tf @@ -0,0 +1,182 @@ + +resource "azurerm_role_assignment" "aks_entra_id" { + count = var.enabled && var.role_based_access_control != null && try(var.role_based_access_control[0].azure_rbac_enabled, false) == true ? 
length(var.admin_group_id) : 0 + scope = azurerm_kubernetes_cluster.aks[0].id + role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin" + principal_id = var.admin_group_id[count.index] +} + +# Allow AKS system identity access to encrypt the disk +resource "azurerm_role_assignment" "aks_system_identity" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + scope = azurerm_disk_encryption_set.main[0].id + role_definition_name = "Contributor" +} + +# Allow AKS system identity access to ACR +resource "azurerm_role_assignment" "aks_acr_access_principal_id" { + count = var.enabled && var.acr_enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + scope = var.acr_id + role_definition_name = "AcrPull" +} + +resource "azurerm_role_assignment" "aks_acr_access_object_id" { + count = var.enabled && var.acr_enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id + scope = var.acr_id + role_definition_name = "AcrPull" +} + +# Allow user assigned identity to manage AKS items in MC_xxx RG +resource "azurerm_role_assignment" "aks_user_assigned" { + count = var.enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id + scope = format("/subscriptions/%s/resourceGroups/%s", data.azurerm_subscription.current.subscription_id, azurerm_kubernetes_cluster.aks[0].node_resource_group) + role_definition_name = "Network Contributor" +} + +resource "azurerm_user_assigned_identity" "aks_user_assigned_identity" { + count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 + + name = format("%s-aks-mid", module.labels.id) + resource_group_name = local.resource_group_name + location = local.location +} + +resource "azurerm_role_assignment" "aks_uai_private_dns_zone_contributor" { + count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 + + scope = var.private_dns_zone_id + role_definition_name = "Private DNS Zone Contributor" + principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id +} + +resource "azurerm_role_assignment" "aks_uai_vnet_network_contributor" { + count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 + scope = var.vnet_id + role_definition_name = "Network Contributor" + principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id +} + +resource "azurerm_role_assignment" "rbac_keyvault_crypto_officer" { + for_each = toset(var.enabled && var.cmk_enabled ? var.admin_objects_ids : []) + scope = var.key_vault_id + role_definition_name = "Key Vault Crypto Officer" + principal_id = each.value +} + +resource "azurerm_key_vault_key" "example" { + depends_on = [azurerm_role_assignment.rbac_keyvault_crypto_officer] + count = var.enabled && var.cmk_enabled ? 1 : 0 + name = format("%s-aks-encrypted-key", module.labels.id) + expiration_date = var.expiration_date + key_vault_id = var.key_vault_id + key_type = "RSA" + key_size = 2048 + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + dynamic "rotation_policy" { + for_each = var.rotation_policy_enabled ? 
var.rotation_policy : {} + content { + automatic { + time_before_expiry = rotation_policy.value.time_before_expiry + } + + expire_after = rotation_policy.value.expire_after + notify_before_expiry = rotation_policy.value.notify_before_expiry + } + } +} + +resource "azurerm_disk_encryption_set" "main" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + name = format("%s-aks-dsk-encrpted", module.labels.id) + resource_group_name = local.resource_group_name + location = local.location + key_vault_key_id = var.key_vault_id != "" ? azurerm_key_vault_key.example[0].id : null + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_role_assignment" "azurerm_disk_encryption_set_key_vault_access" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + principal_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id + scope = var.key_vault_id + role_definition_name = "Key Vault Crypto Service Encryption User" +} + +## AKS user authentication with Azure RBAC. +resource "azurerm_role_assignment" "example" { + for_each = var.enabled && var.aks_user_auth_role != null ? { for k in var.aks_user_auth_role : k.principal_id => k } : {} + # scope = + scope = each.value.scope + role_definition_name = each.value.role_definition_name + principal_id = each.value.principal_id +} + +resource "azurerm_key_vault_access_policy" "main" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + + key_vault_id = var.key_vault_id + + tenant_id = azurerm_disk_encryption_set.main[0].identity[0].tenant_id + object_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id + key_permissions = [ + "Get", + "WrapKey", + "UnwrapKey" + ] + certificate_permissions = [ + "Get" + ] +} + +resource "azurerm_key_vault_access_policy" "key_vault" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + + key_vault_id = var.key_vault_id + + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + + key_permissions = ["Get"] + certificate_permissions = ["Get"] + secret_permissions = ["Get"] +} + +resource "azurerm_key_vault_access_policy" "kubelet_identity" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + + key_vault_id = var.key_vault_id + + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id + + key_permissions = ["Get"] + certificate_permissions = ["Get"] + secret_permissions = ["Get"] +} + +resource "azurerm_role_assignment" "aks_system_object_id" { + count = var.enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + scope = var.vnet_id + role_definition_name = "Network Contributor" +} + +resource "azurerm_role_assignment" "key_vault_secrets_provider" { + count = var.enabled && var.key_vault_secrets_provider_enabled ? 1 : 0 + scope = var.key_vault_id + role_definition_name = "Key Vault Administrator" + principal_id = azurerm_kubernetes_cluster.aks[0].key_vault_secrets_provider[0].secret_identity[0].object_id +} diff --git a/variables.tf b/variables.tf index 7f04477..e726577 100644 --- a/variables.tf +++ b/variables.tf @@ -1,10 +1,11 @@ -#Module : LABEL -#Description : Terraform label module variables. +##----------------------------------------------------------------------------- +## GLOBAL VARIABLE +##----------------------------------------------------------------------------- variable "name" { type = string default = "" - description = "Name (e.g. `app` or `cluster`)." + description = "Name (e.g. 
`app` or `cluster`)." } variable "repository" { @@ -36,7 +37,9 @@ variable "managedby" { default = "hello@clouddrove.com" description = "ManagedBy, eg 'CloudDrove'." } - +##----------------------------------------------------------------------------- +## KUBERNETES_CLUSTER VARIABLE +##----------------------------------------------------------------------------- variable "enabled" { type = bool default = true @@ -61,164 +64,6 @@ variable "kubernetes_version" { description = "Version of Kubernetes to deploy" } -variable "workload_runtime" { - type = string - default = null - description = "Used to specify the workload runtime. Allowed values are OCIContainer, WasmWasi and KataMshvVmIsolation." -} - -variable "agents_pool_name" { - type = string - default = "nodepool" - description = "The default Azure AKS agentpool (nodepool) name." - nullable = false -} - -variable "agents_size" { - type = string - default = "Standard_D2s_v3" - description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created." -} - -variable "enable_auto_scaling" { - type = bool - default = false - description = "Enable node pool autoscaling" -} - -variable "enable_host_encryption" { - type = bool - default = false - description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli" -} - -variable "enable_node_public_ip" { - type = bool - default = false - description = "(Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false." -} - -variable "default_node_pool_fips_enabled" { - type = bool - default = null - description = " (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created." -} - -variable "agents_max_count" { - type = number - default = null - description = "Maximum number of nodes in a pool" -} - -variable "agents_max_pods" { - type = number - default = null - description = "The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." -} - -variable "agents_min_count" { - type = number - default = null - description = "Minimum number of nodes in a pool" -} - -variable "agents_labels" { - type = map(string) - default = {} - description = "A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." -} - -variable "only_critical_addons_enabled" { - type = bool - default = null - description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created." -} - -variable "orchestrator_version" { - type = string - default = null - description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" -} - -variable "os_disk_size_gb" { - type = number - default = 50 - description = "Disk size of nodes in GBs." -} - -variable "os_disk_type" { - type = string - default = "Managed" - description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." 
- nullable = false -} - -variable "os_sku" { - type = string - default = null - description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." -} - -variable "pod_subnet_id" { - type = string - default = null - description = "(Optional) The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created." -} - -variable "agents_proximity_placement_group_id" { - type = string - default = null - description = "The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created." -} - -variable "scale_down_mode" { - type = string - default = "Delete" - description = "Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." -} - -variable "snapshot_id" { - type = string - default = null - description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." -} - -variable "tags" { - type = map(string) - default = {} - description = "Any tags that should be present on the AKS cluster resources" -} - -variable "agents_tags" { - type = map(string) - default = {} - description = "A mapping of tags to assign to the Node Pool." -} - -variable "temporary_name_for_rotation" { - type = string - default = "tempnode" - description = "Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" -} - -variable "agents_type" { - type = string - default = "VirtualMachineScaleSets" - description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets." -} - -variable "ultra_ssd_enabled" { - type = bool - default = false - description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." -} - -variable "vnet_subnet_id" { - type = string - default = null - description = "(Optional) The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created." -} - variable "agents_availability_zones" { type = list(string) default = null @@ -304,6 +149,24 @@ EOT nullable = false } +variable "agents_pool_max_surge" { + type = string + default = null + description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade." +} + +variable "agents_pool_node_soak_duration_in_minutes" { + type = number + default = 0 + description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0." +} + +variable "agents_pool_drain_timeout_in_minutes" { + type = number + default = null + description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. 
This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." +} + variable "aci_connector_linux_enabled" { type = bool default = false @@ -324,12 +187,6 @@ variable "aks_sku_tier" { description = "aks sku tier. Possible values are Free ou Paid" } -variable "private_cluster_enabled" { - type = bool - default = true - description = "Configure AKS as a Private Cluster : https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#private_cluster_enabled" -} - variable "node_resource_group" { type = string default = null @@ -350,33 +207,6 @@ https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/ EOD } -variable "default_node_pool" { - description = <