diff --git a/README.md b/README.md index 987a78b..98bc9d7 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,9 @@ [uri-changelog]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/CHANGELOG.md [uri-docs]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/content/docs -[uri-multi-cluster]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/content/docs/3.-multi-cluster.md -[uri-single-cluster-dev]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/content/docs/1.-single-cluster-dev.md -[uri-single-cluster-prod]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/content/docs/2.-single-cluster-prod.md +[uri-single-cluster-dev]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/src/single/dev.md +[uri-single-cluster-prod]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/src/single/production.md [uri-terraform-oci-oke]: https://github.com/oracle-terraform-modules/terraform-oci-oke -[uri-terraform-options]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/content/docs/5.-terraform-options.md +[uri-terraform-options]: https://github.com/oracle-terraform-modules/terraform-oci-verrazzano/blob/main/docs/src/terraformoptions.md [uri-verrazzano]: https://verrazzano.io [uri-verrazzano-medium]: https://medium.com/verrazzano [uri-verrazzano-slack]: https://bit.ly/3gOeRJn @@ -18,7 +17,6 @@ This module automates the installation of [Verrazzano Container Platform][uri-ve * [Create a single cluster with dev profile][uri-single-cluster-dev] * [Create a single cluster with production profile][uri-single-cluster-prod] -* [Create a multi-cluster][uri-multi-cluster] * [Terraform Options][uri-terraform-options] diff --git a/admin.tf b/admin.tf index fea6103..f3aa345 100644 --- a/admin.tf +++ b/admin.tf @@ -1,9 +1,35 @@ 
# Copyright (c) 2023 Oracle Corporation and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl +locals { + all_ports = -1 + + # keep as reference + # apiserver_port = 6443 + + # Protocols + # See https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + all_protocols = "all" + icmp_protocol = 1 + tcp_protocol = 6 + udp_protocol = 17 + + anywhere = "0.0.0.0/0" + rule_type_nsg = "NETWORK_SECURITY_GROUP" + rule_type_cidr = "CIDR_BLOCK" + rule_type_service = "SERVICE_CIDR_BLOCK" + + service_mesh_ports = [80, 443, 15012, 15017, 15021, 15443] + + # Todo verify if we need 15021 open for public + public_lb_allowed_ports = [80, 443, 15021] +} + module "admin" { - source = "oracle-terraform-modules/oke/oci" - version = "4.5.9" + # source = "oracle-terraform-modules/oke/oci" + # version = "4.5.9" + + source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" home_region = local.admin_region region = local.admin_region @@ -12,28 +38,21 @@ module "admin" { # general oci parameters compartment_id = var.compartment_id - label_prefix = var.label_prefix # ssh keys ssh_private_key_path = var.ssh_private_key_path ssh_public_key_path = var.ssh_public_key_path # networking - create_drg = true + # create_drg = true drg_display_name = lookup(var.admin_region, "admin_name") - # admin is always connected to everybody - remote_peering_connections = { - for k, v in var.managed_clusters : "rpc-to-${k}" => {} if tobool(v) - } - - internet_gateway_route_rules = [ for c in keys(var.managed_clusters) : { destination = lookup(lookup(var.cidrs, c), "vcn") destination_type = "CIDR_BLOCK" - network_entity_id = "drg" + network_entity_id = module.admin_drg.drg_id description = "Routing to allow ssh to ${title(c)}" } if tobool(lookup(var.managed_clusters, c)) ] @@ -43,7 +62,7 @@ module "admin" { { destination = lookup(lookup(var.cidrs, c), "vcn") destination_type = "CIDR_BLOCK" - 
network_entity_id = "drg" + network_entity_id = module.admin_drg.drg_id description = "Routing to allow connectivity to ${title(c)} cluster" } if tobool(lookup(var.managed_clusters, c)) ] @@ -52,41 +71,68 @@ module "admin" { vcn_dns_label = lookup(var.admin_region, "admin_name") vcn_name = lookup(var.admin_region, "admin_name") - + #subnets + subnets = { + bastion = { newbits = 13, dns_label = "bastion" } + operator = { newbits = 13, dns_label = "operator" } + cp = { newbits = 13, dns_label = "cp" } + int_lb = { newbits = 11, dns_label = "ilb" } + pub_lb = { newbits = 11, dns_label = "plb" } + workers = { newbits = 2, dns_label = "workers" } + pods = { newbits = 2, dns_label = "pods" } + } # bastion host - create_bastion_host = true - upgrade_bastion = false + create_bastion = true + bastion_upgrade = false + bastion_allowed_cidrs = ["0.0.0.0/0"] + # operator host - create_operator = true - upgrade_operator = false - enable_operator_instance_principal = true + create_operator = true + operator_upgrade = false + create_iam_operator_policy = "auto" # oke cluster options cluster_name = lookup(var.admin_region, "admin_name") cni_type = var.preferred_cni - control_plane_type = var.oke_control_plane - control_plane_allowed_cidrs = ["0.0.0.0/0"] + control_plane_is_public = var.oke_control_plane == "public" + control_plane_allowed_cidrs = [local.anywhere] kubernetes_version = var.kubernetes_version pods_cidr = lookup(var.admin_region, "pods") services_cidr = lookup(var.admin_region, "services") # node pools - kubeproxy_mode = "ipvs" - node_pools = var.nodepools + kubeproxy_mode = "ipvs" + worker_pool_mode = "node-pool" + + worker_pools = var.nodepools - cloudinit_nodepool_common = var.cloudinit_nodepool_common + worker_cloud_init = var.worker_cloud_init - node_pool_image_type = "oke" + worker_image_type = "oke" # oke load balancers - load_balancers = "both" - preferred_load_balancer = "public" - internal_lb_allowed_cidrs = ["0.0.0.0/0"] - internal_lb_allowed_ports = 
var.connectivity_mode == "mesh" ? [80, 443, 15012, 15017, 15021, 15443] : [80, 443] - public_lb_allowed_cidrs = ["0.0.0.0/0"] - public_lb_allowed_ports = [80, 443, 15021] + load_balancers = "both" + preferred_load_balancer = "public" + + allow_rules_internal_lb = { + for p in local.service_mesh_ports : + + format("Allow ingress to port %v", p) => { + protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, + } + } + # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? [80, 443, 15012, 15017, 15021, 15443] : [80, 443] + # TODO: allow configuration of source cidr + allow_rules_public_lb = { + + for p in local.public_lb_allowed_ports : + + format("Allow ingress to port %v", p) => { + protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, + } + } user_id = var.user_id @@ -105,3 +151,37 @@ resource "oci_objectstorage_bucket" "thanos_admin" { count = tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 } + + +module "admin_drg" { + source = "oracle-terraform-modules/drg/oci" + version = "1.0.5" + + # general oci parameters + compartment_id = var.compartment_id + label_prefix = var.label_prefix + + # drg parameters + drg_display_name = "${lookup(var.admin_region, "admin_name")}-drg" + + drg_vcn_attachments = { + drg = { + vcn_id = module.admin.vcn_id + vcn_transit_routing_rt_id = null + drg_route_table_id = null + } + } + + # var.drg_id can either contain an existing DRG ID or be null. + drg_id = null + + # admin is always connected to everybody + remote_peering_connections = { + for k, v in var.managed_clusters : "rpc-to-${k}" => {} if tobool(v) + } + + # count = var.create_drg || var.drg_id != null ? 1 : 0 + providers = { + oci = oci.sydney + } +} diff --git a/docs/src/multi/pri-ep.md b/docs/src/multi/pri-ep.md index b68239a..e3da212 100644 --- a/docs/src/multi/pri-ep.md +++ b/docs/src/multi/pri-ep.md @@ -125,40 +125,42 @@ Do not remove those that you are not using. 
```terraform,editable output "cluster_ids" { value = { - # "johannesburg" = join(",", module.johannesburg[*].cluster_id) - # "chuncheon" = join(",", module.chuncheon[*].cluster_id) - # "hyderabad" = join(",", module.hyderabad[*].cluster_id) - # "mumbai" = join(",", module.mumbai[*].cluster_id) - # "osaka" = join(",", module.osaka[*].cluster_id) - # "seoul" = join(",", module.seoul[*].cluster_id) - # "singapore" = join(",", module.singapore[*].cluster_id) - # "tokyo" = join(",", module.tokyo[*].cluster_id) - # "amsterdam" = join(",", module.amsterdam[*].cluster_id) - # "frankfurt" = join(",", module.frankfurt[*].cluster_id) - # "london" = join(",", module.london[*].cluster_id) - # "madrid" = join(",", module.madrid[*].cluster_id) - # "marseille" = join(",", module.marseille[*].cluster_id) - # "milan" = join(",", module.milan[*].cluster_id) - # "newport" = join(",", module.newport[*].cluster_id) - # "paris" = join(",", module.paris[*].cluster_id) - # "stockholm" = join(",", module.stockholm[*].cluster_id) - # "zurich" = join(",", module.zurich[*].cluster_id) - # "abudhabi" = join(",", module.abudhabi[*].cluster_id) - # "dubai" = join(",", module.dubai[*].cluster_id) - # "jeddah" = join(",", module.jeddah[*].cluster_id) - # "jerusalem" = join(",", module.jerusalem[*].cluster_id) - #"melbourne" = join(",", module.melbourne[*].cluster_id) - # "sydney" = join(",", module.sydney[*].cluster_id) - # "santiago" = join(",", module.santiago[*].cluster_id) - # "saupaulo" = join(",", module.saupaulo[*].cluster_id) - # "vinhedo" = join(",", module.vinhedo[*].cluster_id) - # "ashburn" = join(",", module.ashburn[*].cluster_id) - # "chicago" = join(",", module.chicago[*].cluster_id) - # "montreal" = join(",", module.montreal[*].cluster_id) - "phoenix" = join(",", module.phoenix[*].cluster_id) - # "queretaro" = join(",", module.queretaro[*].cluster_id) - # "sanjose" = join(",", module.sanjose[*].cluster_id) - # "toronto" = join(",", module.toronto[*].cluster_id) + # "johannesburg" = 
one(module.johannesburg[*].cluster_id) + # "chuncheon" = one(module.chuncheon[*].cluster_id) + # "hyderabad" = one(module.hyderabad[*].cluster_id) + # "mumbai" = one(module.mumbai[*].cluster_id) + # "osaka" = one(module.osaka[*].cluster_id) + # "seoul" = one(module.seoul[*].cluster_id) + # "singapore" = one(module.singapore[*].cluster_id) + # "tokyo" = one(module.tokyo[*].cluster_id) + # "amsterdam" = one(module.amsterdam[*].cluster_id) + # "frankfurt" = one(module.frankfurt[*].cluster_id) + # "london" = one(module.london[*].cluster_id) + # "madrid" = one(module.madrid[*].cluster_id) + # "marseille" = one(module.marseille[*].cluster_id) + # "milan" = one(module.milan[*].cluster_id) + # "newport" = one(module.newport[*].cluster_id) + # "paris" = one(module.paris[*].cluster_id) + # "stockholm" = one(module.stockholm[*].cluster_id) + # "zurich" = one(module.zurich[*].cluster_id) + # "abudhabi" = one(module.abudhabi[*].cluster_id) + # "dubai" = one(module.dubai[*].cluster_id) + # "jeddah" = one(module.jeddah[*].cluster_id) + # "jerusalem" = one(module.jerusalem[*].cluster_id) + # "melbourne" = one(module.melbourne[*].cluster_id) + # "sydney" = one(module.sydney[*].cluster_id) + "melbourne" = one(module.melbourne[*].cluster_id) + # "sydney" = one(module.sydney[*].cluster_id) + # "santiago" = one(module.santiago[*].cluster_id) + # "saupaulo" = one(module.saupaulo[*].cluster_id) + # "vinhedo" = one(module.vinhedo[*].cluster_id) + # "ashburn" = one(module.ashburn[*].cluster_id) + # "chicago" = one(module.chicago[*].cluster_id) + # "montreal" = one(module.montreal[*].cluster_id) + # "phoenix" = one(module.phoenix[*].cluster_id) + # "queretaro" = one(module.queretaro[*].cluster_id) + # "sanjose" = one(module.sanjose[*].cluster_id) + # "toronto" = one(module.toronto[*].cluster_id) } } ``` @@ -344,7 +346,7 @@ done - fingerprint - and the private key -``` +```bash,editable cd /home/opc/vz/clusters for cluster in admin phoenix; do kubectx $cluster diff --git 
a/docs/src/multi/pub-ep.md b/docs/src/multi/pub-ep.md index 506e143..4a5f065 100644 --- a/docs/src/multi/pub-ep.md +++ b/docs/src/multi/pub-ep.md @@ -124,40 +124,42 @@ Do not remove those that you are not using. ```terraform,editable output "cluster_ids" { value = { - # "johannesburg" = join(",", module.johannesburg[*].cluster_id) - # "chuncheon" = join(",", module.chuncheon[*].cluster_id) - # "hyderabad" = join(",", module.hyderabad[*].cluster_id) - # "mumbai" = join(",", module.mumbai[*].cluster_id) - # "osaka" = join(",", module.osaka[*].cluster_id) - # "seoul" = join(",", module.seoul[*].cluster_id) - # "singapore" = join(",", module.singapore[*].cluster_id) - # "tokyo" = join(",", module.tokyo[*].cluster_id) - # "amsterdam" = join(",", module.amsterdam[*].cluster_id) - # "frankfurt" = join(",", module.frankfurt[*].cluster_id) - # "london" = join(",", module.london[*].cluster_id) - # "madrid" = join(",", module.madrid[*].cluster_id) - # "marseille" = join(",", module.marseille[*].cluster_id) - # "milan" = join(",", module.milan[*].cluster_id) - # "newport" = join(",", module.newport[*].cluster_id) - # "paris" = join(",", module.paris[*].cluster_id) - # "stockholm" = join(",", module.stockholm[*].cluster_id) - # "zurich" = join(",", module.zurich[*].cluster_id) - # "abudhabi" = join(",", module.abudhabi[*].cluster_id) - # "dubai" = join(",", module.dubai[*].cluster_id) - # "jeddah" = join(",", module.jeddah[*].cluster_id) - # "jerusalem" = join(",", module.jerusalem[*].cluster_id) - #"melbourne" = join(",", module.melbourne[*].cluster_id) - # "sydney" = join(",", module.sydney[*].cluster_id) - # "santiago" = join(",", module.santiago[*].cluster_id) - # "saupaulo" = join(",", module.saupaulo[*].cluster_id) - # "vinhedo" = join(",", module.vinhedo[*].cluster_id) - # "ashburn" = join(",", module.ashburn[*].cluster_id) - # "chicago" = join(",", module.chicago[*].cluster_id) - # "montreal" = join(",", module.montreal[*].cluster_id) - "phoenix" = join(",", 
module.phoenix[*].cluster_id) - # "queretaro" = join(",", module.queretaro[*].cluster_id) - # "sanjose" = join(",", module.sanjose[*].cluster_id) - # "toronto" = join(",", module.toronto[*].cluster_id) + # "johannesburg" = one(module.johannesburg[*].cluster_id) + # "chuncheon" = one(module.chuncheon[*].cluster_id) + # "hyderabad" = one(module.hyderabad[*].cluster_id) + # "mumbai" = one(module.mumbai[*].cluster_id) + # "osaka" = one(module.osaka[*].cluster_id) + # "seoul" = one(module.seoul[*].cluster_id) + # "singapore" = one(module.singapore[*].cluster_id) + # "tokyo" = one(module.tokyo[*].cluster_id) + # "amsterdam" = one(module.amsterdam[*].cluster_id) + # "frankfurt" = one(module.frankfurt[*].cluster_id) + # "london" = one(module.london[*].cluster_id) + # "madrid" = one(module.madrid[*].cluster_id) + # "marseille" = one(module.marseille[*].cluster_id) + # "milan" = one(module.milan[*].cluster_id) + # "newport" = one(module.newport[*].cluster_id) + # "paris" = one(module.paris[*].cluster_id) + # "stockholm" = one(module.stockholm[*].cluster_id) + # "zurich" = one(module.zurich[*].cluster_id) + # "abudhabi" = one(module.abudhabi[*].cluster_id) + # "dubai" = one(module.dubai[*].cluster_id) + # "jeddah" = one(module.jeddah[*].cluster_id) + # "jerusalem" = one(module.jerusalem[*].cluster_id) + # "melbourne" = one(module.melbourne[*].cluster_id) + # "sydney" = one(module.sydney[*].cluster_id) + "melbourne" = one(module.melbourne[*].cluster_id) + # "sydney" = one(module.sydney[*].cluster_id) + # "santiago" = one(module.santiago[*].cluster_id) + # "saupaulo" = one(module.saupaulo[*].cluster_id) + # "vinhedo" = one(module.vinhedo[*].cluster_id) + # "ashburn" = one(module.ashburn[*].cluster_id) + # "chicago" = one(module.chicago[*].cluster_id) + # "montreal" = one(module.montreal[*].cluster_id) + # "phoenix" = one(module.phoenix[*].cluster_id) + # "queretaro" = one(module.queretaro[*].cluster_id) + # "sanjose" = one(module.sanjose[*].cluster_id) + # "toronto" = 
one(module.toronto[*].cluster_id) } } ``` @@ -328,7 +330,7 @@ done - fingerprint - and the private key -``` +```bash,editable cd /home/opc/vz/clusters for cluster in admin phoenix; do kubectx $cluster diff --git a/main.tf b/main.tf index 0f064d1..6caa9a0 100644 --- a/main.tf +++ b/main.tf @@ -25,9 +25,9 @@ module "clusters" { clusters = var.managed_clusters - oke_control_plane = var.oke_control_plane - preferred_cni = var.preferred_cni - cloudinit_nodepool_common = var.cloudinit_nodepool_common + oke_control_plane = var.oke_control_plane + preferred_cni = var.preferred_cni + worker_cloud_init = var.worker_cloud_init nodepools = var.nodepools @@ -96,9 +96,9 @@ module "verrazzano" { verrazzano_load_balancer = var.verrazzano_load_balancer all_cluster_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = module.admin.cluster_id }, local.managed_clusters) managed_cluster_ids = local.managed_clusters - int_nsg_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = lookup(module.admin.nsg_ids, "int_lb") }, module.clusters.int_nsg_ids) - int_lb_subnet_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = lookup(module.admin.subnet_ids, "int_lb") }, module.clusters.int_lb_subnet_ids) - pub_nsg_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = lookup(module.admin.nsg_ids, "pub_lb") }, module.clusters.pub_nsg_ids) + int_nsg_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = module.admin.int_lb_nsg_id }, module.clusters.int_nsg_ids) + int_lb_subnet_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = module.admin.int_lb_subnet_id }, module.clusters.int_lb_subnet_ids) + pub_nsg_ids = merge({ lookup(var.admin_region, "admin_name", "admin") = module.admin.pub_lb_nsg_id }, module.clusters.pub_nsg_ids) # verrazzano components argocd = var.argocd @@ -137,4 +137,3 @@ module "verrazzano" { count = tobool(var.get_kubeconfigs) ? 
1 : 0 } - diff --git a/modules/clusters/abudhabi.tf b/modules/clusters/abudhabi.tf new file mode 100644 index 0000000..8ade870 --- /dev/null +++ b/modules/clusters/abudhabi.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "abudhabi" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["abudhabi"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "abudhabi" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "abudhabi" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c)) && c != "abudhabi"] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("abudhabi")), "vcn")] +# vcn_dns_label = "abudhabi" +# vcn_name = "abudhabi" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "abudhabi" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ?
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.abudhabi +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "abudhabi")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_abudhabi" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.abudhabi + +# count = tobool(lookup(var.clusters, "abudhabi", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "abudhabi_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "abudhabi-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.abudhabi[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "abudhabi" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "abudhabi", "false")) ? 1 : 0 +# providers = { +# oci = oci.abudhabi +# } +# } \ No newline at end of file diff --git a/modules/clusters/africa.tf b/modules/clusters/africa.tf deleted file mode 100644 index dac3a8d..0000000 --- a/modules/clusters/africa.tf +++ /dev/null @@ -1,88 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
-# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "johannesburg" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["johannesburg"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "johannesburg" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "johannesburg" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "johannesburg")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("johannesburg")), "vcn")] -# vcn_dns_label = "johannesburg" -# vcn_name = "johannesburg" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "johannesburg" -# cni_type = var.preferred_cni -# control_plane_type = var.oke_control_plane -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("johannesburg")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("johannesburg")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.johannesburg -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "johannesburg")) ? 1 : 0 - -# } diff --git a/modules/clusters/amsterdam.tf b/modules/clusters/amsterdam.tf new file mode 100644 index 0000000..7aa5d45 --- /dev/null +++ b/modules/clusters/amsterdam.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "amsterdam" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["amsterdam"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "amsterdam" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "amsterdam" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c)) && c != "amsterdam"] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("amsterdam")), "vcn")] +# vcn_dns_label = "amsterdam" +# vcn_name = "amsterdam" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "amsterdam" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ?
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.amsterdam +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "amsterdam")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_amsterdam" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.amsterdam + +# count = tobool(lookup(var.clusters, "amsterdam", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "amsterdam_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "amsterdam-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.amsterdam[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "amsterdam" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "amsterdam", "false")) ? 1 : 0 +# providers = { +# oci = oci.amsterdam +# } +# } \ No newline at end of file diff --git a/modules/clusters/ashburn.tf b/modules/clusters/ashburn.tf new file mode 100644 index 0000000..d64601f --- /dev/null +++ b/modules/clusters/ashburn.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "ashburn" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["ashburn"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "ashburn" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "ashburn" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "ashburn")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("ashburn")), "vcn")] +# vcn_dns_label = "ashburn" +# vcn_name = "ashburn" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "ashburn" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.ashburn +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "ashburn")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_ashburn" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.ashburn + +# count = tobool(lookup(var.clusters, "ashburn", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "ashburn_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "ashburn-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.ashburn[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "ashburn" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "ashburn", "false")) ? 1 : 0 +# providers = { +# oci = oci.ashburn +# } +# } \ No newline at end of file diff --git a/modules/clusters/australia.tf b/modules/clusters/australia.tf deleted file mode 100644 index 45074e2..0000000 --- a/modules/clusters/australia.tf +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -module "melbourne" { - source = "oracle-terraform-modules/oke/oci" - version = "4.5.9" - - home_region = var.home_region - region = local.regions["melbourne"] - - tenancy_id = var.tenancy_id - - # general oci parameters - compartment_id = var.compartment_id - label_prefix = var.label_prefix - - # ssh keys - ssh_private_key_path = "~/.ssh/id_rsa" - ssh_public_key_path = "~/.ssh/id_rsa.pub" - - # networking - create_drg = true - drg_display_name = "melbourne" - - remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "melbourne" } : { "rpc-to-admin" : {} } - - nat_gateway_route_rules = concat([ - { - destination = lookup(var.admin_region, "vcn_cidr") - destination_type = "CIDR_BLOCK" - network_entity_id = "drg" - description = "To Admin" - }], var.connectivity_mode == "mesh" ? - [for c in keys(var.clusters) : - { - destination = lookup(lookup(var.cidrs, c), "vcn") - destination_type = "CIDR_BLOCK" - network_entity_id = "drg" - description = "Routing to allow connectivity to ${title(c)} cluster" - } if tobool(lookup(var.clusters, c) && c != "melbourne")] : [] - ) - - vcn_cidrs = [lookup(lookup(var.cidrs, lower("melbourne")), "vcn")] - vcn_dns_label = "melbourne" - vcn_name = "melbourne" - - # bastion host - create_bastion_host = false - - # operator host - create_operator = false - enable_operator_instance_principal = false - - - # oke cluster options - allow_worker_ssh_access = false - cluster_name = "melbourne" - cni_type = var.preferred_cni - control_plane_type = var.oke_control_plane - control_plane_allowed_cidrs = ["0.0.0.0/0"] - kubernetes_version = var.kubernetes_version - pods_cidr = lookup(lookup(var.cidrs, lower("melbourne")), "pods") - services_cidr = lookup(lookup(var.cidrs, lower("melbourne")), "services") - - - # node pools - kubeproxy_mode = 
"ipvs" - node_pools = local.managed_nodepools - cloudinit_nodepool_common = var.cloudinit_nodepool_common - - node_pool_image_type = "oke" - - # oke load balancers - load_balancers = "both" - preferred_load_balancer = "public" - internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] - internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] - public_lb_allowed_cidrs = ["0.0.0.0/0"] - public_lb_allowed_ports = [80, 443] - - providers = { - oci = oci.melbourne - oci.home = oci.home - } - - count = tobool(lookup(var.clusters, "melbourne")) ? 1 : 0 - -} - -resource "oci_objectstorage_bucket" "thanos_melbourne" { - compartment_id = var.compartment_id - name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" - namespace = lookup(var.thanos, "bucket_namespace") - - provider = oci.melbourne - - count = tobool(lookup(var.clusters, "melbourne", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 -} - -module "sydney" { - source = "oracle-terraform-modules/oke/oci" - version = "4.5.9" - - home_region = var.home_region - region = local.regions["sydney"] - - tenancy_id = var.tenancy_id - - # general oci parameters - compartment_id = var.compartment_id - label_prefix = var.label_prefix - - # ssh keys - ssh_private_key_path = "~/.ssh/id_rsa" - ssh_public_key_path = "~/.ssh/id_rsa.pub" - - # networking - create_drg = true - drg_display_name = "sydney" - - remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "sydney" } : { "rpc-to-admin" : {} } - - nat_gateway_route_rules = concat([ - { - destination = lookup(var.admin_region, "vcn_cidr") - destination_type = "CIDR_BLOCK" - network_entity_id = "drg" - description = "To Admin" - }], var.connectivity_mode == "mesh" ? 
- [for c in keys(var.clusters) : - { - destination = lookup(lookup(var.cidrs, c), "vcn") - destination_type = "CIDR_BLOCK" - network_entity_id = "drg" - description = "Routing to allow connectivity to ${title(c)} cluster" - } if tobool(lookup(var.clusters, c) && c != "sydney")] : [] - ) - - vcn_cidrs = [lookup(lookup(var.cidrs, lower("sydney")), "vcn")] - vcn_dns_label = "sydney" - vcn_name = "sydney" - - # bastion host - create_bastion_host = false - upgrade_bastion = false - - # operator host - create_operator = false - upgrade_operator = false - enable_operator_instance_principal = false - - - # oke cluster options - allow_worker_ssh_access = false - cluster_name = "sydney" - cni_type = var.preferred_cni - control_plane_type = var.oke_control_plane - control_plane_allowed_cidrs = ["0.0.0.0/0"] - kubernetes_version = var.kubernetes_version - pods_cidr = lookup(lookup(var.cidrs, lower("sydney")), "pods") - services_cidr = lookup(lookup(var.cidrs, lower("sydney")), "services") - - - # node pools - kubeproxy_mode = "ipvs" - node_pools = local.managed_nodepools - cloudinit_nodepool_common = var.cloudinit_nodepool_common - - node_pool_image_type = "oke" - - # oke load balancers - load_balancers = "both" - preferred_load_balancer = "public" - internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] - internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] - public_lb_allowed_cidrs = ["0.0.0.0/0"] - public_lb_allowed_ports = [80, 443] - - providers = { - oci = oci.sydney - oci.home = oci.home - } - - count = tobool(lookup(var.clusters, "sydney", )) ? 1 : 0 - -} - -resource "oci_objectstorage_bucket" "thanos_sydney" { - compartment_id = var.compartment_id - name = "syd-${lookup(var.thanos, "bucket_name", "thanos")}" - namespace = lookup(var.thanos, "bucket_namespace") - - provider = oci.sydney - - count = tobool(lookup(var.clusters, "sydney", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 
1 : 0 -} \ No newline at end of file diff --git a/modules/clusters/brazil.tf b/modules/clusters/brazil.tf deleted file mode 100644 index 42c7d41..0000000 --- a/modules/clusters/brazil.tf +++ /dev/null @@ -1,174 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "saupaulo" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["saupaulo"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "saupaulo" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "saupaulo" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "saupaulo")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("saupaulo")), "vcn")] -# vcn_dns_label = "saupaulo" -# vcn_name = "saupaulo" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "saupaulo" -# cni_type = var.preferred_cni -# control_plane_type = var.oke_control_plane -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("saupaulo")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("saupaulo")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.saupaulo -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "saupaulo")) ? 
1 : 0 - -# } - -# module "vinhedo" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["vinhedo"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "vinhedo" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "vinhedo" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "vinhedo")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("vinhedo")), "vcn")] -# vcn_dns_label = "vinhedo" -# vcn_name = "vinhedo" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "vinhedo" -# cni_type = var.preferred_cni -# control_plane_type = var.oke_control_plane -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("vinhedo")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("vinhedo")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.vinhedo -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "vinhedo")) ? 1 : 0 - -# } \ No newline at end of file diff --git a/modules/clusters/canada.tf b/modules/clusters/canada.tf deleted file mode 100644 index cc5c4da..0000000 --- a/modules/clusters/canada.tf +++ /dev/null @@ -1,175 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "toronto" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["toronto"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "toronto" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "toronto" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "toronto")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("toronto")), "vcn")] -# vcn_dns_label = "toronto" -# vcn_name = "toronto" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "toronto" -# cni_type = var.preferred_cni -# control_plane_type = var.oke_control_plane -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("toronto")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("toronto")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.toronto -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "toronto")) ? 
1 : 0 - -# } - -# module "montreal" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["montreal"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "montreal" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "montreal" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "montreal")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("montreal")), "vcn")] -# vcn_dns_label = "montreal" -# vcn_name = "montreal" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "montreal" -# cni_type = var.preferred_cni -# control_plane_type = var.oke_control_plane -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("montreal")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("montreal")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.montreal -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "montreal")) ? 1 : 0 - -# } - diff --git a/modules/clusters/chicago.tf b/modules/clusters/chicago.tf new file mode 100644 index 0000000..ddf9685 --- /dev/null +++ b/modules/clusters/chicago.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "chicago" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["chicago"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "chicago" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "chicago" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "chicago")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("chicago")), "vcn")] +# vcn_dns_label = "chicago" +# vcn_name = "chicago" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "chicago" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.chicago +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "chicago")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_chicago" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.chicago + +# count = tobool(lookup(var.clusters, "chicago", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "chicago_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "chicago-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.chicago[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "chicago" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "chicago", "false")) ? 1 : 0 +# providers = { +# oci = oci.chicago +# } +# } \ No newline at end of file diff --git a/modules/clusters/chuncheon.tf b/modules/clusters/chuncheon.tf new file mode 100644 index 0000000..709b8c3 --- /dev/null +++ b/modules/clusters/chuncheon.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "chuncheon" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["chuncheon"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "chuncheon" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "chuncheon" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "chuncheon")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("chuncheon")), "vcn")] +# vcn_dns_label = "chuncheon" +# vcn_name = "chuncheon" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "chuncheon" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.chuncheon +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "chuncheon")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_chuncheon" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.chuncheon + +# count = tobool(lookup(var.clusters, "chuncheon", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "chuncheon_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "chuncheon-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.chuncheon[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "chuncheon" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "chuncheon", "false")) ? 1 : 0 +# providers = { +# oci = oci.chuncheon +# } +# } \ No newline at end of file diff --git a/modules/clusters/dubai.tf b/modules/clusters/dubai.tf new file mode 100644 index 0000000..5cd6f21 --- /dev/null +++ b/modules/clusters/dubai.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "dubai" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["dubai"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "dubai" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "dubai" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "dubai")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("dubai")), "vcn")] +# vcn_dns_label = "dubai" +# vcn_name = "dubai" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "dubai" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.dubai +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "dubai")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_dubai" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.dubai + +# count = tobool(lookup(var.clusters, "dubai", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "dubai_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "dubai-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.dubai[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "dubai" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "dubai", "false")) ? 1 : 0 +# providers = { +# oci = oci.dubai +# } +# } \ No newline at end of file diff --git a/modules/clusters/europe.tf b/modules/clusters/europe.tf deleted file mode 100644 index 4dae1b1..0000000 --- a/modules/clusters/europe.tf +++ /dev/null @@ -1,521 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
-# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "frankfurt" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["frankfurt"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "frankfurt" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "frankfurt" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "frankfurt")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("frankfurt")), "vcn")] -# vcn_dns_label = "frankfurt" -# vcn_name = "frankfurt" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "frankfurt" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("frankfurt")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("frankfurt")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.frankfurt -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "frankfurt")) ? 
1 : 0 - -# } - - -# module "amsterdam" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["amsterdam"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "amsterdam" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "amsterdam" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "amsterdam")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("amsterdam")), "vcn")] -# vcn_dns_label = "amsterdam" -# vcn_name = "amsterdam" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "amsterdam" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("amsterdam")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("amsterdam")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = 
local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.amsterdam -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "amsterdam")) ? 1 : 0 - -# } - - -# module "madrid" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["madrid"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "madrid" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "madrid" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "madrid")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("madrid")), "vcn")] -# vcn_dns_label = "madrid" -# vcn_name = "madrid" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "madrid" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("madrid")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("madrid")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.madrid -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "madrid")) ? 
1 : 0 - -# } - -# module "milan" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["milan"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "milan" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "milan" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "milan")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("milan")), "vcn")] -# vcn_dns_label = "milan" -# vcn_name = "milan" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "milan" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("milan")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("milan")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = 
var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.milan -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "milan")) ? 1 : 0 - -# } - -# module "stockholm" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["stockholm"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "stockholm" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "stockholm" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "stockholm")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("stockholm")), "vcn")] -# vcn_dns_label = "stockholm" -# vcn_name = "stockholm" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "stockholm" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("stockholm")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("stockholm")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.stockholm -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "stockholm")) ? 
1 : 0 - -# } - -# module "zurich" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["zurich"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "zurich" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "zurich" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "zurich")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("zurich")), "vcn")] -# vcn_dns_label = "zurich" -# vcn_name = "zurich" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "zurich" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("zurich")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("zurich")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common 
= var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.zurich -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "zurich")) ? 1 : 0 - -# } - diff --git a/modules/clusters/france.tf b/modules/clusters/france.tf deleted file mode 100644 index 4628f14..0000000 --- a/modules/clusters/france.tf +++ /dev/null @@ -1,175 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "paris" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["paris"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "paris" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "paris" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "paris")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("paris")), "vcn")] -# vcn_dns_label = "paris" -# vcn_name = "paris" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "paris" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("paris")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("paris")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.paris -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "paris")) ? 
1 : 0 - -# } - -# module "marseille" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["marseille"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "marseille" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "marseille" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "marseille")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("marseille")), "vcn")] -# vcn_dns_label = "marseille" -# vcn_name = "marseille" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "marseille" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("marseille")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("marseille")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = 
local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.marseille -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "marseille")) ? 1 : 0 - -# } - diff --git a/modules/clusters/frankfurt.tf b/modules/clusters/frankfurt.tf new file mode 100644 index 0000000..cc66db3 --- /dev/null +++ b/modules/clusters/frankfurt.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "frankfurt" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["frankfurt"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "frankfurt" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "frankfurt" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) :
+# {
+# destination = lookup(lookup(var.cidrs, c), "vcn")
+# destination_type = "CIDR_BLOCK"
+# network_entity_id = "drg"
+# description = "Routing to allow connectivity to ${title(c)} cluster"
+# } if tobool(lookup(var.clusters, c)) && c != "frankfurt"] : []
+# )
+
+# vcn_cidrs = [lookup(lookup(var.cidrs, lower("frankfurt")), "vcn")]
+# vcn_dns_label = "frankfurt"
+# vcn_name = "frankfurt"
+
+# #subnets
+# subnets = {
+# # bastion = { newbits = 13, dns_label = "bastion" }
+# # operator = { newbits = 13, dns_label = "operator" }
+# cp = { newbits = 13, dns_label = "cp" }
+# int_lb = { newbits = 11, dns_label = "ilb" }
+# pub_lb = { newbits = 11, dns_label = "plb" }
+# workers = { newbits = 2, dns_label = "workers" }
+# pods = { newbits = 2, dns_label = "pods" }
+# }
+
+# # bastion host
+# create_bastion = false
+
+# # operator host
+# create_operator = false
+# create_iam_operator_policy = "never"
+
+
+# # oke cluster options
+# cluster_name = "frankfurt"
+# cni_type = var.preferred_cni
+# control_plane_is_public = var.oke_control_plane == "public"
+# control_plane_allowed_cidrs = [local.anywhere]
+# kubernetes_version = var.kubernetes_version
+# pods_cidr = lookup(var.admin_region, "pods")
+# services_cidr = lookup(var.admin_region, "services")
+
+# allow_worker_ssh_access = false
+
+# # node pools
+# kubeproxy_mode = "ipvs"
+# worker_pool_mode = "node-pool"
+
+# worker_pools = var.nodepools
+
+# worker_cloud_init = var.worker_cloud_init
+
+# worker_image_type = "oke"
+
+
+# # oke load balancers
+# load_balancers = "both"
+# preferred_load_balancer = "public"
+
+# allow_rules_internal_lb = {
+# for p in local.service_mesh_ports :
+
+# format("Allow ingress to port %v", p) => {
+# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr,
+# }
+# }
+# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.frankfurt +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "frankfurt")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_frankfurt" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.frankfurt + +# count = tobool(lookup(var.clusters, "frankfurt", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "frankfurt_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "frankfurt-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.frankfurt[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "frankfurt" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "frankfurt", "false")) ? 1 : 0 +# providers = { +# oci = oci.frankfurt +# } +# } \ No newline at end of file diff --git a/modules/clusters/hyderabad.tf b/modules/clusters/hyderabad.tf new file mode 100644 index 0000000..ecc2c28 --- /dev/null +++ b/modules/clusters/hyderabad.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "hyderabad" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["hyderabad"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "hyderabad" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "hyderabad" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) :
+# {
+# destination = lookup(lookup(var.cidrs, c), "vcn")
+# destination_type = "CIDR_BLOCK"
+# network_entity_id = "drg"
+# description = "Routing to allow connectivity to ${title(c)} cluster"
+# } if tobool(lookup(var.clusters, c)) && c != "hyderabad"] : []
+# )
+
+# vcn_cidrs = [lookup(lookup(var.cidrs, lower("hyderabad")), "vcn")]
+# vcn_dns_label = "hyderabad"
+# vcn_name = "hyderabad"
+
+# #subnets
+# subnets = {
+# # bastion = { newbits = 13, dns_label = "bastion" }
+# # operator = { newbits = 13, dns_label = "operator" }
+# cp = { newbits = 13, dns_label = "cp" }
+# int_lb = { newbits = 11, dns_label = "ilb" }
+# pub_lb = { newbits = 11, dns_label = "plb" }
+# workers = { newbits = 2, dns_label = "workers" }
+# pods = { newbits = 2, dns_label = "pods" }
+# }
+
+# # bastion host
+# create_bastion = false
+
+# # operator host
+# create_operator = false
+# create_iam_operator_policy = "never"
+
+
+# # oke cluster options
+# cluster_name = "hyderabad"
+# cni_type = var.preferred_cni
+# control_plane_is_public = var.oke_control_plane == "public"
+# control_plane_allowed_cidrs = [local.anywhere]
+# kubernetes_version = var.kubernetes_version
+# pods_cidr = lookup(var.admin_region, "pods")
+# services_cidr = lookup(var.admin_region, "services")
+
+# allow_worker_ssh_access = false
+
+# # node pools
+# kubeproxy_mode = "ipvs"
+# worker_pool_mode = "node-pool"
+
+# worker_pools = var.nodepools
+
+# worker_cloud_init = var.worker_cloud_init
+
+# worker_image_type = "oke"
+
+
+# # oke load balancers
+# load_balancers = "both"
+# preferred_load_balancer = "public"
+
+# allow_rules_internal_lb = {
+# for p in local.service_mesh_ports :
+
+# format("Allow ingress to port %v", p) => {
+# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr,
+# }
+# }
+# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.hyderabad +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "hyderabad")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_hyderabad" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.hyderabad + +# count = tobool(lookup(var.clusters, "hyderabad", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "hyderabad_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "hyderabad-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.hyderabad[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "hyderabad" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "hyderabad", "false")) ? 1 : 0 +# providers = { +# oci = oci.hyderabad +# } +# } \ No newline at end of file diff --git a/modules/clusters/india.tf b/modules/clusters/india.tf deleted file mode 100644 index 3a8afed..0000000 --- a/modules/clusters/india.tf +++ /dev/null @@ -1,175 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
-# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "hyderabad" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["hyderabad"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "hyderabad" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "hyderabad" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "hyderabad")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("hyderabad")), "vcn")] -# vcn_dns_label = "hyderabad" -# vcn_name = "hyderabad" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "hyderabad" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("hyderabad")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("hyderabad")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.hyderabad -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "hyderabad")) ? 
1 : 0 - -# } - -# module "mumbai" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["mumbai"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "mumbai" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "mumbai" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "mumbai")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("mumbai")), "vcn")] -# vcn_dns_label = "mumbai" -# vcn_name = "mumbai" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "mumbai" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("mumbai")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("mumbai")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common 
= var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.mumbai -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "mumbai")) ? 1 : 0 - -# } - diff --git a/modules/clusters/japan.tf b/modules/clusters/japan.tf deleted file mode 100644 index 1003859..0000000 --- a/modules/clusters/japan.tf +++ /dev/null @@ -1,174 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "osaka" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["osaka"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "osaka" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "osaka" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "osaka")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("osaka")), "vcn")] -# vcn_dns_label = "osaka" -# vcn_name = "osaka" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "osaka" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("osaka")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("osaka")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.osaka -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "osaka")) ? 
1 : 0 - -# } - -# module "tokyo" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["tokyo"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "tokyo" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "tokyo" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "tokyo")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("tokyo")), "vcn")] -# vcn_dns_label = "tokyo" -# vcn_name = "tokyo" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "tokyo" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("tokyo")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("tokyo")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = 
var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.tokyo -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "tokyo")) ? 1 : 0 - -# } diff --git a/modules/clusters/jeddah.tf b/modules/clusters/jeddah.tf new file mode 100644 index 0000000..370ff8c --- /dev/null +++ b/modules/clusters/jeddah.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "jeddah" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["jeddah"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "jeddah" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "jeddah" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "jeddah")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("jeddah")), "vcn")] +# vcn_dns_label = "jeddah" +# vcn_name = "jeddah" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "jeddah" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.jeddah +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "jeddah")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_jeddah" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.jeddah + +# count = tobool(lookup(var.clusters, "jeddah", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "jeddah_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "jeddah-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.jeddah[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "jeddah" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "jeddah", "false")) ? 1 : 0 +# providers = { +# oci = oci.jeddah +# } +# } \ No newline at end of file diff --git a/modules/clusters/jerusalem.tf b/modules/clusters/jerusalem.tf new file mode 100644 index 0000000..b131b47 --- /dev/null +++ b/modules/clusters/jerusalem.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "jerusalem" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["jerusalem"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "jerusalem" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "jerusalem" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "jerusalem")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("jerusalem")), "vcn")] +# vcn_dns_label = "jerusalem" +# vcn_name = "jerusalem" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "jerusalem" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.jerusalem +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "jerusalem")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_jerusalem" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.jerusalem + +# count = tobool(lookup(var.clusters, "jerusalem", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "jerusalem_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "jerusalem-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.jerusalem[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "jerusalem" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "jerusalem", "false")) ? 1 : 0 +# providers = { +# oci = oci.jerusalem +# } +# } \ No newline at end of file diff --git a/modules/clusters/johannesburg.tf b/modules/clusters/johannesburg.tf new file mode 100644 index 0000000..68ad8ff --- /dev/null +++ b/modules/clusters/johannesburg.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "johannesburg" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["johannesburg"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "johannesburg" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "johannesburg" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "johannesburg")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("johannesburg")), "vcn")] +# vcn_dns_label = "johannesburg" +# vcn_name = "johannesburg" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "johannesburg" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.johannesburg +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "johannesburg")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_johannesburg" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.johannesburg + +# count = tobool(lookup(var.clusters, "johannesburg", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "johannesburg_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "johannesburg-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.johannesburg[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "johannesburg" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "johannesburg", "false")) ? 
1 : 0 +# providers = { +# oci = oci.johannesburg +# } +# } \ No newline at end of file diff --git a/modules/clusters/korea.tf b/modules/clusters/korea.tf deleted file mode 100644 index b7710f6..0000000 --- a/modules/clusters/korea.tf +++ /dev/null @@ -1,174 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "chuncheon" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["chuncheon"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "chuncheon" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "chuncheon" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "chuncheon")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("chuncheon")), "vcn")] -# vcn_dns_label = "chuncheon" -# vcn_name = "chuncheon" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "chuncheon" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("chuncheon")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("chuncheon")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.chuncheon -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "chuncheon")) ? 
1 : 0 - -# } - -# module "seoul" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["seoul"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "seoul" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "seoul" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "seoul")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("seoul")), "vcn")] -# vcn_dns_label = "seoul" -# vcn_name = "seoul" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "seoul" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("seoul")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("seoul")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = 
var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.seoul -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "seoul")) ? 1 : 0 - -# } diff --git a/modules/clusters/latam.tf b/modules/clusters/latam.tf deleted file mode 100644 index c6dc917..0000000 --- a/modules/clusters/latam.tf +++ /dev/null @@ -1,175 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "queretaro" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["queretaro"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "queretaro" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "queretaro" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "queretaro")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("queretaro")), "vcn")] -# vcn_dns_label = "queretaro" -# vcn_name = "queretaro" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "queretaro" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("queretaro")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("queretaro")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.queretaro -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "queretaro")) ? 
1 : 0 - -# } - -# module "santiago" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["santiago"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "santiago" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "santiago" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "santiago")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("santiago")), "vcn")] -# vcn_dns_label = "santiago" -# vcn_name = "santiago" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "santiago" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("santiago")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("santiago")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.santiago -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "santiago")) ? 1 : 0 - -# } - diff --git a/modules/clusters/locals.tf b/modules/clusters/locals.tf new file mode 100644 index 0000000..2e56ba0 --- /dev/null +++ b/modules/clusters/locals.tf @@ -0,0 +1,27 @@ +# Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +locals { + all_ports = -1 + + # keep as reference + # apiserver_port = 6443 + + # Protocols + # See https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + all_protocols = "all" + icmp_protocol = 1 + tcp_protocol = 6 + udp_protocol = 17 + + anywhere = "0.0.0.0/0" + rule_type_nsg = "NETWORK_SECURITY_GROUP" + rule_type_cidr = "CIDR_BLOCK" + rule_type_service = "SERVICE_CIDR_BLOCK" + + service_mesh_ports = [80, 443, 15012, 15017, 15021, 15443] + + # Todo verify if we need 15021 open for public + public_lb_allowed_ports = [80, 443, 15021] + +} \ No newline at end of file diff --git a/modules/clusters/london.tf b/modules/clusters/london.tf new file mode 100644 index 0000000..6c9d090 --- /dev/null +++ b/modules/clusters/london.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "london" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["london"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "london" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "london" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "london")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("london")), "vcn")] +# vcn_dns_label = "london" +# vcn_name = "london" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "london" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.london +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "london")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_london" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.london + +# count = tobool(lookup(var.clusters, "london", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "london_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "london-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.london[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "london" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "london", "false")) ? 1 : 0 +# providers = { +# oci = oci.london +# } +# } \ No newline at end of file diff --git a/modules/clusters/madrid.tf b/modules/clusters/madrid.tf new file mode 100644 index 0000000..218d1a8 --- /dev/null +++ b/modules/clusters/madrid.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "madrid" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["madrid"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "madrid" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "madrid" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "madrid")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("madrid")), "vcn")] +# vcn_dns_label = "madrid" +# vcn_name = "madrid" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "madrid" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.madrid +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "madrid")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_madrid" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.madrid + +# count = tobool(lookup(var.clusters, "madrid", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "madrid_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "madrid-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.madrid[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "madrid" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "madrid", "false")) ? 1 : 0 +# providers = { +# oci = oci.madrid +# } +# } \ No newline at end of file diff --git a/modules/clusters/marseille.tf b/modules/clusters/marseille.tf new file mode 100644 index 0000000..d11698b --- /dev/null +++ b/modules/clusters/marseille.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "marseille" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["marseille"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "marseille" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "marseille" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "marseille")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("marseille")), "vcn")] +# vcn_dns_label = "marseille" +# vcn_name = "marseille" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "marseille" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.marseille +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "marseille")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_marseille" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.marseille + +# count = tobool(lookup(var.clusters, "marseille", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "marseille_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "marseille-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.marseille[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "marseille" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "marseille", "false")) ? 1 : 0 +# providers = { +# oci = oci.marseille +# } +# } \ No newline at end of file diff --git a/modules/clusters/melbourne.tf b/modules/clusters/melbourne.tf new file mode 100644 index 0000000..c2bf14f --- /dev/null +++ b/modules/clusters/melbourne.tf @@ -0,0 +1,161 @@ +# Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +module "melbourne" { + # source = "oracle-terraform-modules/oke/oci" + # version = "4.5.9" + + source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + + home_region = var.home_region + region = local.regions["melbourne"] + + tenancy_id = var.tenancy_id + + # general oci parameters + compartment_id = var.compartment_id + + # ssh keys + ssh_private_key_path = "~/.ssh/id_rsa" + ssh_public_key_path = "~/.ssh/id_rsa.pub" + + # networking + # create_drg = true + drg_display_name = "melbourne" + + # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "melbourne" } : { "rpc-to-admin" : {} } + + nat_gateway_route_rules = concat([ + { + destination = lookup(var.admin_region, "vcn_cidr") + destination_type = "CIDR_BLOCK" + network_entity_id = "drg" + description = "To Admin" + }], var.connectivity_mode == "mesh" ? 
+ [for c in keys(var.clusters) : + { + destination = lookup(lookup(var.cidrs, c), "vcn") + destination_type = "CIDR_BLOCK" + network_entity_id = "drg" + description = "Routing to allow connectivity to ${title(c)} cluster" + } if tobool(lookup(var.clusters, c) && c != "melbourne")] : [] + ) + + vcn_cidrs = [lookup(lookup(var.cidrs, lower("melbourne")), "vcn")] + vcn_dns_label = "melbourne" + vcn_name = "melbourne" + + #subnets + subnets = { + # bastion = { newbits = 13, dns_label = "bastion" } + # operator = { newbits = 13, dns_label = "operator" } + cp = { newbits = 13, dns_label = "cp" } + int_lb = { newbits = 11, dns_label = "ilb" } + pub_lb = { newbits = 11, dns_label = "plb" } + workers = { newbits = 2, dns_label = "workers" } + pods = { newbits = 2, dns_label = "pods" } + } + + # bastion host + create_bastion = false + + # operator host + create_operator = false + create_iam_operator_policy = "never" + + + # oke cluster options + cluster_name = "melbourne" + cni_type = var.preferred_cni + control_plane_is_public = var.oke_control_plane == "public" + control_plane_allowed_cidrs = [local.anywhere] + kubernetes_version = var.kubernetes_version + pods_cidr = lookup(var.admin_region, "pods") + services_cidr = lookup(var.admin_region, "services") + + allow_worker_ssh_access = false + + # node pools + kubeproxy_mode = "ipvs" + worker_pool_mode = "node-pool" + + worker_pools = var.nodepools + + worker_cloud_init = var.worker_cloud_init + + worker_image_type = "oke" + + + # oke load balancers + load_balancers = "both" + preferred_load_balancer = "public" + + allow_rules_internal_lb = { + for p in local.service_mesh_ports : + + format("Allow ingress to port %v", p) => { + protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, + } + } + # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] + # TODO: allow configuration of source cidr + allow_rules_public_lb = { + + for p in local.public_lb_allowed_ports : + + format("Allow ingress to port %v", p) => { + protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, + } + } + +# user_id = var.user_id + + providers = { + oci = oci.melbourne + oci.home = oci.home + } + + count = tobool(lookup(var.clusters, "melbourne")) ? 1 : 0 + +} + +resource "oci_objectstorage_bucket" "thanos_melbourne" { + compartment_id = var.compartment_id + name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" + namespace = lookup(var.thanos, "bucket_namespace") + + provider = oci.melbourne + + count = tobool(lookup(var.clusters, "melbourne", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +} + +module "melbourne_drg" { + source = "oracle-terraform-modules/drg/oci" + version = "1.0.5" + + # general oci parameters + compartment_id = var.compartment_id + label_prefix = var.label_prefix + + # drg parameters + drg_display_name = "melbourne-drg" + + drg_vcn_attachments = { + drg = { + vcn_id = module.melbourne[0].vcn_id + vcn_transit_routing_rt_id = null + drg_route_table_id = null + } + } + + # var.drg_id can either contain an existing DRG ID or be null. + drg_id = null + + remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "melbourne" } : { "rpc-to-admin" : {} } + + count = tobool(lookup(var.clusters, "melbourne", "false")) ? 1 : 0 + providers = { + oci = oci.melbourne + } +} + diff --git a/modules/clusters/middleeast.tf b/modules/clusters/middleeast.tf deleted file mode 100644 index a9418b8..0000000 --- a/modules/clusters/middleeast.tf +++ /dev/null @@ -1,174 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
-# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "jeddah" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["jeddah"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "jeddah" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "jeddah" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "jeddah")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("jeddah")), "vcn")] -# vcn_dns_label = "jeddah" -# vcn_name = "jeddah" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "jeddah" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("jeddah")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("jeddah")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.jeddah -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "jeddah")) ? 
1 : 0 - -# } - -# module "jerusalem" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["jerusalem"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "jerusalem" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "jerusalem" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "jerusalem")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("jerusalem")), "vcn")] -# vcn_dns_label = "jerusalem" -# vcn_name = "jerusalem" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "jerusalem" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("jerusalem")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("jerusalem")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = 
local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.jerusalem -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "jerusalem")) ? 1 : 0 - -# } diff --git a/modules/clusters/milan.tf b/modules/clusters/milan.tf new file mode 100644 index 0000000..98da710 --- /dev/null +++ b/modules/clusters/milan.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "milan" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["milan"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "milan" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "milan" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "milan")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("milan")), "vcn")] +# vcn_dns_label = "milan" +# vcn_name = "milan" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "milan" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443]
+# # TODO: allow configuration of source cidr
+# allow_rules_public_lb = {
+
+# for p in local.public_lb_allowed_ports :
+
+# format("Allow ingress to port %v", p) => {
+# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr,
+# }
+# }
+
+# # user_id = var.user_id
+
+# providers = {
+# oci = oci.milan
+# oci.home = oci.home
+# }
+
+# count = tobool(lookup(var.clusters, "milan", "false")) ? 1 : 0
+
+# }
+
+# resource "oci_objectstorage_bucket" "thanos_milan" {
+# compartment_id = var.compartment_id
+# name = "milan-${lookup(var.thanos, "bucket_name", "thanos")}"
+# namespace = lookup(var.thanos, "bucket_namespace")
+
+# provider = oci.milan
+
+# count = tobool(lookup(var.clusters, "milan", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0
+# }
+
+# module "milan_drg" {
+# source = "oracle-terraform-modules/drg/oci"
+# version = "1.0.5"
+
+# # general oci parameters
+# compartment_id = var.compartment_id
+# label_prefix = var.label_prefix
+
+# # drg parameters
+# drg_display_name = "milan-drg"
+
+# drg_vcn_attachments = {
+# drg = {
+# vcn_id = module.milan[0].vcn_id
+# vcn_transit_routing_rt_id = null
+# drg_route_table_id = null
+# }
+# }
+
+# # var.drg_id can either contain an existing DRG ID or be null.
+# drg_id = null
+
+# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "milan" } : { "rpc-to-admin" : {} }
+
+# count = tobool(lookup(var.clusters, "milan", "false")) ? 1 : 0
+# providers = {
+# oci = oci.milan
+# }
+# }
\ No newline at end of file
diff --git a/modules/clusters/montreal.tf b/modules/clusters/montreal.tf
new file mode 100644
index 0000000..74b3552
--- /dev/null
+++ b/modules/clusters/montreal.tf
@@ -0,0 +1,160 @@
+# # Copyright (c) 2023 Oracle Corporation and/or its affiliates.
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "montreal" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["montreal"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "montreal" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "montreal" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) :
+# {
+# destination = lookup(lookup(var.cidrs, c), "vcn")
+# destination_type = "CIDR_BLOCK"
+# network_entity_id = "drg"
+# description = "Routing to allow connectivity to ${title(c)} cluster"
+# } if tobool(lookup(var.clusters, c)) && c != "montreal"] : []
+# )
+
+# vcn_cidrs = [lookup(lookup(var.cidrs, lower("montreal")), "vcn")]
+# vcn_dns_label = "montreal"
+# vcn_name = "montreal"
+
+# #subnets
+# subnets = {
+# # bastion = { newbits = 13, dns_label = "bastion" }
+# # operator = { newbits = 13, dns_label = "operator" }
+# cp = { newbits = 13, dns_label = "cp" }
+# int_lb = { newbits = 11, dns_label = "ilb" }
+# pub_lb = { newbits = 11, dns_label = "plb" }
+# workers = { newbits = 2, dns_label = "workers" }
+# pods = { newbits = 2, dns_label = "pods" }
+# }
+
+# # bastion host
+# create_bastion = false
+
+# # operator host
+# create_operator = false
+# create_iam_operator_policy = "never"
+
+
+# # oke cluster options
+# cluster_name = "montreal"
+# cni_type = var.preferred_cni
+# control_plane_is_public = var.oke_control_plane == "public"
+# control_plane_allowed_cidrs = [local.anywhere]
+# kubernetes_version = var.kubernetes_version
+# pods_cidr = lookup(var.admin_region, "pods")
+# services_cidr = lookup(var.admin_region, "services")
+
+# allow_worker_ssh_access = false
+
+# # node pools
+# kubeproxy_mode = "ipvs"
+# worker_pool_mode = "node-pool"
+
+# worker_pools = var.nodepools
+
+# worker_cloud_init = var.worker_cloud_init
+
+# worker_image_type = "oke"
+
+
+# # oke load balancers
+# load_balancers = "both"
+# preferred_load_balancer = "public"
+
+# allow_rules_internal_lb = {
+# for p in local.service_mesh_ports :
+
+# format("Allow ingress to port %v", p) => {
+# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr,
+# }
+# }
+# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ?
[80, 443, 15012, 15017, 15021, 15443] : [80, 443]
+# # TODO: allow configuration of source cidr
+# allow_rules_public_lb = {
+
+# for p in local.public_lb_allowed_ports :
+
+# format("Allow ingress to port %v", p) => {
+# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr,
+# }
+# }
+
+# # user_id = var.user_id
+
+# providers = {
+# oci = oci.montreal
+# oci.home = oci.home
+# }
+
+# count = tobool(lookup(var.clusters, "montreal", "false")) ? 1 : 0
+
+# }
+
+# resource "oci_objectstorage_bucket" "thanos_montreal" {
+# compartment_id = var.compartment_id
+# name = "montreal-${lookup(var.thanos, "bucket_name", "thanos")}"
+# namespace = lookup(var.thanos, "bucket_namespace")
+
+# provider = oci.montreal
+
+# count = tobool(lookup(var.clusters, "montreal", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0
+# }
+
+# module "montreal_drg" {
+# source = "oracle-terraform-modules/drg/oci"
+# version = "1.0.5"
+
+# # general oci parameters
+# compartment_id = var.compartment_id
+# label_prefix = var.label_prefix
+
+# # drg parameters
+# drg_display_name = "montreal-drg"
+
+# drg_vcn_attachments = {
+# drg = {
+# vcn_id = module.montreal[0].vcn_id
+# vcn_transit_routing_rt_id = null
+# drg_route_table_id = null
+# }
+# }
+
+# # var.drg_id can either contain an existing DRG ID or be null.
+# drg_id = null
+
+# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "montreal" } : { "rpc-to-admin" : {} }
+
+# count = tobool(lookup(var.clusters, "montreal", "false")) ? 1 : 0
+# providers = {
+# oci = oci.montreal
+# }
+# }
\ No newline at end of file
diff --git a/modules/clusters/mumbai.tf b/modules/clusters/mumbai.tf
new file mode 100644
index 0000000..881d789
--- /dev/null
+++ b/modules/clusters/mumbai.tf
@@ -0,0 +1,160 @@
+# # Copyright (c) 2023 Oracle Corporation and/or its affiliates.
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "mumbai" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["mumbai"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "mumbai" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "mumbai" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "mumbai")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("mumbai")), "vcn")] +# vcn_dns_label = "mumbai" +# vcn_name = "mumbai" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "mumbai" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.mumbai +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "mumbai")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_mumbai" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.mumbai + +# count = tobool(lookup(var.clusters, "mumbai", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "mumbai_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "mumbai-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.mumbai[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "mumbai" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "mumbai", "false")) ? 1 : 0 +# providers = { +# oci = oci.mumbai +# } +# } \ No newline at end of file diff --git a/modules/clusters/newport.tf b/modules/clusters/newport.tf new file mode 100644 index 0000000..a411417 --- /dev/null +++ b/modules/clusters/newport.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "newport" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["newport"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "newport" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "newport" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "newport")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("newport")), "vcn")] +# vcn_dns_label = "newport" +# vcn_name = "newport" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "newport" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.newport +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "newport")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_newport" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.newport + +# count = tobool(lookup(var.clusters, "newport", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "newport_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "newport-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.newport[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "newport" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "newport", "false")) ? 1 : 0 +# providers = { +# oci = oci.newport +# } +# } \ No newline at end of file diff --git a/modules/clusters/osaka.tf b/modules/clusters/osaka.tf new file mode 100644 index 0000000..83d6393 --- /dev/null +++ b/modules/clusters/osaka.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "osaka" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["osaka"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "osaka" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "osaka" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "osaka")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("osaka")), "vcn")] +# vcn_dns_label = "osaka" +# vcn_name = "osaka" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "osaka" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.osaka +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "osaka")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_osaka" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.osaka + +# count = tobool(lookup(var.clusters, "osaka", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "osaka_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "osaka-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.osaka[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "osaka" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "osaka", "false")) ? 
1 : 0 +# providers = { +# oci = oci.osaka +# } +# } \ No newline at end of file diff --git a/modules/clusters/outputs.tf b/modules/clusters/outputs.tf index 3e1bfc2..08be7cd 100644 --- a/modules/clusters/outputs.tf +++ b/modules/clusters/outputs.tf @@ -28,7 +28,7 @@ output "cluster_ids" { # "melbourne" = one(module.melbourne[*].cluster_id) # "sydney" = one(module.sydney[*].cluster_id) "melbourne" = one(module.melbourne[*].cluster_id) - "sydney" = one(module.sydney[*].cluster_id) + # "sydney" = one(module.sydney[*].cluster_id) # "santiago" = one(module.santiago[*].cluster_id) # "saupaulo" = one(module.saupaulo[*].cluster_id) # "vinhedo" = one(module.vinhedo[*].cluster_id) @@ -66,8 +66,8 @@ output "int_nsg_ids" { # "dubai" = one(module.dubai[*].int_lb_nsg) # "jeddah" = one(module.jeddah[*].int_lb_nsg) # "jerusalem" = one(module.jerusalem[*].int_lb_nsg) - "melbourne" = one(module.melbourne[*].int_lb_nsg) - "sydney" = one(module.sydney[*].int_lb_nsg) + "melbourne" = one(module.melbourne[*].int_lb_nsg_id) + # "sydney" = one(module.sydney[*].int_lb_nsg) # "santiago" = one(module.santiago[*].int_lb_nsg) # "saupaulo" = one(module.saupaulo[*].int_lb_nsg) # "vinhedo" = one(module.vinhedo[*].int_lb_nsg) @@ -105,8 +105,8 @@ output "pub_nsg_ids" { # "dubai" = one(module.dubai[*].pub_lb_nsg) # "jeddah" = one(module.jeddah[*].pub_lb_nsg) # "jerusalem" = one(module.jerusalem[*].pub_lb_nsg) - "melbourne" = one(module.melbourne[*].pub_lb_nsg) - "sydney" = one(module.sydney[*].pub_lb_nsg) + "melbourne" = one(module.melbourne[*].pub_lb_nsg_id) + # "sydney" = one(module.sydney[*].pub_lb_nsg) # "santiago" = one(module.santiago[*].pub_lb_nsg) # "saupaulo" = one(module.saupaulo[*].pub_lb_nsg) # "vinhedo" = one(module.vinhedo[*].pub_lb_nsg) @@ -122,39 +122,39 @@ output "pub_nsg_ids" { output "int_lb_subnet_ids" { value = { - # "johannesburg" = coalesce(lookup(module.johannesburg[0].subnet_ids,"int_lb")) - # "chuncheon" = coalesce(lookup(module.chuncheon[0].subnet_ids,"int_lb")) - # 
"hyderabad" = coalesce(lookup(module.hyderabad[0].subnet_ids,"int_lb")) - # "mumbai" = coalesce(lookup(module.mumbai[0].subnet_ids,"int_lb")) - # "osaka" = coalesce(lookup(module.osaka[0].subnet_ids,"int_lb")) - # "seoul" = coalesce(lookup(module.seoul[0].subnet_ids,"int_lb")) - # "singapore" = coalesce(lookup(module.singapore[0].subnet_ids,"int_lb")) - # "tokyo" = coalesce(lookup(module.tokyo[0].subnet_ids,"int_lb")) - # "amsterdam" = coalesce(lookup(module.amsterdam[0].subnet_ids,"int_lb")) - # "frankfurt" = coalesce(lookup(module.frankfurt[0].subnet_ids,"int_lb")) - # "london" = coalesce(lookup(module.london[0].subnet_ids,"int_lb")) - # "madrid" = coalesce(lookup(module.madrid[0].subnet_ids,"int_lb")) - # "marseille" = coalesce(lookup(module.marseille[0].subnet_ids,"int_lb")) - # "milan" = coalesce(lookup(module.milan[0].subnet_ids,"int_lb")) - # "newport" = coalesce(lookup(module.newport[0].subnet_ids,"int_lb")) - # "paris" = coalesce(lookup(module.paris[0].subnet_ids,"int_lb")) - # "stockholm" = coalesce(lookup(module.stockholm[0].subnet_ids,"int_lb")) - # "zurich" = coalesce(lookup(module.zurich[0].subnet_ids,"int_lb")) - # "abudhabi" = coalesce(lookup(module.abudhabi[0].subnet_ids,"int_lb")) - # "dubai" = coalesce(lookup(module.dubai[0].subnet_ids,"int_lb")) - # "jeddah" = coalesce(lookup(module.jeddah[0].subnet_ids,"int_lb")) - # "jerusalem" = coalesce(lookup(module.jerusalem[0].subnet_ids,"int_lb")) - "melbourne" = coalesce(lookup(module.melbourne[0].subnet_ids, "int_lb")) - # "sydney" = coalesce(lookup(module.sydney[0].subnet_ids,"int_lb")) - # "santiago" = coalesce(lookup(module.santiago[0].subnet_ids,"int_lb")) - # "saupaulo" = coalesce(lookup(module.saupaulo[0].subnet_ids,"int_lb")) - # "vinhedo" = coalesce(lookup(module.vinhedo[0].subnet_ids,"int_lb")) - # "ashburn" = coalesce(lookup(module.ashburn[0].subnet_ids,"int_lb")) - # "chicago" = coalesce(lookup(module.chicago[0].subnet_ids,"int_lb")) - # "montreal" = 
coalesce(lookup(module.montreal[0].subnet_ids,"int_lb")) - # "phoenix" = coalesce(lookup(module.phoenix[0].subnet_ids,"int_lb")) - # "queretaro" = coalesce(lookup(module.queretaro[0].subnet_ids,"int_lb")) - # "sanjose" = coalesce(lookup(module.sanjose[0].subnet_ids,"int_lb")) - # "toronto" = coalesce(lookup(module.toronto[0].subnet_ids,"int_lb")) + # "johannesburg" = one(module.johannesburg[0].int_lb_subnet_id) + # "chuncheon" = one(module.chuncheon[0].int_lb_subnet_id) + # "hyderabad" = one(module.hyderabad[0].int_lb_subnet_id) + # "mumbai" = one(module.mumbai[0].int_lb_subnet_id) + # "osaka" = one(module.osaka[0].int_lb_subnet_id) + # "seoul" = one(module.seoul[0].int_lb_subnet_id) + # "singapore" = one(module.singapore[0].int_lb_subnet_id) + # "tokyo" = one(module.tokyo[0].int_lb_subnet_id) + # "amsterdam" = one(module.amsterdam[0].int_lb_subnet_id) + # "frankfurt" = one(module.frankfurt[0].int_lb_subnet_id) + # "london" = one(module.london[0].int_lb_subnet_id) + # "madrid" = one(module.madrid[0].int_lb_subnet_id) + # "marseille" = one(module.marseille[0].int_lb_subnet_id) + # "milan" = one(module.milan[0].int_lb_subnet_id) + # "newport" = one(module.newport[0].int_lb_subnet_id) + # "paris" = one(module.paris[0].int_lb_subnet_id) + # "stockholm" = one(module.stockholm[0].int_lb_subnet_id) + # "zurich" = one(module.zurich[0].int_lb_subnet_id) + # "abudhabi" = one(module.abudhabi[0].int_lb_subnet_id) + # "dubai" = one(module.dubai[0].int_lb_subnet_id) + # "jeddah" = one(module.jeddah[0].int_lb_subnet_id) + # "jerusalem" = one(module.jerusalem[0].int_lb_subnet_id) + "melbourne" = one(module.melbourne[*].int_lb_subnet_id) + # "sydney" = one(module.sydney[0].int_lb_subnet_id) + # "santiago" = one(module.santiago[0].int_lb_subnet_id) + # "saupaulo" = one(module.saupaulo[0].int_lb_subnet_id) + # "vinhedo" = one(module.vinhedo[0].int_lb_subnet_id) + # "ashburn" = one(module.ashburn[0].int_lb_subnet_id) + # "chicago" = one(module.chicago[0].int_lb_subnet_id) + # 
"montreal" = one(module.montreal[0].int_lb_subnet_id) + # "phoenix" = one(module.phoenix[0].int_lb_subnet_id) + # "queretaro" = one(module.queretaro[0].int_lb_subnet_id) + # "sanjose" = one(module.sanjose[0].int_lb_subnet_id) + # "toronto" = one(module.toronto[0].int_lb_subnet_id) } } diff --git a/modules/clusters/paris.tf b/modules/clusters/paris.tf new file mode 100644 index 0000000..1e38516 --- /dev/null +++ b/modules/clusters/paris.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "paris" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["paris"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "paris" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "paris" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "paris")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("paris")), "vcn")] +# vcn_dns_label = "paris" +# vcn_name = "paris" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "paris" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.paris +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "paris")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_paris" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.paris + +# count = tobool(lookup(var.clusters, "paris", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "paris_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "paris-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.paris[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "paris" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "paris", "false")) ? 1 : 0 +# providers = { +# oci = oci.paris +# } +# } \ No newline at end of file diff --git a/modules/clusters/phoenix.tf b/modules/clusters/phoenix.tf new file mode 100644 index 0000000..5ab1669 --- /dev/null +++ b/modules/clusters/phoenix.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "phoenix" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["phoenix"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "phoenix" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "phoenix" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "phoenix")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("phoenix")), "vcn")] +# vcn_dns_label = "phoenix" +# vcn_name = "phoenix" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "phoenix" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.phoenix +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "phoenix")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_phoenix" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.phoenix + +# count = tobool(lookup(var.clusters, "phoenix", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "phoenix_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "phoenix-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.phoenix[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "phoenix" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "phoenix", "false")) ? 1 : 0 +# providers = { +# oci = oci.phoenix +# } +# } \ No newline at end of file diff --git a/modules/clusters/queretaro.tf b/modules/clusters/queretaro.tf new file mode 100644 index 0000000..de6378b --- /dev/null +++ b/modules/clusters/queretaro.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "queretaro" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["queretaro"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "queretaro" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "queretaro" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "queretaro")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("queretaro")), "vcn")] +# vcn_dns_label = "queretaro" +# vcn_name = "queretaro" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "queretaro" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.queretaro +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "queretaro")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_queretaro" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.queretaro + +# count = tobool(lookup(var.clusters, "queretaro", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "queretaro_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "queretaro-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.queretaro[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "queretaro" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "queretaro", "false")) ? 1 : 0 +# providers = { +# oci = oci.queretaro +# } +# } \ No newline at end of file diff --git a/modules/clusters/sanjose.tf b/modules/clusters/sanjose.tf new file mode 100644 index 0000000..a0cd8f3 --- /dev/null +++ b/modules/clusters/sanjose.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "sanjose" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["sanjose"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "sanjose" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "sanjose" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "sanjose")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("sanjose")), "vcn")] +# vcn_dns_label = "sanjose" +# vcn_name = "sanjose" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "sanjose" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.sanjose +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "sanjose")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_sanjose" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.sanjose + +# count = tobool(lookup(var.clusters, "sanjose", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "sanjose_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "sanjose-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.sanjose[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "sanjose" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "sanjose", "false")) ? 1 : 0 +# providers = { +# oci = oci.sanjose +# } +# } \ No newline at end of file diff --git a/modules/clusters/santiago.tf b/modules/clusters/santiago.tf new file mode 100644 index 0000000..5246c65 --- /dev/null +++ b/modules/clusters/santiago.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "santiago" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["santiago"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "santiago" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "santiago" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "santiago")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("santiago")), "vcn")] +# vcn_dns_label = "santiago" +# vcn_name = "santiago" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "santiago" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.santiago +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "santiago")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_santiago" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.santiago + +# count = tobool(lookup(var.clusters, "santiago", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "santiago_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "santiago-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.santiago[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "santiago" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "santiago", "false")) ? 1 : 0 +# providers = { +# oci = oci.santiago +# } +# } \ No newline at end of file diff --git a/modules/clusters/saupaulo.tf b/modules/clusters/saupaulo.tf new file mode 100644 index 0000000..497168b --- /dev/null +++ b/modules/clusters/saupaulo.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "saupaulo" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["saupaulo"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "saupaulo" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "saupaulo" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "saupaulo")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("saupaulo")), "vcn")] +# vcn_dns_label = "saupaulo" +# vcn_name = "saupaulo" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "saupaulo" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.saupaulo +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "saupaulo")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_saupaulo" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.saupaulo + +# count = tobool(lookup(var.clusters, "saupaulo", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "saupaulo_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "saupaulo-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.saupaulo[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "saupaulo" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "saupaulo", "false")) ? 1 : 0 +# providers = { +# oci = oci.saupaulo +# } +# } \ No newline at end of file diff --git a/modules/clusters/seoul.tf b/modules/clusters/seoul.tf new file mode 100644 index 0000000..69a63a5 --- /dev/null +++ b/modules/clusters/seoul.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "seoul" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["seoul"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "seoul" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "seoul" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "seoul")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("seoul")), "vcn")] +# vcn_dns_label = "seoul" +# vcn_name = "seoul" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "seoul" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.seoul +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "seoul")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_seoul" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.seoul + +# count = tobool(lookup(var.clusters, "seoul", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "seoul_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "seoul-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.seoul[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "seoul" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "seoul", "false")) ? 
1 : 0 +# providers = { +# oci = oci.seoul +# } +# } \ No newline at end of file diff --git a/modules/clusters/singapore.tf b/modules/clusters/singapore.tf index 131c24f..df297a2 100644 --- a/modules/clusters/singapore.tf +++ b/modules/clusters/singapore.tf @@ -2,8 +2,10 @@ # # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl # module "singapore" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" # home_region = var.home_region # region = local.regions["singapore"] @@ -12,17 +14,16 @@ # # general oci parameters # compartment_id = var.compartment_id -# label_prefix = var.label_prefix # # ssh keys # ssh_private_key_path = "~/.ssh/id_rsa" # ssh_public_key_path = "~/.ssh/id_rsa.pub" # # networking -# create_drg = true +# # create_drg = true # drg_display_name = "singapore" -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "singapore" } : { "rpc-to-admin" : {} } +# # remote_peering_connections = var.connectivity_mode == "mesh" ? 
{ for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "singapore" } : { "rpc-to-admin" : {} } # nat_gateway_route_rules = concat([ # { @@ -44,39 +45,70 @@ # vcn_dns_label = "singapore" # vcn_name = "singapore" +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + # # bastion host -# create_bastion_host = false +# create_bastion = false # # operator host # create_operator = false -# enable_operator_instance_principal = false +# create_iam_operator_policy = "never" # # oke cluster options -# allow_worker_ssh_access = false # cluster_name = "singapore" -# control_plane_type = var.oke_control_plane # cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] # kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("singapore")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("singapore")), "services") +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") +# allow_worker_ssh_access = false # # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" -# node_pool_image_type = "oke" # # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, 
"vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? [80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id # providers = { # oci = oci.singapore @@ -86,3 +118,43 @@ # count = tobool(lookup(var.clusters, "singapore")) ? 1 : 0 # } + +# resource "oci_objectstorage_bucket" "thanos_singapore" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.singapore + +# count = tobool(lookup(var.clusters, "singapore", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "singapore_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "singapore-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.singapore[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? 
{ for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "singapore" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "singapore", "false")) ? 1 : 0 +# providers = { +# oci = oci.singapore +# } +# } \ No newline at end of file diff --git a/modules/clusters/stockholm.tf b/modules/clusters/stockholm.tf new file mode 100644 index 0000000..8a21a41 --- /dev/null +++ b/modules/clusters/stockholm.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "stockholm" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["stockholm"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "stockholm" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "stockholm" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "stockholm")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("stockholm")), "vcn")] +# vcn_dns_label = "stockholm" +# vcn_name = "stockholm" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "stockholm" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.stockholm +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "stockholm")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_stockholm" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.stockholm + +# count = tobool(lookup(var.clusters, "stockholm", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "stockholm_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "stockholm-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.stockholm[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "stockholm" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "stockholm", "false")) ? 1 : 0 +# providers = { +# oci = oci.stockholm +# } +# } \ No newline at end of file diff --git a/modules/clusters/sydney.tf b/modules/clusters/sydney.tf new file mode 100644 index 0000000..92c761f --- /dev/null +++ b/modules/clusters/sydney.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "sydney" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["sydney"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "sydney" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "sydney" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "sydney")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("sydney")), "vcn")] +# vcn_dns_label = "sydney" +# vcn_name = "sydney" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "sydney" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.sydney +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "sydney")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_sydney" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.sydney + +# count = tobool(lookup(var.clusters, "sydney", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "sydney_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "sydney-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.sydney[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "sydney" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "sydney", "false")) ? 1 : 0 +# providers = { +# oci = oci.sydney +# } +# } \ No newline at end of file diff --git a/modules/clusters/tokyo.tf b/modules/clusters/tokyo.tf new file mode 100644 index 0000000..27f514b --- /dev/null +++ b/modules/clusters/tokyo.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "tokyo" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["tokyo"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "tokyo" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "tokyo" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "tokyo")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("tokyo")), "vcn")] +# vcn_dns_label = "tokyo" +# vcn_name = "tokyo" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "tokyo" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.tokyo +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "tokyo")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_tokyo" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.tokyo + +# count = tobool(lookup(var.clusters, "tokyo", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "tokyo_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "tokyo-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.tokyo[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "tokyo" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "tokyo", "false")) ? 1 : 0 +# providers = { +# oci = oci.tokyo +# } +# } \ No newline at end of file diff --git a/modules/clusters/toronto.tf b/modules/clusters/toronto.tf new file mode 100644 index 0000000..f5a7b78 --- /dev/null +++ b/modules/clusters/toronto.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
+# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "toronto" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["toronto"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "toronto" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "toronto" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "toronto")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("toronto")), "vcn")] +# vcn_dns_label = "toronto" +# vcn_name = "toronto" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "toronto" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.toronto +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "toronto")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_toronto" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.toronto + +# count = tobool(lookup(var.clusters, "toronto", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "toronto_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "toronto-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.toronto[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "toronto" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "toronto", "false")) ? 1 : 0 +# providers = { +# oci = oci.toronto +# } +# } \ No newline at end of file diff --git a/modules/clusters/uae.tf b/modules/clusters/uae.tf deleted file mode 100644 index 2bc0fa5..0000000 --- a/modules/clusters/uae.tf +++ /dev/null @@ -1,173 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. 
-# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "dubai" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["dubai"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "dubai" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "dubai" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "dubai")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("dubai")), "vcn")] -# vcn_dns_label = "dubai" -# vcn_name = "dubai" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "dubai" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("dubai")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("dubai")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.dubai -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "dubai")) ? 
1 : 0 - -# } - -# module "abudhabi" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["abudhabi"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "abudhabi" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "abudhabi" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "abudhabi")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("abudhabi")), "vcn")] -# vcn_dns_label = "abudhabi" -# vcn_name = "abudhabi" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "abudhabi" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("abudhabi")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("abudhabi")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.abudhabi -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "abudhabi")) ? 1 : 0 - -# } diff --git a/modules/clusters/uk.tf b/modules/clusters/uk.tf deleted file mode 100644 index f361cbb..0000000 --- a/modules/clusters/uk.tf +++ /dev/null @@ -1,174 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "london" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["london"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "london" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "london" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "london")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("london")), "vcn")] -# vcn_dns_label = "london" -# vcn_name = "london" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "london" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("london")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("london")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.london -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "london")) ? 
1 : 0 - -# } - -# module "newport" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["newport"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "newport" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "newport" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "newport")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("newport")), "vcn")] -# vcn_dns_label = "newport" -# vcn_name = "newport" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "newport" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("newport")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("newport")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.newport -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "newport")) ? 1 : 0 - -# } diff --git a/modules/clusters/usa.tf b/modules/clusters/usa.tf deleted file mode 100644 index 1e7eaf6..0000000 --- a/modules/clusters/usa.tf +++ /dev/null @@ -1,346 +0,0 @@ -# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. -# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl - -# module "ashburn" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["ashburn"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "ashburn" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "ashburn" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "ashburn")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("ashburn")), "vcn")] -# vcn_dns_label = "ashburn" -# vcn_name = "ashburn" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "ashburn" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("ashburn")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("ashburn")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.ashburn -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "ashburn")) ? 
1 : 0 - -# } - -# module "phoenix" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["phoenix"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "phoenix" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "phoenix" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "phoenix")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("phoenix")), "vcn")] -# vcn_dns_label = "phoenix" -# vcn_name = "phoenix" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = false -# cluster_name = "phoenix" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("phoenix")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("phoenix")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.phoenix -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "phoenix")) ? 1 : 0 - -# } - -# module "sanjose" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["sanjose"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "sanjose" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "sanjose" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? 
-# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "sanjose")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("sanjose")), "vcn")] -# vcn_dns_label = "sanjose" -# vcn_name = "sanjose" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = true -# cluster_name = "sanjose" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("sanjose")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("sanjose")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.sanjose -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "sanjose")) ? 
1 : 0 - -# } - -# module "chicago" { -# source = "oracle-terraform-modules/oke/oci" -# version = "4.5.9" - -# home_region = var.home_region -# region = local.regions["chicago"] - -# tenancy_id = var.tenancy_id - -# # general oci parameters -# compartment_id = var.compartment_id -# label_prefix = var.label_prefix - -# # ssh keys -# ssh_private_key_path = "~/.ssh/id_rsa" -# ssh_public_key_path = "~/.ssh/id_rsa.pub" - -# # networking -# create_drg = true -# drg_display_name = "chicago" - -# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "chicago" } : { "rpc-to-admin" : {} } - -# nat_gateway_route_rules = concat([ -# { -# destination = lookup(var.admin_region, "vcn_cidr") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "To Admin" -# }], var.connectivity_mode == "mesh" ? -# [for c in keys(var.clusters) : -# { -# destination = lookup(lookup(var.cidrs, c), "vcn") -# destination_type = "CIDR_BLOCK" -# network_entity_id = "drg" -# description = "Routing to allow connectivity to ${title(c)} cluster" -# } if tobool(lookup(var.clusters, c) && c != "chicago")] : [] -# ) - -# vcn_cidrs = [lookup(lookup(var.cidrs, lower("chicago")), "vcn")] -# vcn_dns_label = "chicago" -# vcn_name = "chicago" - -# # bastion host -# create_bastion_host = false - -# # operator host -# create_operator = false -# enable_operator_instance_principal = false - - -# # oke cluster options -# allow_worker_ssh_access = true -# cluster_name = "chicago" -# control_plane_type = var.oke_control_plane -# cni_type = var.preferred_cni -# control_plane_allowed_cidrs = ["0.0.0.0/0"] -# kubernetes_version = var.kubernetes_version -# pods_cidr = lookup(lookup(var.cidrs, lower("chicago")), "pods") -# services_cidr = lookup(lookup(var.cidrs, lower("chicago")), "services") - - -# # node pools -# kubeproxy_mode = "ipvs" -# node_pools = local.managed_nodepools -# 
cloudinit_nodepool_common = var.cloudinit_nodepool_common - -# node_pool_image_type = "oke" - -# # oke load balancers -# load_balancers = "both" -# preferred_load_balancer = "public" -# internal_lb_allowed_cidrs = [lookup(var.admin_region, "vcn_cidr")] -# internal_lb_allowed_ports = [80, 443, 15012, 15017, 15021, 15443] -# public_lb_allowed_cidrs = ["0.0.0.0/0"] -# public_lb_allowed_ports = [80, 443] - -# providers = { -# oci = oci.chicago -# oci.home = oci.home -# } - -# count = tobool(lookup(var.clusters, "chicaco")) ? 1 : 0 - -# } \ No newline at end of file diff --git a/modules/clusters/variables.tf b/modules/clusters/variables.tf index 406e9d3..5d73eac 100644 --- a/modules/clusters/variables.tf +++ b/modules/clusters/variables.tf @@ -59,8 +59,9 @@ variable "nodepools" { type = any } -variable "cloudinit_nodepool_common" { - type = string +variable "worker_cloud_init" { + default = [] + type = list(map(string)) } variable "thanos" { diff --git a/modules/clusters/vinhedo.tf b/modules/clusters/vinhedo.tf new file mode 100644 index 0000000..d3cd56a --- /dev/null +++ b/modules/clusters/vinhedo.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "vinhedo" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["vinhedo"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "vinhedo" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? 
{ for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "vinhedo" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? +# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "vinhedo")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("vinhedo")), "vcn")] +# vcn_dns_label = "vinhedo" +# vcn_name = "vinhedo" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "vinhedo" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# 
allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? [80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.vinhedo +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "vinhedo")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_vinhedo" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.vinhedo + +# count = tobool(lookup(var.clusters, "vinhedo", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "vinhedo_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "vinhedo-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.vinhedo[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "vinhedo" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "vinhedo", "false")) ? 
1 : 0 +# providers = { +# oci = oci.vinhedo +# } +# } \ No newline at end of file diff --git a/modules/clusters/zurich.tf b/modules/clusters/zurich.tf new file mode 100644 index 0000000..dbbe91e --- /dev/null +++ b/modules/clusters/zurich.tf @@ -0,0 +1,160 @@ +# # Copyright (c) 2023 Oracle Corporation and/or its affiliates. +# # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl + +# module "zurich" { +# # source = "oracle-terraform-modules/oke/oci" +# # version = "4.5.9" + +# source = "github.com/oracle-terraform-modules/terraform-oci-oke?ref=5.x&depth=1" + +# home_region = var.home_region +# region = local.regions["zurich"] + +# tenancy_id = var.tenancy_id + +# # general oci parameters +# compartment_id = var.compartment_id + +# # ssh keys +# ssh_private_key_path = "~/.ssh/id_rsa" +# ssh_public_key_path = "~/.ssh/id_rsa.pub" + +# # networking +# # create_drg = true +# drg_display_name = "zurich" + +# # remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "zurich" } : { "rpc-to-admin" : {} } + +# nat_gateway_route_rules = concat([ +# { +# destination = lookup(var.admin_region, "vcn_cidr") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "To Admin" +# }], var.connectivity_mode == "mesh" ? 
+# [for c in keys(var.clusters) : +# { +# destination = lookup(lookup(var.cidrs, c), "vcn") +# destination_type = "CIDR_BLOCK" +# network_entity_id = "drg" +# description = "Routing to allow connectivity to ${title(c)} cluster" +# } if tobool(lookup(var.clusters, c) && c != "zurich")] : [] +# ) + +# vcn_cidrs = [lookup(lookup(var.cidrs, lower("zurich")), "vcn")] +# vcn_dns_label = "zurich" +# vcn_name = "zurich" + +# #subnets +# subnets = { +# # bastion = { newbits = 13, dns_label = "bastion" } +# # operator = { newbits = 13, dns_label = "operator" } +# cp = { newbits = 13, dns_label = "cp" } +# int_lb = { newbits = 11, dns_label = "ilb" } +# pub_lb = { newbits = 11, dns_label = "plb" } +# workers = { newbits = 2, dns_label = "workers" } +# pods = { newbits = 2, dns_label = "pods" } +# } + +# # bastion host +# create_bastion = false + +# # operator host +# create_operator = false +# create_iam_operator_policy = "never" + + +# # oke cluster options +# cluster_name = "zurich" +# cni_type = var.preferred_cni +# control_plane_is_public = var.oke_control_plane == "public" +# control_plane_allowed_cidrs = [local.anywhere] +# kubernetes_version = var.kubernetes_version +# pods_cidr = lookup(var.admin_region, "pods") +# services_cidr = lookup(var.admin_region, "services") + +# allow_worker_ssh_access = false + +# # node pools +# kubeproxy_mode = "ipvs" +# worker_pool_mode = "node-pool" + +# worker_pools = var.nodepools + +# worker_cloud_init = var.worker_cloud_init + +# worker_image_type = "oke" + + +# # oke load balancers +# load_balancers = "both" +# preferred_load_balancer = "public" + +# allow_rules_internal_lb = { +# for p in local.service_mesh_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } +# # internal_lb_allowed_ports = var.connectivity_mode == "mesh" ? 
[80, 443, 15012, 15017, 15021, 15443] : [80, 443] +# # TODO: allow configuration of source cidr +# allow_rules_public_lb = { + +# for p in local.public_lb_allowed_ports : + +# format("Allow ingress to port %v", p) => { +# protocol = local.tcp_protocol, port = p, source = "10.0.0.0/16", source_type = local.rule_type_cidr, +# } +# } + +# # user_id = var.user_id + +# providers = { +# oci = oci.zurich +# oci.home = oci.home +# } + +# count = tobool(lookup(var.clusters, "zurich")) ? 1 : 0 + +# } + +# resource "oci_objectstorage_bucket" "thanos_zurich" { +# compartment_id = var.compartment_id +# name = "mel-${lookup(var.thanos, "bucket_name", "thanos")}" +# namespace = lookup(var.thanos, "bucket_namespace") + +# provider = oci.zurich + +# count = tobool(lookup(var.clusters, "zurich", "false")) && tobool(lookup(var.thanos, "enabled", "false")) ? 1 : 0 +# } + +# module "zurich_drg" { +# source = "oracle-terraform-modules/drg/oci" +# version = "1.0.5" + +# # general oci parameters +# compartment_id = var.compartment_id +# label_prefix = var.label_prefix + +# # drg parameters +# drg_display_name = "zurich-drg" + +# drg_vcn_attachments = { +# drg = { +# vcn_id = module.zurich[0].vcn_id +# vcn_transit_routing_rt_id = null +# drg_route_table_id = null +# } +# } + +# # var.drg_id can either contain an existing DRG ID or be null. +# drg_id = null + +# remote_peering_connections = var.connectivity_mode == "mesh" ? { for k, v in merge({ "admin" = true }, var.clusters) : "rpc-to-${k}" => {} if tobool(v) && k != "zurich" } : { "rpc-to-admin" : {} } + +# count = tobool(lookup(var.clusters, "zurich", "false")) ? 
1 : 0 +# providers = { +# oci = oci.zurich +# } +# } \ No newline at end of file diff --git a/variables.tf b/variables.tf index 42a0f93..637b826 100644 --- a/variables.tf +++ b/variables.tf @@ -232,10 +232,10 @@ variable "nodepools" { } } -variable "cloudinit_nodepool_common" { - type = string - description = "Path to custom cloud init file for OKE workner nodes" - default = "" +variable "worker_cloud_init" { + default = [] + description = "List of maps containing cloud init MIME part configuration for worker nodes. Merged with pool-specific definitions. See https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/cloudinit_config.html#part for expected schema of each element." + type = list(map(string)) } # verrazzano