diff --git a/data/data/aws/main.tf b/data/data/aws/main.tf
index 2390ab054fb..fef5344e6cb 100644
--- a/data/data/aws/main.tf
+++ b/data/data/aws/main.tf
@@ -72,9 +72,10 @@ module "dns" {
 module "vpc" {
   source = "./vpc"

-  cidr_block = "${var.machine_cidr}"
-  cluster_id = "${var.cluster_id}"
-  region     = "${var.aws_region}"
+  cidr_block         = "${var.machine_cidr}"
+  cluster_id         = "${var.cluster_id}"
+  region             = "${var.aws_region}"
+  availability_zones = "${distinct(concat(var.aws_master_availability_zones, var.aws_worker_availability_zones))}"

   tags = "${local.tags}"
 }
diff --git a/data/data/aws/variables-aws.tf b/data/data/aws/variables-aws.tf
index 41f04cb7770..a2f67cbb141 100644
--- a/data/data/aws/variables-aws.tf
+++ b/data/data/aws/variables-aws.tf
@@ -62,3 +62,8 @@ variable "aws_master_availability_zones" {
   type        = "list"
   description = "The availability zones in which to create the masters. The length of this list must match master_count."
 }
+
+variable "aws_worker_availability_zones" {
+  type        = "list"
+  description = "The availability zones to provision for workers. Worker instances are created by the machine-API operator, but this variable controls their supporting infrastructure (subnets, routing, etc.)."
+}
diff --git a/data/data/aws/vpc/common.tf b/data/data/aws/vpc/common.tf
index e451515c368..b8d6d3c4957 100644
--- a/data/data/aws/vpc/common.tf
+++ b/data/data/aws/vpc/common.tf
@@ -1,19 +1,10 @@
 # Canonical internal state definitions for this module.
 # read only: only locals and data source definitions allowed. No resources or module blocks in this file

-data "aws_region" "current" {}
-// Fetch a list of available AZs
-data "aws_availability_zones" "azs" {
-  state = "available"
-}
-
-// Only reference data sources which are gauranteed to exist at any time (above) in this locals{} block
+// Only reference data sources which are guaranteed to exist at any time (above) in this locals{} block
 locals {
-  // List of possible AZs for each type of subnet
-  new_subnet_azs = "${data.aws_availability_zones.azs.names}"
-
   // How many AZs to create subnets in
-  new_az_count = "${length(local.new_subnet_azs)}"
+  new_az_count = "${length(var.availability_zones)}"

   // The VPC ID to use to build the rest of the vpc data sources
   vpc_id = "${aws_vpc.new_vpc.id}"
diff --git a/data/data/aws/vpc/outputs.tf b/data/data/aws/vpc/outputs.tf
index 48974ea4b1d..5daf08d93b3 100644
--- a/data/data/aws/vpc/outputs.tf
+++ b/data/data/aws/vpc/outputs.tf
@@ -3,11 +3,11 @@ output "vpc_id" {
 }

 output "az_to_private_subnet_id" {
-  value = "${zipmap(local.new_subnet_azs, local.private_subnet_ids)}"
+  value = "${zipmap(var.availability_zones, local.private_subnet_ids)}"
 }

 output "az_to_public_subnet_id" {
-  value = "${zipmap(local.new_subnet_azs, local.public_subnet_ids)}"
+  value = "${zipmap(var.availability_zones, local.public_subnet_ids)}"
 }

 output "public_subnet_ids" {
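A note on the `distinct(concat(...))` expression added to the `vpc` module above: it is the HCL idiom for an ordered list union, and `distinct` keeps only the first occurrence of each element, so master zones lead the merged list. A rough Go equivalent, as a sketch (hypothetical `mergeZones` helper with made-up zone names, for illustration only):

```go
package main

import "fmt"

// mergeZones mirrors Terraform's distinct(concat(masters, workers)):
// concatenate both lists, then drop duplicates, keeping first-seen order.
func mergeZones(masters, workers []string) []string {
	seen := map[string]bool{}
	merged := []string{}
	for _, zone := range append(append([]string{}, masters...), workers...) {
		if !seen[zone] {
			seen[zone] = true
			merged = append(merged, zone)
		}
	}
	return merged
}

func main() {
	masters := []string{"us-east-1a", "us-east-1b", "us-east-1c"}
	workers := []string{"us-east-1a", "us-east-1d"}
	fmt.Println(mergeZones(masters, workers))
	// Output: [us-east-1a us-east-1b us-east-1c us-east-1d]
}
```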
diff --git a/data/data/aws/vpc/variables.tf b/data/data/aws/vpc/variables.tf
index e2241af898b..59adeaed354 100644
--- a/data/data/aws/vpc/variables.tf
+++ b/data/data/aws/vpc/variables.tf
@@ -1,3 +1,8 @@
+variable "availability_zones" {
+  type        = "list"
+  description = "The availability zones in which to provision subnets."
+}
+
 variable "cidr_block" {
   type = "string"
 }
diff --git a/data/data/aws/vpc/vpc-private.tf b/data/data/aws/vpc/vpc-private.tf
index 1df4a74d3a9..fa606b49cd3 100644
--- a/data/data/aws/vpc/vpc-private.tf
+++ b/data/data/aws/vpc/vpc-private.tf
@@ -3,7 +3,7 @@ resource "aws_route_table" "private_routes" {
   vpc_id = "${data.aws_vpc.cluster_vpc.id}"

   tags = "${merge(map(
-    "Name","${var.cluster_id}-private-${local.new_subnet_azs[count.index]}",
+    "Name","${var.cluster_id}-private-${var.availability_zones[count.index]}",
   ), var.tags)}"
 }

@@ -22,10 +22,10 @@ resource "aws_subnet" "private_subnet" {

   cidr_block = "${cidrsubnet(local.new_private_cidr_range, 3, count.index)}"

-  availability_zone = "${local.new_subnet_azs[count.index]}"
+  availability_zone = "${var.availability_zones[count.index]}"

   tags = "${merge(map(
-    "Name", "${var.cluster_id}-private-${local.new_subnet_azs[count.index]}",
+    "Name", "${var.cluster_id}-private-${var.availability_zones[count.index]}",
     "kubernetes.io/role/internal-elb", "",
   ), var.tags)}"
 }
diff --git a/data/data/aws/vpc/vpc-public.tf b/data/data/aws/vpc/vpc-public.tf
index c20e1f85fb2..1b60d15ba51 100644
--- a/data/data/aws/vpc/vpc-public.tf
+++ b/data/data/aws/vpc/vpc-public.tf
@@ -31,10 +31,10 @@ resource "aws_subnet" "public_subnet" {

   cidr_block = "${cidrsubnet(local.new_public_cidr_range, 3, count.index)}"

-  availability_zone = "${local.new_subnet_azs[count.index]}"
+  availability_zone = "${var.availability_zones[count.index]}"

   tags = "${merge(map(
-    "Name", "${var.cluster_id}-public-${local.new_subnet_azs[count.index]}",
+    "Name", "${var.cluster_id}-public-${var.availability_zones[count.index]}",
   ), var.tags)}"
 }

@@ -49,7 +49,7 @@ resource "aws_eip" "nat_eip" {
   vpc = true

   tags = "${merge(map(
-    "Name", "${var.cluster_id}-eip-${local.new_subnet_azs[count.index]}",
+    "Name", "${var.cluster_id}-eip-${var.availability_zones[count.index]}",
   ), var.tags)}"

   # Terraform does not declare an explicit dependency towards the internet gateway.
@@ -64,6 +64,6 @@ resource "aws_nat_gateway" "nat_gw" {
   subnet_id = "${aws_subnet.public_subnet.*.id[count.index]}"

   tags = "${merge(map(
-    "Name", "${var.cluster_id}-nat-${local.new_subnet_azs[count.index]}",
+    "Name", "${var.cluster_id}-nat-${var.availability_zones[count.index]}",
   ), var.tags)}"
 }
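A note on the subnet math in the hunks above (context lines, unchanged by this PR): `cidrsubnet(range, 3, count.index)` extends the prefix by 3 bits, carving each per-type range into eight equal slices indexed per zone. For an assumed private range of 10.0.0.0/17, zone indexes 0, 1, and 2 would receive 10.0.0.0/20, 10.0.16.0/20, and 10.0.32.0/20 — and the 3-bit split is also why a single VPC module can serve at most eight availability zones.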
diff --git a/docs/user/aws/limits.md b/docs/user/aws/limits.md
index 1182eca1d03..af89d56eaec 100644
--- a/docs/user/aws/limits.md
+++ b/docs/user/aws/limits.md
@@ -23,25 +23,27 @@ limit.

 ## Elastic Network Interfaces (ENI)

-The default installation creates 21 + the number of availability zones of ENIs (e.g. us-east-1 = 21 + 6 = 27 ENIs).
+The default installation creates 21 ENIs plus one ENI per availability zone (e.g. 21 + 3 = 24 ENIs for a three-zone cluster).

 The default limit per region is 350. Additional ENIs are created for additional machines and elastic load
 balancers created by cluster usage and deployed workloads. A service limit increase here may be required to
 satisfy the needs of additional clusters and deployed workloads.

 ## Elastic IP (EIP)

-For a single, default cluster, your account will have the needed capacity limits required. There is one exception,
-"EC2-VPC Elastic IPs". The installer creates a public and private subnet for each
-[availability zone within a region][availability-zones] to provision the cluster in a highly available configuration. In
-each private subnet, a separate [NAT Gateway][nat-gateways] is created and requires a separate [elastic IP][elastic-ip].
-The default limit of 5 is sufficient for most regions and a single cluster. For the us-east-1 region, a higher limit is
-required. For multiple clusters, a higher limit is required. Please see [this map][az-map] for a current region map with
-availability zone count. We recommend selecting regions with 3 or more availability zones.
+By default, the installer distributes control-plane and compute machines across [all availability zones within a region][availability-zones] to provision the cluster in a highly available configuration.
+Please see [this map][az-map] for a current region map with availability zone count.
+We recommend selecting regions with 3 or more availability zones.
+You can [provide an install-config](../overview.md#multiple-invocations) to [configure](customization.md) the installer to use specific zones to override that default.

-### Example: Using N. Virginia (us-east-1)
+The installer creates a public and private subnet for each configured availability zone.
+In each private subnet, a separate [NAT Gateway][nat-gateways] is created and requires a separate [EC2-VPC Elastic IP (EIP)][elastic-ip].
+The default limit of 5 is sufficient for a single cluster, unless you have configured your cluster to use more than five zones.
+For multiple clusters, a higher limit will likely be required (and will certainly be required to support more than five clusters, even if they are each single-zone clusters).

-To use N. Virginia (us-east-1) for a new cluster, please submit a limit increase for VPC Elastic IPs similar to the
-following in the support dashboard (to create more than one cluster, a higher limit will be necessary):
+### Example: Using Northern Virginia (us-east-1)
+
+Northern Virginia (us-east-1) has six availability zones, so a higher limit is required unless you configure your cluster to use fewer zones.
+To support the default, all-zone installation, please submit a limit increase for VPC Elastic IPs similar to the following in the support dashboard (to create more than one cluster, a higher limit will be necessary):

 ![Increase Elastic IP limit in AWS](images/support_increase_elastic_ip.png)
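To make the EIP arithmetic above concrete: each configured zone consumes one EIP for its NAT gateway, so a three-zone cluster uses 3 of the default 5 EIPs, a default all-zone us-east-1 cluster needs 6, and two three-zone clusters sharing a region also need 6 — the latter two cases both require the limit increase described above.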
diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go
index 9de5e8be5fa..ad87b00a469 100644
--- a/pkg/asset/cluster/tfvars.go
+++ b/pkg/asset/cluster/tfvars.go
@@ -61,6 +61,7 @@ func (t *TerraformVariables) Dependencies() []asset.Asset {
 		&bootstrap.Bootstrap{},
 		&machine.Master{},
 		&machines.Master{},
+		&machines.Worker{},
 	}
 }

@@ -71,8 +72,9 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
 	bootstrapIgnAsset := &bootstrap.Bootstrap{}
 	masterIgnAsset := &machine.Master{}
 	mastersAsset := &machines.Master{}
+	workersAsset := &machines.Worker{}
 	rhcosImage := new(rhcos.Image)
-	parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, rhcosImage)
+	parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, workersAsset, rhcosImage)

 	platform := installConfig.Config.Platform.Name()
 	switch platform {
@@ -83,8 +85,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {

 	bootstrapIgn := string(bootstrapIgnAsset.Files()[0].Data)
 	masterIgn := string(masterIgnAsset.Files()[0].Data)
-	masters := mastersAsset.Machines()
-	masterCount := len(masters)
+	masterCount := len(mastersAsset.MachineFiles)
 	data, err := tfvars.TFVars(
 		clusterID.InfraID,
 		installConfig.Config.ClusterDomain(),
@@ -110,7 +111,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {

 	switch platform {
 	case aws.Name:
-		masters, err := mastersAsset.StructuredMachines()
+		masters, err := mastersAsset.Machines()
 		if err != nil {
 			return err
 		}
@@ -118,7 +119,15 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
 		for i, m := range masters {
 			masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*awsprovider.AWSMachineProviderConfig)
 		}
-		data, err := awstfvars.TFVars(masterConfigs)
+		workers, err := workersAsset.MachineSets()
+		if err != nil {
+			return err
+		}
+		workerConfigs := make([]*awsprovider.AWSMachineProviderConfig, len(workers))
+		for i, m := range workers {
+			workerConfigs[i] = m.Spec.Template.Spec.ProviderSpec.Value.Object.(*awsprovider.AWSMachineProviderConfig)
+		}
+		data, err := awstfvars.TFVars(masterConfigs, workerConfigs)
 		if err != nil {
 			return errors.Wrapf(err, "failed to get %s Terraform variables", platform)
 		}
@@ -127,7 +136,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
 			Data: data,
 		})
 	case libvirt.Name:
-		masters, err := mastersAsset.StructuredMachines()
+		masters, err := mastersAsset.Machines()
 		if err != nil {
 			return err
 		}
@@ -146,7 +155,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
 			Data: data,
 		})
 	case openstack.Name:
-		masters, err := mastersAsset.StructuredMachines()
+		masters, err := mastersAsset.Machines()
 		if err != nil {
 			return err
 		}
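A note on the worker loop added above: a MachineSet nests its provider config one level deeper than a Machine (`Spec.Template.Spec.ProviderSpec.Value` versus `Spec.ProviderSpec.Value`), and the installer reaches it with a bare type assertion. A defensive variant — a hypothetical helper, not what the PR ships — would report a mismatched type as an error rather than panicking:

```go
package tfvarshelpers // hypothetical package, for illustration only

import (
	"fmt"

	machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1beta1"
)

// awsProviderConfig extracts the AWS provider config embedded in a decoded
// MachineSet, returning an error instead of panicking on a type mismatch.
func awsProviderConfig(set *machineapi.MachineSet) (*awsprovider.AWSMachineProviderConfig, error) {
	obj := set.Spec.Template.Spec.ProviderSpec.Value.Object
	cfg, ok := obj.(*awsprovider.AWSMachineProviderConfig)
	if !ok {
		return nil, fmt.Errorf("unexpected worker provider config type %T", obj)
	}
	return cfg, nil
}
```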
diff --git a/pkg/asset/ignition/bootstrap/bootstrap.go b/pkg/asset/ignition/bootstrap/bootstrap.go
index 5e8e2fcf3fe..ba186349a0b 100644
--- a/pkg/asset/ignition/bootstrap/bootstrap.go
+++ b/pkg/asset/ignition/bootstrap/bootstrap.go
@@ -57,6 +57,7 @@ func (a *Bootstrap) Dependencies() []asset.Asset {
 		&kubeconfig.AdminClient{},
 		&kubeconfig.Kubelet{},
 		&machines.Master{},
+		&machines.Worker{},
 		&manifests.Manifests{},
 		&manifests.Openshift{},
 		&tls.AdminKubeConfigCABundle{},
@@ -368,6 +369,7 @@ func (a *Bootstrap) addParentFiles(dependencies asset.Parents) {
 		&manifests.Manifests{},
 		&manifests.Openshift{},
 		&machines.Master{},
+		&machines.Worker{},
 	} {
 		dependencies.Get(asset)
 		a.Config.Storage.Files = append(a.Config.Storage.Files, ignition.FilesFromAsset(rootDir, "root", 0644, asset)...)
diff --git a/pkg/asset/machines/machineconfig/manifest.go b/pkg/asset/machines/machineconfig/manifest.go
index 9b1a4cb29ff..8970e63cdc4 100644
--- a/pkg/asset/machines/machineconfig/manifest.go
+++ b/pkg/asset/machines/machineconfig/manifest.go
@@ -46,8 +46,12 @@ func Manifests(configs []*mcfgv1.MachineConfig, role, directory string) ([]*asse
 }

 // IsManifest tests whether the specified filename is a MachineConfig manifest.
-func IsManifest(role, filename string) bool {
-	return fmt.Sprintf(machineConfigFileName, role) == filename
+func IsManifest(filename string) (bool, error) {
+	matched, err := filepath.Match(machineConfigFileNamePattern, filename)
+	if err != nil {
+		return false, err
+	}
+	return matched, nil
 }

 // Load loads the MachineConfig manifests.
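For readers unfamiliar with `filepath.Match`: it returns an error only for a malformed pattern, so with a well-formed constant such as `machineConfigFileNamePattern` (defined elsewhere in this PR; its exact value is not shown in this hunk) the error branch is effectively unreachable. A standalone sketch, assuming a pattern of roughly this shape:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Assumed pattern shape for MachineConfig manifests; the real
	// machineConfigFileNamePattern constant lives elsewhere in the PR.
	pattern := "99_openshift-machineconfig_*.yaml"

	for _, name := range []string{
		"99_openshift-machineconfig_99-master-ssh.yaml",     // matches
		"99_openshift-cluster-api_worker-machineset-0.yaml", // no match
	} {
		matched, err := filepath.Match(pattern, name)
		fmt.Println(name, matched, err) // err is nil for well-formed patterns
	}

	// Only a syntactically bad pattern produces an error:
	_, err := filepath.Match("[", "anything")
	fmt.Println(err) // syntax error in pattern
}
```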
diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go
index 9b2f9e38608..3c38f7238e8 100644
--- a/pkg/asset/machines/master.go
+++ b/pkg/asset/machines/master.go
@@ -218,17 +218,8 @@ func (m *Master) Load(f asset.FileFetcher) (found bool, err error) {
 	return true, nil
 }

-// Machines returns master Machine manifest YAML.
-func (m *Master) Machines() [][]byte {
-	machines := make([][]byte, len(m.MachineFiles))
-	for i, file := range m.MachineFiles {
-		machines[i] = file.Data
-	}
-	return machines
-}
-
-// StructuredMachines returns master Machine manifest structures.
-func (m *Master) StructuredMachines() ([]machineapi.Machine, error) {
+// Machines returns master Machine manifest structures.
+func (m *Master) Machines() ([]machineapi.Machine, error) {
 	scheme := runtime.NewScheme()
 	awsapi.AddToScheme(scheme)
 	libvirtapi.AddToScheme(scheme)
@@ -259,21 +250,28 @@ func (m *Master) StructuredMachines() ([]machineapi.Machine, error) {
 	return machines, nil
 }

-// IsMasterManifest tests whether a file is a manifest that belongs to the
-// Master Machines asset.
-func IsMasterManifest(file *asset.File) bool {
+// IsMachineManifest tests whether a file is a manifest that belongs to the
+// Master Machines or Worker Machines asset.
+func IsMachineManifest(file *asset.File) bool {
 	if filepath.Dir(file.Filename) != directory {
 		return false
 	}
 	filename := filepath.Base(file.Filename)
-	if filename == masterUserDataFileName {
+	if filename == masterUserDataFileName || filename == workerUserDataFileName {
 		return true
 	}
-	if machineconfig.IsManifest("master", filename) {
+	if matched, err := machineconfig.IsManifest(filename); err != nil {
+		panic(err)
+	} else if matched {
 		return true
 	}
 	if matched, err := filepath.Match(masterMachineFileNamePattern, filename); err != nil {
 		panic("bad format for master machine file name pattern")
+	} else if matched {
+		return true
+	}
+	if matched, err := filepath.Match(workerMachineSetFileNamePattern, filename); err != nil {
+		panic("bad format for worker machine file name pattern")
 	} else {
 		return matched
 	}
diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go
index 885e898e9ae..63b244ef27e 100644
--- a/pkg/asset/machines/worker.go
+++ b/pkg/asset/machines/worker.go
@@ -2,12 +2,21 @@ package machines

 import (
 	"fmt"
+	"os"
 	"path/filepath"

 	"github.com/ghodss/yaml"
+	libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis"
+	libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1alpha1"
+	machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
+	mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
 	"github.com/pkg/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	awsapi "sigs.k8s.io/cluster-api-provider-aws/pkg/apis"
+	awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1beta1"
+	openstackapi "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis"
+	openstackprovider "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1"

 	"github.com/openshift/installer/pkg/asset"
 	"github.com/openshift/installer/pkg/asset/ignition/machine"
@@ -23,17 +32,22 @@ import (
 	nonetypes "github.com/openshift/installer/pkg/types/none"
 	openstacktypes "github.com/openshift/installer/pkg/types/openstack"
 	vspheretypes "github.com/openshift/installer/pkg/types/vsphere"
-	mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
 )

 const (
-	// workerMachineSetFileName is the filename used for the worker MachineSet manifest.
-	workerMachineSetFileName = "99_openshift-cluster-api_worker-machineset.yaml"
+	// workerMachineSetFileName is the format string for constructing the worker MachineSet filenames.
+	workerMachineSetFileName = "99_openshift-cluster-api_worker-machineset-%s.yaml"

 	// workerUserDataFileName is the filename used for the worker user-data secret.
 	workerUserDataFileName = "99_openshift-cluster-api_worker-user-data-secret.yaml"
 )

+var (
+	workerMachineSetFileNamePattern = fmt.Sprintf(workerMachineSetFileName, "*")
+
+	_ asset.WritableAsset = (*Worker)(nil)
+)
+
 func defaultAWSMachinePoolPlatform() awstypes.MachinePool {
 	return awstypes.MachinePool{
 		EC2RootVolume: awstypes.EC2RootVolume{
@@ -57,11 +71,9 @@ func defaultOpenStackMachinePoolPlatform(flavor string) openstacktypes.MachinePo
 type Worker struct {
 	UserDataFile       *asset.File
 	MachineConfigFiles []*asset.File
-	MachineSetFile     *asset.File
+	MachineSetFiles    []*asset.File
 }

-var _ asset.Asset = (*Worker)(nil)
-
 // Name returns a human friendly name for the Worker Asset.
 func (w *Worker) Name() string {
 	return "Worker Machines"
@@ -172,44 +184,88 @@ func (w *Worker) Generate(dependencies asset.Parents) error {
 		return errors.Wrap(err, "failed to create MachineConfig manifests for worker machines")
 	}

-	if len(machineSets) == 0 {
-		return nil
-	}
-	list := &metav1.List{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "v1",
-			Kind:       "List",
-		},
-		Items: make([]runtime.RawExtension, len(machineSets)),
-	}
-	for i, set := range machineSets {
-		list.Items[i] = runtime.RawExtension{Object: set}
-	}
-	data, err = yaml.Marshal(list)
-	if err != nil {
-		return errors.Wrap(err, "failed to marshal")
-	}
-	w.MachineSetFile = &asset.File{
-		Filename: filepath.Join(directory, workerMachineSetFileName),
-		Data:     data,
+	w.MachineSetFiles = make([]*asset.File, len(machineSets))
+	padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", len(machineSets))))
+	for i, machineSet := range machineSets {
+		data, err := yaml.Marshal(machineSet)
+		if err != nil {
+			return errors.Wrapf(err, "marshal worker %d", i)
+		}
+
+		padded := fmt.Sprintf(padFormat, i)
+		w.MachineSetFiles[i] = &asset.File{
+			Filename: filepath.Join(directory, fmt.Sprintf(workerMachineSetFileName, padded)),
+			Data:     data,
+		}
 	}
+
 	return nil
 }

 // Files returns the files generated by the asset.
 func (w *Worker) Files() []*asset.File {
-	files := make([]*asset.File, 0, 1+len(w.MachineConfigFiles)+1)
+	files := make([]*asset.File, 0, 1+len(w.MachineConfigFiles)+len(w.MachineSetFiles))
 	if w.UserDataFile != nil {
 		files = append(files, w.UserDataFile)
 	}
 	files = append(files, w.MachineConfigFiles...)
-	if w.MachineSetFile != nil {
-		files = append(files, w.MachineSetFile)
-	}
+	files = append(files, w.MachineSetFiles...)
 	return files
 }

-// Load returns false since this asset is not written to disk by the installer.
+// Load reads the asset files from disk.
 func (w *Worker) Load(f asset.FileFetcher) (found bool, err error) {
-	return false, nil
+	file, err := f.FetchByName(filepath.Join(directory, workerUserDataFileName))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	w.UserDataFile = file
+
+	w.MachineConfigFiles, err = machineconfig.Load(f, "worker", directory)
+	if err != nil {
+		return true, err
+	}
+
+	fileList, err := f.FetchByPattern(filepath.Join(directory, workerMachineSetFileNamePattern))
+	if err != nil {
+		return true, err
+	}
+
+	w.MachineSetFiles = fileList
+	return true, nil
+}
+
+// MachineSets returns MachineSet manifest structures.
+func (w *Worker) MachineSets() ([]machineapi.MachineSet, error) {
+	scheme := runtime.NewScheme()
+	awsapi.AddToScheme(scheme)
+	libvirtapi.AddToScheme(scheme)
+	openstackapi.AddToScheme(scheme)
+	decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(
+		awsprovider.SchemeGroupVersion,
+		libvirtprovider.SchemeGroupVersion,
+		openstackprovider.SchemeGroupVersion,
+	)
+
+	machineSets := []machineapi.MachineSet{}
+	for i, file := range w.MachineSetFiles {
+		machineSet := &machineapi.MachineSet{}
+		err := yaml.Unmarshal(file.Data, &machineSet)
+		if err != nil {
+			return machineSets, errors.Wrapf(err, "unmarshal worker %d", i)
+		}
+
+		obj, _, err := decoder.Decode(machineSet.Spec.Template.Spec.ProviderSpec.Value.Raw, nil, nil)
+		if err != nil {
+			return machineSets, errors.Wrapf(err, "unmarshal worker %d", i)
+		}

+		machineSet.Spec.Template.Spec.ProviderSpec.Value = &runtime.RawExtension{Object: obj}
+		machineSets = append(machineSets, *machineSet)
+	}
+
+	return machineSets, nil
 }
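The `padFormat` construction in `Generate` above is dense: it builds a zero-padding format whose width is the number of digits in the machine-set count, so that the generated filenames sort lexically in creation order. A standalone illustration:

```go
package main

import "fmt"

func main() {
	for _, count := range []int{3, 12, 150} {
		// Width = number of digits in the count, e.g. 12 -> "%02d".
		padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", count)))
		fmt.Println(padFormat,
			fmt.Sprintf(padFormat, 0),
			fmt.Sprintf(padFormat, count-1))
	}
	// Output:
	// %01d 0 2
	// %02d 00 11
	// %03d 000 149
}
```

With zero padding, `...machineset-02.yaml` sorts before `...machineset-10.yaml`; without it, a plain `10` would sort before `2`.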
diff --git a/pkg/asset/manifests/openshift.go b/pkg/asset/manifests/openshift.go
index 9ed55e85441..4289440faaa 100644
--- a/pkg/asset/manifests/openshift.go
+++ b/pkg/asset/manifests/openshift.go
@@ -39,7 +39,6 @@ func (o *Openshift) Name() string {
 func (o *Openshift) Dependencies() []asset.Asset {
 	return []asset.Asset{
 		&installconfig.InstallConfig{},
-		&machines.Worker{},
 		&password.KubeadminPassword{},

 		&openshift.BindingDiscovery{},
@@ -53,8 +52,7 @@ func (o *Openshift) Generate(dependencies asset.Parents) error {
 	installConfig := &installconfig.InstallConfig{}
 	kubeadminPassword := &password.KubeadminPassword{}
-	worker := &machines.Worker{}
-	dependencies.Get(installConfig, worker, kubeadminPassword)
+	dependencies.Get(installConfig, kubeadminPassword)

 	var cloudCreds cloudCredsSecretData
 	platform := installConfig.Config.Platform.Name()
 	switch platform {
@@ -133,7 +131,6 @@ func (o *Openshift) Generate(dependencies asset.Parents) error {
 			Data: data,
 		})
 	}
-	o.FileList = append(o.FileList, worker.Files()...)

 	asset.SortFiles(o.FileList)

@@ -153,7 +150,7 @@ func (o *Openshift) Load(f asset.FileFetcher) (bool, error) {
 	}

 	for _, file := range fileList {
-		if machines.IsMasterManifest(file) {
+		if machines.IsMachineManifest(file) {
 			continue
 		}

diff --git a/pkg/asset/store/assetcreate_test.go b/pkg/asset/store/assetcreate_test.go
index e72d743c648..0c6c878619b 100644
--- a/pkg/asset/store/assetcreate_test.go
+++ b/pkg/asset/store/assetcreate_test.go
@@ -88,6 +88,7 @@ func TestCreatedAssetsAreNotDirty(t *testing.T) {
 	emptyAssets := map[string]bool{
 		"Master Machines": true, // no files for the 'none' platform
+		"Worker Machines": true, // no files for the 'none' platform
 		"Metadata":        true, // read-only
 	}

 	for _, a := range tc.targets {
diff --git a/pkg/asset/targets/targets.go b/pkg/asset/targets/targets.go
index e63819d2ca2..a91db2e95b4 100644
--- a/pkg/asset/targets/targets.go
+++ b/pkg/asset/targets/targets.go
@@ -23,6 +23,7 @@ var (
 	// Manifests are the manifests targeted assets.
 	Manifests = []asset.WritableAsset{
 		&machines.Master{},
+		&machines.Worker{},
 		&manifests.Manifests{},
 		&manifests.Openshift{},
 	}
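In the `pkg/tfvars/aws` change below, worker zones are collected into a map-backed set before being emitted. One property worth noting: Go map iteration order is unspecified, so the resulting `aws_worker_availability_zones` list may come out in a different order on each run. A sorted variant — a hypothetical sketch, not what the PR does — would make the generated tfvars deterministic:

```go
package main

import (
	"fmt"
	"sort"
)

// uniqueSortedZones dedups zone names and sorts them for stable output.
// Hypothetical alternative to the map-iteration approach in the diff below.
func uniqueSortedZones(zones []string) []string {
	set := map[string]struct{}{}
	for _, zone := range zones {
		set[zone] = struct{}{}
	}
	out := make([]string, 0, len(set))
	for zone := range set {
		out = append(out, zone)
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(uniqueSortedZones([]string{"us-east-1b", "us-east-1a", "us-east-1b"}))
	// Output: [us-east-1a us-east-1b]
}
```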
diff --git a/pkg/tfvars/aws/aws.go b/pkg/tfvars/aws/aws.go
index 456eba934e1..84e683f0796 100644
--- a/pkg/tfvars/aws/aws.go
+++ b/pkg/tfvars/aws/aws.go
@@ -11,19 +11,20 @@ import (
 )

 type config struct {
-	AMI                   string            `json:"aws_ami"`
-	ExtraTags             map[string]string `json:"aws_extra_tags,omitempty"`
-	BootstrapInstanceType string            `json:"aws_bootstrap_instance_type,omitempty"`
-	MasterInstanceType    string            `json:"aws_master_instance_type,omitempty"`
-	AvailabilityZones     []string          `json:"aws_master_availability_zones"`
-	IOPS                  int64             `json:"aws_master_root_volume_iops"`
-	Size                  int64             `json:"aws_master_root_volume_size,omitempty"`
-	Type                  string            `json:"aws_master_root_volume_type,omitempty"`
-	Region                string            `json:"aws_region,omitempty"`
+	AMI                     string            `json:"aws_ami"`
+	ExtraTags               map[string]string `json:"aws_extra_tags,omitempty"`
+	BootstrapInstanceType   string            `json:"aws_bootstrap_instance_type,omitempty"`
+	MasterInstanceType      string            `json:"aws_master_instance_type,omitempty"`
+	MasterAvailabilityZones []string          `json:"aws_master_availability_zones"`
+	WorkerAvailabilityZones []string          `json:"aws_worker_availability_zones"`
+	IOPS                    int64             `json:"aws_master_root_volume_iops"`
+	Size                    int64             `json:"aws_master_root_volume_size,omitempty"`
+	Type                    string            `json:"aws_master_root_volume_type,omitempty"`
+	Region                  string            `json:"aws_region,omitempty"`
 }

 // TFVars generates AWS-specific Terraform variables launching the cluster.
-func TFVars(masterConfigs []*v1beta1.AWSMachineProviderConfig) ([]byte, error) {
+func TFVars(masterConfigs []*v1beta1.AWSMachineProviderConfig, workerConfigs []*v1beta1.AWSMachineProviderConfig) ([]byte, error) {
 	masterConfig := masterConfigs[0]

 	tags := make(map[string]string, len(masterConfig.Tags))
@@ -31,9 +32,19 @@ func TFVars(masterConfigs []*v1beta1.AWSMachineProviderConfig) ([]byte, error) {
 		tags[tag.Name] = tag.Value
 	}

-	availabilityZones := make([]string, len(masterConfigs))
+	masterAvailabilityZones := make([]string, len(masterConfigs))
 	for i, c := range masterConfigs {
-		availabilityZones[i] = c.Placement.AvailabilityZone
+		masterAvailabilityZones[i] = c.Placement.AvailabilityZone
+	}
+
+	exists := struct{}{}
+	availabilityZoneMap := map[string]struct{}{}
+	for _, c := range workerConfigs {
+		availabilityZoneMap[c.Placement.AvailabilityZone] = exists
+	}
+	workerAvailabilityZones := make([]string, 0, len(availabilityZoneMap))
+	for zone := range availabilityZoneMap {
+		workerAvailabilityZones = append(workerAvailabilityZones, zone)
 	}

 	if len(masterConfig.BlockDevices) == 0 {
@@ -60,14 +71,15 @@ func TFVars(masterConfigs []*v1beta1.AWSMachineProviderConfig) ([]byte, error) {
 	instanceClass := defaults.InstanceClass(masterConfig.Placement.Region)

 	cfg := &config{
-		Region:                masterConfig.Placement.Region,
-		ExtraTags:             tags,
-		AMI:                   *masterConfig.AMI.ID,
-		AvailabilityZones:     availabilityZones,
-		BootstrapInstanceType: fmt.Sprintf("%s.large", instanceClass),
-		MasterInstanceType:    masterConfig.InstanceType,
-		Size:                  *rootVolume.EBS.VolumeSize,
-		Type:                  *rootVolume.EBS.VolumeType,
+		Region:                  masterConfig.Placement.Region,
+		ExtraTags:               tags,
+		AMI:                     *masterConfig.AMI.ID,
+		MasterAvailabilityZones: masterAvailabilityZones,
+		WorkerAvailabilityZones: workerAvailabilityZones,
+		BootstrapInstanceType:   fmt.Sprintf("%s.large", instanceClass),
+		MasterInstanceType:      masterConfig.InstanceType,
+		Size:                    *rootVolume.EBS.VolumeSize,
+		Type:                    *rootVolume.EBS.VolumeType,
 	}

 	if rootVolume.EBS.Iops != nil {