Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
* 'master' of https://github.com/Azure/acs-engine: (44 commits)
  remove duplicate ebtables (Azure#3308)
  Kubernetes: Remove Windows-specific foo for control plane config (Azure#3240)
  add k8s 1.8.14 (Azure#3303)
  run /usr/lib/apt/apt.systemd.daily in background (Azure#3304)
  add annotation to secrets field (Azure#3292)
  fix(oms): pull oms directly from docker (Azure#3294)
  ensure params vals are bool, with test (Azure#3285)
  udpate to latest 16.04 LTS image (Azure#3286)
  Enable openshift e2e tests on centos (Azure#2910)
  remove docker dependency from hyperkube extract (Azure#3296)
  remove pathological test (Azure#3291)
  Refactor validateDNSPrefix function in one place (Azure#3276)
  remove unnecessary nil check (Azure#3290)
  Fixing panic issue in Validate when properties are nil (Azure#3242)
  Updating the omsagent yaml file to include new changes for node zero dependency (Azure#3277)
  revert Azure CNI to 1.0.4 (Azure#3289)
  Add e2e test for openshift vnet (Azure#3274)
  Support upgrading a Kubernetes cluster that contains Virtual Machine Scale Sets (Azure#3223)
  Set extension for custom DNS (Azure#3264)
  don't exit CSE on apt.systemd.daily error (Azure#3278)
  ...
  • Loading branch information
PaulCharlton committed Jun 20, 2018
2 parents 6c2ddab + bbc665a commit 5579523
Show file tree
Hide file tree
Showing 80 changed files with 3,191 additions and 1,121 deletions.
90 changes: 89 additions & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -412,6 +412,70 @@ jobs:
path: /go/src/github.com/Azure/acs-engine/_logs
- store_artifacts:
path: /go/src/github.com/Azure/acs-engine/_output
# E2E job: deploy an OpenShift 3.9 cluster on the RHEL distro into a
# pre-created virtual network (CREATE_VNET=true) and run the ginkgo
# openshift e2e suite against it.
openshift-3.9-rhel-e2e-vnet:
  working_directory: /go/src/github.com/Azure/acs-engine
  docker:
    - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9
      environment:
        GOPATH: /go
  steps:
    - checkout
    # Export the test configuration into $BASH_ENV so every later step
    # inherits it (CircleCI sources $BASH_ENV before each `run`).
    # Note: single quotes keep ${...} placeholders unexpanded here; they
    # are resolved when $BASH_ENV is sourced, picking up project-level
    # environment variables such as CLEANUP_ON_EXIT and the E2E
    # subscription/service-principal credentials.
    - run: |
        echo 'export TIMEOUT=30m' >> $BASH_ENV
        echo 'export DISTRO=openshift39_rhel' >> $BASH_ENV
        echo 'export LOCATION=eastus' >> $BASH_ENV
        echo 'export ORCHESTRATOR_RELEASE=3.9' >> $BASH_ENV
        echo 'export ORCHESTRATOR_VERSION=3.9.0' >> $BASH_ENV
        echo 'export CLUSTER_DEFINITION=examples/e2e-tests/openshift/definition.json' >> $BASH_ENV
        echo 'export CREATE_VNET=true' >> $BASH_ENV
        echo 'export CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT}' >> $BASH_ENV
        echo 'export RETAIN_SSH=false' >> $BASH_ENV
        echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV
        echo 'export CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV
        echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV
    - run:
        name: compile
        command: make build-binary
    - run:
        name: ginkgo openshift e2e tests
        command: make test-openshift
        no_output_timeout: "30m"
    # Preserve logs and generated ARM templates as build artifacts for
    # post-mortem debugging of failed runs.
    - store_artifacts:
        path: /go/src/github.com/Azure/acs-engine/_logs
    - store_artifacts:
        path: /go/src/github.com/Azure/acs-engine/_output
# E2E job: deploy an OpenShift 3.9 cluster on the CentOS distro using the
# stock example apimodel (no custom vnet: CREATE_VNET=false) and run the
# ginkgo openshift e2e suite against it.
openshift-3.9-centos-e2e:
  working_directory: /go/src/github.com/Azure/acs-engine
  docker:
    - image: registry.svc.ci.openshift.org/ci/acs-engine-tests:v3.9
      environment:
        GOPATH: /go
  steps:
    - checkout
    # Export the test configuration into $BASH_ENV so every later step
    # inherits it. Unlike the rhel-e2e-vnet job, CLEANUP_ON_EXIT is
    # hardcoded to false here rather than taken from the project
    # environment — presumably intentional, but worth confirming.
    - run: |
        echo 'export TIMEOUT=30m' >> $BASH_ENV
        echo 'export DISTRO=openshift39_centos' >> $BASH_ENV
        echo 'export LOCATION=eastus' >> $BASH_ENV
        echo 'export ORCHESTRATOR_RELEASE=3.9' >> $BASH_ENV
        echo 'export ORCHESTRATOR_VERSION=3.9.0' >> $BASH_ENV
        echo 'export CLUSTER_DEFINITION=examples/openshift.json' >> $BASH_ENV
        echo 'export CREATE_VNET=false' >> $BASH_ENV
        echo 'export CLEANUP_ON_EXIT=false' >> $BASH_ENV
        echo 'export RETAIN_SSH=false' >> $BASH_ENV
        echo 'export SUBSCRIPTION_ID=${SUBSCRIPTION_ID_E2E_KUBERNETES}' >> $BASH_ENV
        echo 'export CLIENT_ID=${SERVICE_PRINCIPAL_CLIENT_ID_E2E_KUBERNETES}' >> $BASH_ENV
        echo 'export CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET_E2E_KUBERNETES}' >> $BASH_ENV
    - run:
        name: compile
        command: make build-binary
    - run:
        name: ginkgo openshift e2e tests
        command: make test-openshift
        no_output_timeout: "30m"
    # Preserve logs and generated ARM templates as build artifacts for
    # post-mortem debugging of failed runs.
    - store_artifacts:
        path: /go/src/github.com/Azure/acs-engine/_logs
    - store_artifacts:
        path: /go/src/github.com/Azure/acs-engine/_output
workflows:
version: 2
build_and_test_pr:
Expand Down Expand Up @@ -499,6 +563,18 @@ workflows:
filters:
branches:
ignore: master
- openshift-3.9-rhel-e2e-vnet:
requires:
- pr-e2e-hold
filters:
branches:
ignore: master
- openshift-3.9-centos-e2e:
requires:
- pr-e2e-hold
filters:
branches:
ignore: master
- swarm-e2e:
requires:
- pr-e2e-hold
Expand Down Expand Up @@ -589,9 +665,21 @@ workflows:
filters:
branches:
only: master
- openshift-3.9-rhel-e2e-vnet:
requires:
- test
filters:
branches:
only: master
- openshift-3.9-centos-e2e:
requires:
- test
filters:
branches:
only: master
- dcos-e2e:
requires:
- test
filters:
branches:
only: master
88 changes: 77 additions & 11 deletions cmd/deploy.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package cmd

import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
Expand Down Expand Up @@ -51,6 +52,7 @@ type deployCmd struct {
caPrivateKeyPath string
classicMode bool
parametersOnly bool
set []string

// derived
containerService *api.ContainerService
Expand All @@ -71,12 +73,18 @@ func newDeployCmd() *cobra.Command {
Short: deployShortDescription,
Long: deployLongDescription,
RunE: func(cmd *cobra.Command, args []string) error {
if err := dc.validate(cmd, args); err != nil {
if err := dc.validateArgs(cmd, args); err != nil {
log.Fatalf(fmt.Sprintf("error validating deployCmd: %s", err.Error()))
}
if err := dc.load(cmd, args); err != nil {
if err := dc.mergeAPIModel(); err != nil {
log.Fatalf(fmt.Sprintf("error merging API model in deployCmd: %s", err.Error()))
}
if err := dc.loadAPIModel(cmd, args); err != nil {
log.Fatalln("failed to load apimodel: %s", err.Error())
}
if _, _, err := dc.validateApimodel(); err != nil {
log.Fatalln("Failed to validate the apimodel after populating values: %s", err.Error())
}
return dc.run()
},
}
Expand All @@ -91,13 +99,14 @@ func newDeployCmd() *cobra.Command {
f.StringVarP(&dc.resourceGroup, "resource-group", "g", "", "resource group to deploy to (will use the DNS prefix from the apimodel if not specified)")
f.StringVarP(&dc.location, "location", "l", "", "location to deploy to (required)")
f.BoolVarP(&dc.forceOverwrite, "force-overwrite", "f", false, "automatically overwrite existing files in the output directory")
f.StringArrayVar(&dc.set, "set", []string{}, "set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2)")

addAuthFlags(&dc.authArgs, f)

return deployCmd
}

func (dc *deployCmd) validate(cmd *cobra.Command, args []string) error {
func (dc *deployCmd) validateArgs(cmd *cobra.Command, args []string) error {
var err error

dc.locale, err = i18n.LoadTranslations()
Expand Down Expand Up @@ -129,7 +138,29 @@ func (dc *deployCmd) validate(cmd *cobra.Command, args []string) error {
return nil
}

func (dc *deployCmd) load(cmd *cobra.Command, args []string) error {
// mergeAPIModel merges any --set key=value overrides into the apimodel
// referenced by dc.apimodelPath. When overrides are present, a merged
// apimodel file is generated on disk and dc.apimodelPath is updated to
// point at it; with no --set values the method is a no-op.
//
// Fixes: dropped the redundant nil check before len() (len of a nil
// slice is 0), unwrapped fmt.Errorf(fmt.Sprintf(...)) double-formatting,
// and replaced log.Infoln(fmt.Sprintf(...)) with log.Infof.
func (dc *deployCmd) mergeAPIModel() error {
	// Nothing to do unless the --set flag was used at least once.
	if len(dc.set) == 0 {
		return nil
	}

	m := make(map[string]transform.APIModelValue)
	transform.MapValues(m, dc.set)

	// Overrides the api model and generates a new file.
	var err error
	dc.apimodelPath, err = transform.MergeValuesWithAPIModel(dc.apimodelPath, m)
	if err != nil {
		return fmt.Errorf("error merging --set values with the api model: %s", err.Error())
	}

	log.Infof("new api model file has been generated during merge: %s", dc.apimodelPath)

	return nil
}

func (dc *deployCmd) loadAPIModel(cmd *cobra.Command, args []string) error {
var caCertificateBytes []byte
var caKeyBytes []byte
var err error

apiloader := &api.Apiloader{
Expand All @@ -144,6 +175,35 @@ func (dc *deployCmd) load(cmd *cobra.Command, args []string) error {
return fmt.Errorf(fmt.Sprintf("error parsing the api model: %s", err.Error()))
}

if dc.outputDirectory == "" {
if dc.containerService.Properties.MasterProfile != nil {
dc.outputDirectory = path.Join("_output", dc.containerService.Properties.MasterProfile.DNSPrefix)
} else {
dc.outputDirectory = path.Join("_output", dc.containerService.Properties.HostedMasterProfile.DNSPrefix)
}
}

// consume dc.caCertificatePath and dc.caPrivateKeyPath
if (dc.caCertificatePath != "" && dc.caPrivateKeyPath == "") || (dc.caCertificatePath == "" && dc.caPrivateKeyPath != "") {
return errors.New("--ca-certificate-path and --ca-private-key-path must be specified together")
}

if dc.caCertificatePath != "" {
if caCertificateBytes, err = ioutil.ReadFile(dc.caCertificatePath); err != nil {
return fmt.Errorf(fmt.Sprintf("failed to read CA certificate file: %s", err.Error()))
}
if caKeyBytes, err = ioutil.ReadFile(dc.caPrivateKeyPath); err != nil {
return fmt.Errorf(fmt.Sprintf("failed to read CA private key file: %s", err.Error()))
}

prop := dc.containerService.Properties
if prop.CertificateProfile == nil {
prop.CertificateProfile = &api.CertificateProfile{}
}
prop.CertificateProfile.CaCertificate = string(caCertificateBytes)
prop.CertificateProfile.CaPrivateKey = string(caKeyBytes)
}

if dc.containerService.Location == "" {
dc.containerService.Location = dc.location
} else if dc.containerService.Location != dc.location {
Expand All @@ -163,11 +223,6 @@ func (dc *deployCmd) load(cmd *cobra.Command, args []string) error {
return err
}

_, _, err = validateApimodel(apiloader, dc.containerService, dc.apiVersion)
if err != nil {
return fmt.Errorf("Failed to validate the apimodel after populating values: %s", err)
}

dc.random = rand.New(rand.NewSource(time.Now().UnixNano()))

return nil
Expand Down Expand Up @@ -293,9 +348,15 @@ func autofillApimodel(dc *deployCmd) error {
return nil
}

func validateApimodel(apiloader *api.Apiloader, containerService *api.ContainerService, apiVersion string) (*api.ContainerService, string, error) {
func (dc *deployCmd) validateApimodel() (*api.ContainerService, string, error) {
apiloader := &api.Apiloader{
Translator: &i18n.Translator{
Locale: dc.locale,
},
}

// This isn't terribly elegant, but it's the easiest way to go for now w/o duplicating a bunch of code
rawVersionedAPIModel, err := apiloader.SerializeContainerService(containerService, apiVersion)
rawVersionedAPIModel, err := apiloader.SerializeContainerService(dc.containerService, dc.apiVersion)
if err != nil {
return nil, "", err
}
Expand Down Expand Up @@ -366,5 +427,10 @@ func (dc *deployCmd) run() error {
log.Fatalln(err)
}

if dc.containerService.Properties.OrchestratorProfile.OrchestratorType == api.OpenShift {
// TODO: when the Azure client library is updated, read this from the template `masterFQDN` output
fmt.Printf("OpenShift web UI available at https://%s.%s.cloudapp.azure.com:8443/\n", dc.containerService.Properties.MasterProfile.DNSPrefix, dc.location)
}

return nil
}
63 changes: 61 additions & 2 deletions cmd/deploy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ func TestValidate(t *testing.T) {
}

for _, c := range cases {
err = c.dc.validate(r, c.args)
err = c.dc.validateArgs(r, c.args)
if err != nil && c.expectedErr != nil {
if err.Error() != c.expectedErr.Error() {
t.Fatalf("expected validate deploy command to return error %s, but instead got %s", c.expectedErr.Error(), err.Error())
Expand Down Expand Up @@ -447,7 +447,7 @@ func testAutodeployCredentialHandling(t *testing.T, useManagedIdentity bool, cli
// cleanup, since auto-populations creates dirs and saves the SSH private key that it might create
defer os.RemoveAll(deployCmd.outputDirectory)

cs, _, err = validateApimodel(apiloader, cs, ver)
cs, _, err = deployCmd.validateApimodel()
if err != nil {
t.Fatalf("unexpected error validating apimodel after populating defaults: %s", err)
}
Expand All @@ -464,3 +464,62 @@ func testAutodeployCredentialHandling(t *testing.T, useManagedIdentity bool, cli
}
}
}

// TestDeployCmdMergeAPIModel exercises mergeAPIModel with no --set flag,
// a single comma-separated --set value, multiple --set flags, and an
// array-indexed override.
//
// Renamed from testDeployCmdMergeAPIModel: the lowercase prefix meant
// `go test` never discovered or ran this test.
func TestDeployCmdMergeAPIModel(t *testing.T) {
	// No --set flag at all: must be a no-op that succeeds.
	d := &deployCmd{}
	d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
	err := d.mergeAPIModel()
	if err != nil {
		t.Fatalf("unexpected error calling mergeAPIModel with no --set flag defined: %s", err.Error())
	}

	// One --set flag carrying two comma-separated key=value pairs.
	d = &deployCmd{}
	d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
	d.set = []string{"masterProfile.count=3,linuxProfile.adminUsername=testuser"}
	err = d.mergeAPIModel()
	if err != nil {
		t.Fatalf("unexpected error calling mergeAPIModel with one --set flag: %s", err.Error())
	}

	// Multiple --set flags, one key=value pair each.
	d = &deployCmd{}
	d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
	d.set = []string{"masterProfile.count=3", "linuxProfile.adminUsername=testuser"}
	err = d.mergeAPIModel()
	if err != nil {
		t.Fatalf("unexpected error calling mergeAPIModel with multiple --set flags: %s", err.Error())
	}

	// Array-indexed override of an agent pool property.
	d = &deployCmd{}
	d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
	d.set = []string{"agentPoolProfiles[0].count=1"}
	err = d.mergeAPIModel()
	if err != nil {
		t.Fatalf("unexpected error calling mergeAPIModel with one --set flag to override an array property: %s", err.Error())
	}
}

// TestDeployCmdLoadAPIModel runs the validateArgs -> mergeAPIModel ->
// loadAPIModel pipeline against the simple kubernetes test apimodel with
// a fake subscription ID and an array-indexed --set override.
//
// Fixes: renamed from testDeployCmdMLoadAPIModel (the lowercase prefix
// meant `go test` never ran it, and the stray 'M' was a typo), and the
// errors from validateArgs and mergeAPIModel are now checked instead of
// being silently discarded.
func TestDeployCmdLoadAPIModel(t *testing.T) {
	d := &deployCmd{}
	r := &cobra.Command{}
	f := r.Flags()

	addAuthFlags(&d.authArgs, f)

	fakeRawSubscriptionID := "6dc93fae-9a76-421f-bbe5-cc6460ea81cb"
	fakeSubscriptionID, err := uuid.FromString(fakeRawSubscriptionID)
	if err != nil {
		t.Fatalf("Invalid SubscriptionId in Test: %s", err)
	}

	d.apimodelPath = "../pkg/acsengine/testdata/simple/kubernetes.json"
	d.set = []string{"agentPoolProfiles[0].count=1"}
	d.SubscriptionID = fakeSubscriptionID
	d.rawSubscriptionID = fakeRawSubscriptionID

	if err = d.validateArgs(r, []string{"../pkg/acsengine/testdata/simple/kubernetes.json"}); err != nil {
		t.Fatalf("unexpected error validating args: %s", err.Error())
	}
	if err = d.mergeAPIModel(); err != nil {
		t.Fatalf("unexpected error merging api model: %s", err.Error())
	}
	if err = d.loadAPIModel(r, []string{"../pkg/acsengine/testdata/simple/kubernetes.json"}); err != nil {
		t.Fatalf("unexpected error loading api model: %s", err.Error())
	}
}
Loading

0 comments on commit 5579523

Please sign in to comment.