diff --git a/ci-operator/config/openshift/installer/master.yaml b/ci-operator/config/openshift/installer/master.yaml
index f21b0826b420..9f451bb5b4b1 100644
--- a/ci-operator/config/openshift/installer/master.yaml
+++ b/ci-operator/config/openshift/installer/master.yaml
@@ -4,41 +4,15 @@ base_images:
     name: origin-v4.0
     namespace: openshift
     tag: base
-  base-smoke:
-    cluster: https://api.ci.openshift.org
-    namespace: openshift
-    name: release
-    tag: bazel
-binary_build_commands: go build ./installer/cmd/tectonic
 canonical_go_repository: github.com/openshift/installer
 images:
-- dockerfile_path: images/tectonic-installer/Dockerfile.ci
+- dockerfile_path: images/installer/Dockerfile.ci
   from: base
   inputs:
-    bin:
-      paths:
-      - destination_dir: .
-        source_path: /go/src/github.com/openshift/installer/tectonic
     root:
       as:
       - build
   to: installer
-- dockerfile_path: images/tectonic-installer/Dockerfile.ci
-  optional: true
-  from: base-smoke
-  inputs:
-    bin:
-      paths:
-      - destination_dir: .
-        source_path: /go/src/github.com/openshift/installer/tectonic
-    root:
-      as:
-      - build
-  to: installer-bazel
-- dockerfile_path: images/installer-origin-release/Dockerfile.ci
-  optional: true
-  from: installer-bazel
-  to: installer-smoke
 resources:
   '*':
     limits:
@@ -47,13 +21,6 @@ resources:
     requests:
       cpu: 100m
       memory: 200Mi
-  bin:
-    limits:
-      cpu: '7'
-      memory: 9Gi
-    requests:
-      cpu: '3'
-      memory: 7Gi
   unit:
     limits:
       cpu: '7'
@@ -75,7 +42,7 @@ build_root:
     tag: golang-1.10
 tests:
 - as: unit
-  commands: go test ./pkg/... ./installer/pkg/...
+  commands: go test ./pkg/...
   from: src
 - as: gofmt
   commands: IS_CONTAINER=TRUE ./hack/go-fmt.sh .
diff --git a/ci-operator/config/openshift/installer/release-3.11.yaml b/ci-operator/config/openshift/installer/release-3.11.yaml
deleted file mode 100644
index 5c08881ae06c..000000000000
--- a/ci-operator/config/openshift/installer/release-3.11.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-base_images:
-  base:
-    cluster: https://api.ci.openshift.org
-    name: origin-v3.11
-    namespace: openshift
-    tag: base
-binary_build_commands: go build ./installer/cmd/tectonic
-canonical_go_repository: github.com/openshift/installer
-images:
-- dockerfile_path: images/tectonic-installer/Dockerfile.ci
-  from: base
-  inputs:
-    bin:
-      paths:
-      - destination_dir: .
-        source_path: /go/src/github.com/openshift/installer/tectonic
-    root:
-      as:
-      - build
-  to: installer
-resources:
-  '*':
-    limits:
-      cpu: '2'
-      memory: 4Gi
-    requests:
-      cpu: 100m
-      memory: 200Mi
-  bin:
-    limits:
-      cpu: '7'
-      memory: 9Gi
-    requests:
-      cpu: '3'
-      memory: 7Gi
-  unit:
-    limits:
-      cpu: '7'
-      memory: 9Gi
-    requests:
-      cpu: '3'
-      memory: 5Gi
-tag_specification:
-  cluster: https://api.ci.openshift.org
-  name: origin-v3.11
-  namespace: openshift
-  tag: ''
-  tag_overrides: {}
-build_root:
-  image_stream_tag:
-    cluster: https://api.ci.openshift.org
-    name: release
-    namespace: openshift
-    tag: golang-1.10
-tests:
-- as: unit
-  commands: go test ./pkg/... ./installer/pkg/...
- from: src diff --git a/ci-operator/jobs/openshift/installer/openshift-installer-master-presubmits.yaml b/ci-operator/jobs/openshift/installer/openshift-installer-master-presubmits.yaml index 02c4204ccb60..8d79fbf45859 100644 --- a/ci-operator/jobs/openshift/installer/openshift-installer-master-presubmits.yaml +++ b/ci-operator/jobs/openshift/installer/openshift-installer-master-presubmits.yaml @@ -1,30 +1,5 @@ presubmits: openshift/installer: - - agent: kubernetes - always_run: true - context: ci/prow/build-tarball - decorate: true - name: pull-ci-openshift-installer-bazel-build-tarball - rerun_command: /test build-tarball - spec: - containers: - - args: - - ./hack/test-bazel-build-tarball.sh - - --action_env=HOME=/tmp - command: - - sh - env: - - name: HOME - value: /tmp - - name: IS_CONTAINER - value: "TRUE" - - name: USER - value: bazel - image: quay.io/coreos/tectonic-builder:bazel-v0.3 - imagePullPolicy: Always - name: "" - resources: {} - trigger: (?m)^/test build-tarball - agent: kubernetes always_run: false branches: @@ -145,63 +120,6 @@ presubmits: - configMap: name: cluster-profile-aws trigger: ((?m)^/test( e2e-aws-all),?(\s+|$)) - - agent: kubernetes - always_run: false - branches: - - master - context: ci/prow/e2e-aws-smoke - decorate: true - name: pull-ci-openshift-installer-master-e2e-aws-smoke - rerun_command: /test e2e-aws-smoke - run_if_changed: ^([^D]|D(D|oD|ocD|ocuD|ocum(D|e(D|n(D|t(D|aD|atD|atiD|atioD)))))*([^Do]|o[^Dc]|oc[^Du]|ocu[^Dm]|ocum([^De]|e([^Dn]|n([^Dt]|t([^Da]|a[^Dt]|at[^Di]|ati[^Do]|atio[^Dn]))))))*(D(D|oD|ocD|ocuD|ocum(D|e(D|n(D|t(D|aD|atD|atiD|atioD)))))*(o|oc|ocu|ocum(e(n(t(a|at|ati|atio)?)?)?)?)?)?$ - skip_cloning: true - spec: - containers: - - command: - - ci-operator - - --give-pr-author-access-to-namespace=true - - --artifact-dir=$(ARTIFACTS) - - --secret-dir=/usr/local/e2e-aws-smoke-cluster-profile - - --template=/usr/local/e2e-aws-smoke - - --target=e2e-aws-smoke - - --give-pr-author-access-to-namespace - env: - - name: JOB_NAME_SAFE - value: e2e-aws-smoke - - name: CLUSTER_TYPE - value: aws - - name: CONFIG_SPEC - valueFrom: - configMapKeyRef: - key: master.yaml - name: ci-operator-openshift-installer - image: ci-operator:latest - imagePullPolicy: Always - name: "" - resources: - limits: - cpu: 500m - requests: - cpu: 10m - volumeMounts: - - mountPath: /usr/local/e2e-aws-smoke - name: job-definition - subPath: cluster-launch-installer-e2e-smoke.yaml - - mountPath: /usr/local/e2e-aws-smoke-cluster-profile - name: cluster-profile - serviceAccountName: ci-operator - volumes: - - configMap: - name: prow-job-cluster-launch-installer-e2e-smoke - name: job-definition - - name: cluster-profile - projected: - sources: - - secret: - name: cluster-secrets-aws - - configMap: - name: cluster-profile-aws - trigger: ((?m)^/test( all| e2e-aws-smoke),?(\s+|$)) - agent: kubernetes always_run: true branches: diff --git a/ci-operator/templates/cluster-launch-installer-e2e-smoke.yaml b/ci-operator/templates/cluster-launch-installer-e2e-smoke.yaml deleted file mode 100644 index 57032f700b13..000000000000 --- a/ci-operator/templates/cluster-launch-installer-e2e-smoke.yaml +++ /dev/null @@ -1,342 +0,0 @@ -kind: Template -apiVersion: template.openshift.io/v1 - -parameters: -- name: JOB_NAME_SAFE - required: true -- name: JOB_NAME_HASH - required: true -- name: NAMESPACE - required: true -- name: IMAGE_CLI - required: true -- name: IMAGE_FORMAT - required: true -- name: IMAGE_INSTALLER - required: true -- name: IMAGE_TESTS - required: true -- name: 
LOCAL_IMAGE_INSTALLER_SMOKE - required: true -- name: CLUSTER_TYPE - required: true -# Ensures the release image is created and tested -- name: RELEASE_IMAGE_LATEST - -objects: - -# We want the cluster to be able to access these images -- kind: RoleBinding - apiVersion: authorization.openshift.io/v1 - metadata: - name: ${JOB_NAME_SAFE}-image-puller - namespace: ${NAMESPACE} - roleRef: - name: system:image-puller - subjects: - - kind: SystemGroup - name: system:unauthenticated - -# The e2e pod spins up a cluster, runs e2e tests, and then cleans up the cluster. -- kind: Pod - apiVersion: v1 - metadata: - name: ${JOB_NAME_SAFE} - namespace: ${NAMESPACE} - annotations: - # we want to gather the teardown logs no matter what - ci-operator.openshift.io/wait-for-container-artifacts: teardown - spec: - restartPolicy: Never - activeDeadlineSeconds: 10800 - terminationGracePeriodSeconds: 900 - volumes: - - name: artifacts - emptyDir: {} - - name: shared-tmp - emptyDir: {} - - name: cluster-profile - secret: - secretName: ${JOB_NAME_SAFE}-cluster-profile - - initContainers: - - name: cli - image: ${IMAGE_CLI} - volumeMounts: - - name: shared-tmp - mountPath: /tmp/shared - command: - - cp - - /usr/bin/oc - - /tmp/shared/oc - - - name: smoke-test - image: ${LOCAL_IMAGE_INSTALLER_SMOKE} - volumeMounts: - - name: shared-tmp - mountPath: /tmp/shared - command: - - cp - - /usr/bin/smoke - - /tmp/shared/smoke - - containers: - - # Once admin.kubeconfig exists, executes shared tests - - name: test - image: ${LOCAL_IMAGE_INSTALLER_SMOKE} - resources: - requests: - cpu: 1 - memory: 300Mi - limits: - cpu: 3 - memory: 2Gi - volumeMounts: - - name: shared-tmp - mountPath: /tmp/shared - - name: cluster-profile - mountPath: /tmp/cluster - - name: artifacts - mountPath: /tmp/artifacts - env: - - name: HOME - value: /tmp/home - - name: USER - value: bazel - - name: KUBECONFIG - value: /tmp/admin.kubeconfig - command: - - /bin/bash - - -c - - | - #!/bin/bash - set -euo pipefail - - trap 'touch /tmp/shared/exit' EXIT - trap 'kill $(jobs -p); exit 0' TERM - - mkdir -p "${HOME}" - - # wait until the setup job creates admin.kubeconfig - while true; do - if [[ -f /tmp/shared/exit ]]; then - echo "Another process exited" 2>&1 - exit 1 - fi - if [[ ! -f /tmp/shared/admin.kubeconfig ]]; then - sleep 15 & wait - continue - fi - break - done - echo "Found shared kubeconfig" - - # don't let clients impact the global kubeconfig - cp /tmp/shared/admin.kubeconfig /tmp/admin.kubeconfig - - PATH=/usr/libexec/origin:$PATH - - # set up cloud provider specific env vars - if [[ "${CLUSTER_TYPE}" == "gcp" ]]; then - export GOOGLE_APPLICATION_CREDENTIALS="/tmp/cluster/gce.json" - export KUBE_SSH_USER=cloud-user - mkdir -p ~/.ssh - cp /tmp/cluster/ssh-privatekey ~/.ssh/google_compute_engine || true - export PROVIDER_ARGS='-provider=gce -gce-zone=us-east1-c -gce-project=openshift-gce-devel-ci' - elif [[ "${CLUSTER_TYPE}" == "aws" ]]; then - region="$( python -c 'import sys, json; print json.load(sys.stdin)["tectonic_aws_region"]' /tmp/cluster/inputs.yaml - ) - mkdir /tmp/artifacts/installer - cp /tmp/cluster/inputs.yaml /tmp/artifacts/installer/ - - echo "Invoking installer ..." - - cd /tmp/cluster - tectonic init --config=inputs.yaml - mv -f ${NAME}/* /tmp/cluster/ - cp config.yaml internal.yaml /tmp/artifacts/installer/ - - tectonic install --dir=. 
--log-level=debug - cp terraform.tfvars /tmp/artifacts/installer/ - - # wait until oc shows up - while true; do - if [[ -f /tmp/exit ]]; then - echo "Interrupted" - cp $KUBECONFIG /tmp/admin.kubeconfig - exit 1 - fi - if [[ ! -f /tmp/oc ]]; then - echo "Waiting for oc binary to show up ..." - sleep 15 & wait - continue - fi - if ! /tmp/oc get nodes 2>/dev/null; then - echo "Waiting for API at $(/tmp/oc whoami --show-server) to respond ..." - sleep 15 & wait - continue - fi - # check multiple namespaces while we are transitioning to the new locations - if /tmp/oc get deploy/router -n tectonic-ingress 2>/dev/null; then - router_namespace=tectonic-ingress - elif /tmp/oc get deploy/router -n openshift-ingress 2>/dev/null; then - router_namespace=openshift-ingress - elif /tmp/oc get deploy/router -n default 2>/dev/null; then - router_namespace=default - else - echo "Waiting for router to be created ..." - sleep 15 & wait - continue - fi - break - done - if ! /tmp/oc wait deploy/router -n "${router_namespace}" --for condition=available --timeout=20m; then - echo "Installation failed" - cp $KUBECONFIG /tmp/admin.kubeconfig - exit 1 - fi - cp $KUBECONFIG /tmp/admin.kubeconfig - echo "Installation successful" - echo "Starting installer smoke tests..." - export SMOKE_KUBECONFIG=${KUBECONFIG} - export SMOKE_MANIFEST_PATHS=/tmp/cluster/generated/manifests - # 3 masters/3 workers/1 bootstrap - export SMOKE_NODE_COUNT=7 - if ! /tmp/smoke -cluster -test.v; then - echo "Smoke tests failed" - exit 1 - fi - echo "Smoke tests passed" - - # Performs cleanup of all created resources - - name: teardown - image: ${LOCAL_IMAGE_INSTALLER_SMOKE} - volumeMounts: - - name: shared-tmp - mountPath: /tmp/shared - - name: cluster-profile - mountPath: /etc/openshift-installer - - name: artifacts - mountPath: /tmp/artifacts - env: - - name: INSTANCE_PREFIX - value: ${NAMESPACE}-${JOB_NAME_HASH} - - name: TYPE - value: ${CLUSTER_TYPE} - - name: KUBECONFIG - value: /tmp/shared/admin.kubeconfig - command: - - /bin/bash - - -c - - | - #!/bin/bash - function teardown() { - set +e - touch /tmp/shared/exit - export PATH=$PATH:/tmp/shared - - echo "Gathering artifacts ..." 
- mkdir -p /tmp/artifacts/pods /tmp/artifacts/nodes /tmp/artifacts/metrics - - oc --request-timeout=5s get nodes -o jsonpath --template '{range .items[*]}{.metadata.name}{"\n"}{end}' > /tmp/nodes - oc --request-timeout=5s get pods --all-namespaces --template '{{ range .items }}{{ $name := .metadata.name }}{{ $ns := .metadata.namespace }}{{ range .spec.containers }}-n {{ $ns }} {{ $name }} -c {{ .name }}{{ "\n" }}{{ end }}{{ range .spec.initContainers }}-n {{ $ns }} {{ $name }} -c {{ .name }}{{ "\n" }}{{ end }}{{ end }}' > /tmp/containers - oc --request-timeout=5s get nodes -o json > /tmp/artifacts/nodes.json - oc --request-timeout=5s get events --all-namespaces -o json > /tmp/artifacts/events.json - oc --request-timeout=5s get pods -l openshift.io/component=api --all-namespaces --template '{{ range .items }}-n {{ .metadata.namespace }} {{ .metadata.name }}{{ "\n" }}{{ end }}' > /tmp/pods-api - - # gather nodes first in parallel since they may contain the most relevant debugging info - while IFS= read -r i; do - mkdir -p /tmp/artifacts/nodes/$i - ( - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/logs/messages | gzip -c > /tmp/artifacts/nodes/$i/messages.gz - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/logs/journal | sed -e 's|.*href="\(.*\)".*|\1|;t;d' > /tmp/journals - while IFS= read -r j; do - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/logs/journal/${j}system.journal | gzip -c > /tmp/artifacts/nodes/$i/journal.gz - done < /tmp/journals - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/metrics | gzip -c > /tmp/artifacts/metrics/node-$i.gz - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/debug/pprof/heap > /tmp/artifacts/nodes/$i/heap - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/logs/secure | gzip -c > /tmp/artifacts/nodes/$i/secure.gz - oc get --request-timeout=20s --raw /api/v1/nodes/$i/proxy/logs/audit | gzip -c > /tmp/artifacts/nodes/$i/audit.gz - ) & - done < /tmp/nodes - - while IFS= read -r i; do - file="$( echo "$i" | cut -d ' ' -f 3 | tr -s ' ' '_' )" - oc exec $i -- /bin/bash -c 'oc get --raw /debug/pprof/heap --server "https://$( hostname ):8443" --config /etc/origin/master/admin.kubeconfig' > /tmp/artifacts/metrics/${file}-heap - oc exec $i -- /bin/bash -c 'oc get --raw /metrics --server "https://$( hostname ):8443" --config /etc/origin/master/admin.kubeconfig' | gzip -c > /tmp/artifacts/metrics/${file}-api.gz - oc exec $i -- /bin/bash -c 'oc get --raw /debug/pprof/heap --server "https://$( hostname ):8444" --config /etc/origin/master/admin.kubeconfig' > /tmp/artifacts/metrics/${file}-controllers-heap - oc exec $i -- /bin/bash -c 'oc get --raw /metrics --server "https://$( hostname ):8444" --config /etc/origin/master/admin.kubeconfig' | gzip -c > /tmp/artifacts/metrics/${file}-controllers.gz - done < /tmp/pods-api - - while IFS= read -r i; do - file="$( echo "$i" | cut -d ' ' -f 2,3,5 | tr -s ' ' '_' )" - oc logs --request-timeout=20s $i | gzip -c > /tmp/artifacts/pods/${file}.log.gz - oc logs --request-timeout=20s -p $i | gzip -c > /tmp/artifacts/pods/${file}_previous.log.gz - done < /tmp/containers - - echo "Waiting for node logs to finish ..." - wait - - echo "Deprovisioning cluster ..." - export AWS_SHARED_CREDENTIALS_FILE=/etc/openshift-installer/.awscred - cd /tmp/shared/cluster - tectonic destroy --dir=. 
--log-level=debug --continue-on-error - } - - trap 'teardown' EXIT - trap 'kill $(jobs -p); exit 0' TERM - - for i in `seq 1 120`; do - if [[ -f /tmp/shared/exit ]]; then - exit 0 - fi - sleep 60 & wait - done diff --git a/ci-operator/templates/cluster-launch-installer-e2e.yaml b/ci-operator/templates/cluster-launch-installer-e2e.yaml index 95be6dc6c7f9..898b69f090c7 100644 --- a/ci-operator/templates/cluster-launch-installer-e2e.yaml +++ b/ci-operator/templates/cluster-launch-installer-e2e.yaml @@ -10,7 +10,7 @@ parameters: required: true - name: IMAGE_FORMAT required: true -- name: IMAGE_INSTALLER +- name: LOCAL_IMAGE_INSTALLER required: true - name: IMAGE_TESTS required: true @@ -64,7 +64,7 @@ objects: containers: - # Once admin.kubeconfig exists, executes shared tests + # Once the cluster is up, executes shared tests - name: test image: ${IMAGE_TESTS} resources: @@ -85,7 +85,7 @@ objects: - name: HOME value: /tmp/home - name: KUBECONFIG - value: /tmp/admin.kubeconfig + value: /tmp/artifacts/installer/auth/kubeconfig command: - /bin/bash - -c @@ -96,30 +96,61 @@ objects: trap 'touch /tmp/shared/exit' EXIT trap 'kill $(jobs -p); exit 0' TERM - cp "$(which oc)" /tmp/shared/ - mkdir -p "${HOME}" - # wait until the setup job creates admin.kubeconfig + # wait for the router namespace + FOUND_KUBECONFIG= + API_UP= + ROUTER_NAMESPACE= while true; do if [[ -f /tmp/shared/exit ]]; then echo "Another process exited" 2>&1 exit 1 fi - if [[ ! -f /tmp/shared/admin.kubeconfig ]]; then + if [[ ! -f "${KUBECONFIG}" ]]; then + sleep 15 & wait + continue + elif [[ -z "${FOUND_KUBECONFIG}" ]]; then + echo "Found shared kubeconfig" + FOUND_KUBECONFIG=1 + + # don't let clients impact the global kubeconfig + cp "${KUBECONFIG}" /tmp/admin.kubeconfig + export KUBECONFIG=/tmp/admin.kubeconfig + fi + if ! oc get nodes 2>/dev/null; then + echo "Waiting for API at $(oc whoami --show-server) to respond ..." sleep 15 & wait continue + elif [[ -z "${API_UP}" ]]; then + echo "API at $(oc whoami --show-server) has responded" + API_UP=1 + fi + if [[ -z "${ROUTER_NAMESPACE}" ]]; then + # check multiple namespaces while we are transitioning to the new locations + if oc get deploy/router -n tectonic-ingress 2>/dev/null; then + ROUTER_NAMESPACE=tectonic-ingress + elif oc get deploy/router -n openshift-ingress 2>/dev/null; then + ROUTER_NAMESPACE=openshift-ingress + elif oc get deploy/router -n default 2>/dev/null; then + ROUTER_NAMESPACE=default + else + echo "Waiting for router to be created ..." + sleep 15 & wait + continue + fi + echo "Found router in ${ROUTER_NAMESPACE}" fi break done - echo "Found shared kubeconfig" - # don't let clients impact the global kubeconfig - cp /tmp/shared/admin.kubeconfig /tmp/admin.kubeconfig - - PATH=/usr/libexec/origin:$PATH + if ! 
oc wait deploy/router -n "${ROUTER_NAMESPACE}" --for condition=available --timeout=20m; then + echo "Installation failed" + cp $KUBECONFIG /tmp/admin.kubeconfig + exit 1 + fi - # set up cloud provider specific env vars + # set up cloud-provider-specific env vars if [[ "${CLUSTER_TYPE}" == "gcp" ]]; then export GOOGLE_APPLICATION_CREDENTIALS="/tmp/cluster/gce.json" export KUBE_SSH_USER=cloud-user @@ -127,8 +158,7 @@ objects: cp /tmp/cluster/ssh-privatekey ~/.ssh/google_compute_engine || true export PROVIDER_ARGS='-provider=gce -gce-zone=us-east1-c -gce-project=openshift-gce-devel-ci' elif [[ "${CLUSTER_TYPE}" == "aws" ]]; then - region="$( python -c 'import sys, json; print json.load(sys.stdin)["tectonic_aws_region"]' /tmp/cluster/inputs.yaml - ) - mkdir /tmp/artifacts/installer - cp /tmp/cluster/inputs.yaml /tmp/artifacts/installer/ - - echo "Invoking installer ..." - - cd /tmp/cluster - tectonic init --config=inputs.yaml - mv -f ${NAME}/* /tmp/cluster/ - cp config.yaml internal.yaml /tmp/artifacts/installer/ - - tectonic install --dir=. --log-level=debug - cp terraform.tfvars /tmp/artifacts/installer/ - - # wait until oc shows up - while true; do - if [[ -f /tmp/exit ]]; then - echo "Interrupted" - cp $KUBECONFIG /tmp/admin.kubeconfig - exit 1 - fi - if [[ ! -f /tmp/oc ]]; then - echo "Waiting for oc binary to show up ..." - sleep 15 & wait - continue - fi - if ! /tmp/oc get nodes 2>/dev/null; then - echo "Waiting for API at $(/tmp/oc whoami --show-server) to respond ..." - sleep 15 & wait - continue - fi - # check multiple namespaces while we are transitioning to the new locations - if /tmp/oc get deploy/router -n tectonic-ingress 2>/dev/null; then - router_namespace=tectonic-ingress - elif /tmp/oc get deploy/router -n openshift-ingress 2>/dev/null; then - router_namespace=openshift-ingress - elif /tmp/oc get deploy/router -n default 2>/dev/null; then - router_namespace=default - else - echo "Waiting for router to be created ..." - sleep 15 & wait - continue - fi - break - done - if ! /tmp/oc wait deploy/router -n "${router_namespace}" --for condition=available --timeout=20m; then - echo "Installation failed" - cp $KUBECONFIG /tmp/admin.kubeconfig - exit 1 - fi - cp $KUBECONFIG /tmp/admin.kubeconfig - echo "Installation successful" + - /bin/openshift-install + - --dir=/tmp/artifacts/installer + - --log-level=debug + - cluster # Performs cleanup of all created resources - name: teardown - image: ${IMAGE_INSTALLER} + image: ${LOCAL_IMAGE_INSTALLER} volumeMounts: - name: shared-tmp mountPath: /tmp/shared @@ -271,7 +247,7 @@ objects: - name: TYPE value: ${CLUSTER_TYPE} - name: KUBECONFIG - value: /tmp/shared/admin.kubeconfig + value: /tmp/artifacts/installer/auth/kubeconfig command: - /bin/bash - -c @@ -326,8 +302,7 @@ objects: echo "Deprovisioning cluster ..." export AWS_SHARED_CREDENTIALS_FILE=/etc/openshift-installer/.awscred - cd /tmp/shared/cluster - tectonic destroy --dir=. 
--log-level=debug --continue-on-error + openshift-install --dir /tmp/artifacts/installer --log-level debug destroy-cluster } trap 'teardown' EXIT diff --git a/cluster/test-deploy/aws/.gitignore b/cluster/test-deploy/aws/.gitignore deleted file mode 100644 index 7af82b26cace..000000000000 --- a/cluster/test-deploy/aws/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -* -!.type -!.gitignore -!OWNERS -!vars*.yaml -!openshift.yaml -!bootstrap-script.sh diff --git a/cluster/test-deploy/aws/.type b/cluster/test-deploy/aws/.type deleted file mode 100644 index 0eb8bf97e53e..000000000000 --- a/cluster/test-deploy/aws/.type +++ /dev/null @@ -1 +0,0 @@ -aws \ No newline at end of file diff --git a/cluster/test-deploy/aws/OWNERS b/cluster/test-deploy/aws/OWNERS deleted file mode 100644 index cf5509f59662..000000000000 --- a/cluster/test-deploy/aws/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md - -approvers: - - installer-approvers -reviewers: - - installer-reviewers diff --git a/cluster/test-deploy/aws/openshift.yaml b/cluster/test-deploy/aws/openshift.yaml deleted file mode 100644 index 04a3017b621c..000000000000 --- a/cluster/test-deploy/aws/openshift.yaml +++ /dev/null @@ -1,259 +0,0 @@ -admin: - email: "${EMAIL}" - password: "${PASSWORD}" - sshKey: "${SSH_KEY}" -aws: - # (optional) Unique name under which the Amazon S3 bucket will be created. Bucket name must start with a lower case name and is limited to 63 characters. - # The Tectonic Installer uses the bucket to store tectonic assets and kubeconfig. - # If name is not provided the installer will construct the name using "name", current AWS region and "baseDomain" - # assetsS3BucketName: - - # (optional) AMI override for all nodes. Example: `ami-foobar123`. - ${EC2_AMI_OVERRIDE} - - external: - # (optional) List of subnet IDs within an existing VPC to deploy master nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # masterSubnetIDs: - - # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone. - # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records. - # If set, no additional private zone will be created. - # - # Example: `"Z1ILINNUJGTAO1"` - # privateZone: - - # (optional) ID of an existing VPC to launch nodes into. - # If unset a new VPC is created. - # - # Example: `vpc-123456` - # vpcID: - - # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # workerSubnetIDs: - - # (optional) Extra AWS tags to be applied to created resources. - # - # Example: `{ "key" = "value", "foo" = "bar" }` - extraTags: {"expirationDate": "${EXPIRATION_DATE}"} - - # (optional) Name of IAM role to use to access AWS in order to deploy the Tectonic Cluster. - # The name is also the full role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # installerRole: - - master: - # (optional) This configures master availability zones and their corresponding subnet CIDRs directly. - # - # Example: - # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }` - # customSubnets: - - # Instance size for the master node(s). Example: `t2.medium`. 
- ec2Type: t2.medium - - # (optional) List of additional security group IDs for master nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of master nodes. - # The name is also the last part of a role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - rootVolume: - # The amount of provisioned IOPS for the root block device of master nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of master nodes. - size: 30 - - # The type of volume for the root block device of master nodes. - type: gp2 - - # (optional) If set to true, create private-facing ingress resources (ELB, A-records). - # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone. - # privateEndpoints: true - - # (optional) This declares the AWS credentials profile to use. - # profile: default - - # (optional) If set to true, create public-facing ingress resources (ELB, A-records). - # If set to false, no public-facing ingress resources will be created. - publicEndpoints: true - - # The target AWS region for the cluster. - region: us-east-1 - - # Block of IP addresses used by the VPC. - # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect. - vpcCIDRBlock: 10.0.0.0/16 - - worker: - # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly. - # - # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }` - # customSubnets: - - # Instance size for the worker node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for worker nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of worker nodes. - # The name is also the last part of a role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - # (optional) List of ELBs to attach all worker instances to. - # This is useful for exposing NodePort services via load-balancers managed separately from the cluster. - # - # Example: - # * `["ingress-nginx"]` - # loadBalancers: - - rootVolume: - # The amount of provisioned IOPS for the root block device of worker nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of worker nodes. - size: 30 - - # The type of volume for the root block device of worker nodes. - type: gp2 - -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openstack.dev.coreos.systems`. -# -# Note: This field MUST be set manually prior to creating the cluster. -# This applies only to cloud platforms. -# -# [Azure-specific NOTE] -# To use Azure-provided DNS, `BaseDomain` should be set to `""` -# If using DNS records, ensure that `BaseDomain` is set to a properly configured external DNS zone. 
-# Instructions for configuring delegated domains for Azure DNS can be found here: https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns -baseDomain: origin-ci-int-aws.dev.rhcloud.com - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. - # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. - # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -containerLinux: - # (optional) The Container Linux update channel. - # - # Examples: `stable`, `beta`, `alpha` - # channel: stable - - # The Container Linux version to use. Set to `latest` to select the latest available version for the selected update channel. - # - # Examples: `latest`, `1465.6.0` - version: latest - - # (optional) A list of PEM encoded CA files that will be installed in /etc/ssl/certs on master and worker nodes. - # customCAPEMList: - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -# The path to the tectonic licence file. -# You can download the Tectonic license file from your Account overview page at [1]. -# -# [1] https://account.coreos.com/overview -licensePath: license - -master: - # The name of the node pool(s) to use for master nodes - nodePools: - - master - -# The name of the cluster. -# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. -# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: ${NAME} - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: canal - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 3 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 3 - name: worker - -# The platform used for deploying. -platform: aws - -# The path the pull secret file in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. 
-# A sample JSON content is shown in [2].
-# You can download the pull secret from your Account overview page at [3].
-#
-# [1] https://docs.docker.com/engine/reference/commandline/login/
-#
-# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup
-#
-# [3] https://account.coreos.com/overview
-pullSecretPath: pull-secret
-
-worker:
-  # The name of the node pool(s) to use for workers
-  nodePools:
-  - worker
\ No newline at end of file
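Note on the replacement flow: the cluster-launch-installer-e2e.yaml changes above drop the tectonic init/install/destroy sequence and drive everything through openshift-install, reading the kubeconfig from the installer's asset directory. The sketch below is illustrative only and is not part of the diff; it assumes the asset directory and AWS credentials are already staged the way the template stages them, and it pins the router namespace for brevity even though the template probes several.

  #!/bin/bash
  set -euo pipefail
  # Asset/artifact directory used by the template; openshift-install
  # writes auth/kubeconfig beneath it.
  dir=/tmp/artifacts/installer
  # Create the cluster (the "cluster" target added in the setup container).
  /bin/openshift-install --dir="${dir}" --log-level=debug cluster
  # Clients read the generated kubeconfig instead of a copied admin.kubeconfig.
  export KUBECONFIG="${dir}/auth/kubeconfig"
  # Wait for the router, as the test container does (namespace assumed here).
  oc wait deploy/router -n openshift-ingress --for condition=available --timeout=20m
  # Teardown mirrors the cleanup container.
  openshift-install --dir "${dir}" --log-level debug destroy-cluster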