Skip to content

Commit

Permalink
cmd/openshift-install/upi: Add a user-provided-infrastructure subcommand
Browse files Browse the repository at this point in the history
To support the workflow described in upiLong.

I've also replaced openshiftcorp.com with example.com to comply with
[1].  Who knows, maybe openshiftcorp.com will exist someday ;).

[1]: https://tools.ietf.org/html/rfc6761#section-6.5
  • Loading branch information
wking committed Mar 12, 2019
1 parent 12af0c9 commit 45fbb29
Show file tree
Hide file tree
Showing 5 changed files with 129 additions and 25 deletions.
45 changes: 25 additions & 20 deletions cmd/openshift-install/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,25 +104,18 @@ var (
logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
}

err = destroyBootstrap(ctx, config, rootOpts.dir)
err = waitForBootstrapComplete(ctx, config, rootOpts.dir)
if err != nil {
logrus.Fatal(err)
}

if err := waitForInitializedCluster(ctx, config); err != nil {
logrus.Fatal(err)
}

consoleURL, err := waitForConsole(ctx, config, rootOpts.dir)
logrus.Info("Destroying the bootstrap resources...")
err = destroybootstrap.Destroy(rootOpts.dir)
if err != nil {
logrus.Fatal(err)
}

if err = addRouterCAToClusterCA(config, rootOpts.dir); err != nil {
logrus.Fatal(err)
}

err = logComplete(rootOpts.dir, consoleURL)
err = finish(ctx, config, rootOpts.dir)
if err != nil {
logrus.Fatal(err)
}
Expand Down Expand Up @@ -244,7 +237,7 @@ func addRouterCAToClusterCA(config *rest.Config, directory string) (err error) {

// FIXME: pulling the kubeconfig and metadata out of the root
// directory is a bit cludgy when we already have them in memory.
func destroyBootstrap(ctx context.Context, config *rest.Config, directory string) (err error) {
func waitForBootstrapComplete(ctx context.Context, config *rest.Config, directory string) (err error) {
client, err := kubernetes.NewForConfig(config)
if err != nil {
return errors.Wrap(err, "creating a Kubernetes client")
Expand All @@ -253,7 +246,7 @@ func destroyBootstrap(ctx context.Context, config *rest.Config, directory string
discovery := client.Discovery()

apiTimeout := 30 * time.Minute
logrus.Infof("Waiting up to %v for the Kubernetes API...", apiTimeout)
logrus.Infof("Waiting up to %v for the Kubernetes API at %s...", apiTimeout, config.Host)
apiContext, cancel := context.WithTimeout(ctx, apiTimeout)
defer cancel()
// Poll quickly so we notice changes, but only log when the response
Expand Down Expand Up @@ -288,12 +281,7 @@ func destroyBootstrap(ctx context.Context, config *rest.Config, directory string

eventTimeout := 30 * time.Minute
logrus.Infof("Waiting up to %v for the bootstrap-complete event...", eventTimeout)
if err := waitForEvent(ctx, client.CoreV1().RESTClient(), "bootstrap-complete", eventTimeout); err != nil {
return err
}

logrus.Info("Destroying the bootstrap resources...")
return destroybootstrap.Destroy(rootOpts.dir)
return waitForEvent(ctx, client.CoreV1().RESTClient(), "bootstrap-complete", eventTimeout)
}

// waitForEvent watches the events in the kube-system namespace, waits
Expand Down Expand Up @@ -331,7 +319,7 @@ func waitForEvent(ctx context.Context, client cache.Getter, name string, timeout
// that the cluster has been initialized.
func waitForInitializedCluster(ctx context.Context, config *rest.Config) error {
timeout := 30 * time.Minute
logrus.Infof("Waiting up to %v for the cluster to initialize...", timeout)
logrus.Infof("Waiting up to %v for the cluster at %s to initialize...", timeout, config.Host)
cc, err := configclient.NewForConfig(config)
if err != nil {
return errors.Wrap(err, "failed to create a config client")
Expand Down Expand Up @@ -454,3 +442,20 @@ func logComplete(directory, consoleURL string) error {
logrus.Infof("Login to the console with user: kubeadmin, password: %s", pw)
return nil
}

// finish waits for the cluster to finish deploying its initial
// version, then updates local resources: it retrieves the console
// URL, injects the cluster's router CA into the admin kubeconfig
// under 'directory', and logs completion info (console URL and
// kubeadmin credentials) for the user.
//
// Note: previously the 'directory' parameter was accepted but
// ignored in favor of the global rootOpts.dir; the parameter is now
// honored so the function does not depend on global state. Callers
// pass rootOpts.dir, so observable behavior is unchanged.
func finish(ctx context.Context, config *rest.Config, directory string) error {
	if err := waitForInitializedCluster(ctx, config); err != nil {
		return err
	}

	consoleURL, err := waitForConsole(ctx, config, directory)
	if err != nil {
		return err
	}

	if err := addRouterCAToClusterCA(config, directory); err != nil {
		return err
	}

	return logComplete(directory, consoleURL)
}
1 change: 1 addition & 0 deletions cmd/openshift-install/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ func installerMain() {
for _, subCmd := range []*cobra.Command{
newCreateCmd(),
newDestroyCmd(),
newUPICmd(),
newVersionCmd(),
newGraphCmd(),
newCompletionCmd(),
Expand Down
98 changes: 98 additions & 0 deletions cmd/openshift-install/upi.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
package main

import (
"context"
"path/filepath"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
)

var (
upiLong = `Entry-points for user-provided infrastructure.
Most users will want to use 'create cluster' to have the installer
create the required infrastructure for their cluster. But in some
installations the infrastructure needs to be adapted in ways that
installer-created infrastructure does not support. This command
provides entry points to support the following workflow:
1. Call 'create ignition-configs' to create the bootstrap Ignition
config and admin kubeconfig.
2. Creates all required cluster resources, after which the cluster
will being bootstrapping.
3. Call 'user-provided-infrastructure bootstrap-complete' to wait
until the bootstrap phase has completed.
4. Destroy the bootstrap resources.
5. Call 'user-provided-infrastructure finish' to wait until the
cluster finishes deploying its initial version. This also
retrieves the router certificate authority from the cluster and
inserts it into the admin kubeconfig.`
)

// newUPICmd returns the 'user-provided-infrastructure' command (alias
// 'upi'), which groups the entry points used during a UPI install.
// Invoked without a subcommand it just prints its own help.
func newUPICmd() *cobra.Command {
	upiCmd := &cobra.Command{
		Use:     "user-provided-infrastructure",
		Aliases: []string{"upi"},
		Short:   "Entry-points for user-provided infrastructure",
		Long:    upiLong,
		RunE: func(cmd *cobra.Command, args []string) error {
			// No subcommand given; show usage.
			return cmd.Help()
		},
	}

	for _, sub := range []*cobra.Command{
		newUPIBootstrapCompleteCmd(),
		newUPIFinishCmd(),
	} {
		upiCmd.AddCommand(sub)
	}

	return upiCmd
}

// newUPIBootstrapCompleteCmd returns the 'bootstrap-complete'
// subcommand, which loads the admin kubeconfig from the asset
// directory and blocks until the cluster emits the
// bootstrap-complete event.
func newUPIBootstrapCompleteCmd() *cobra.Command {
	run := func(_ *cobra.Command, _ []string) {
		ctx := context.Background()

		cleanup := setupFileHook(rootOpts.dir)
		defer cleanup()

		kubeconfigPath := filepath.Join(rootOpts.dir, "auth", "kubeconfig")
		config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
		if err != nil {
			logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
		}

		if err := waitForBootstrapComplete(ctx, config, rootOpts.dir); err != nil {
			logrus.Fatal(err)
		}

		logrus.Info("It is now safe to remove the bootstrap resources")
	}

	return &cobra.Command{
		Use:   "bootstrap-complete",
		Short: "Wait until cluster bootstrapping has completed",
		Args:  cobra.ExactArgs(0),
		Run:   run,
	}
}

// newUPIFinishCmd returns the 'finish' subcommand, which waits for
// the cluster to finish deploying its initial version and then
// updates local resources (console URL, router CA in the admin
// kubeconfig) via finish().
func newUPIFinishCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "finish",
		Short: "Wait for the cluster to finish updating and update local resources",
		Args:  cobra.ExactArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			ctx := context.Background()

			cleanup := setupFileHook(rootOpts.dir)
			defer cleanup()

			kubeconfig := filepath.Join(rootOpts.dir, "auth", "kubeconfig")
			cfg, loadErr := clientcmd.BuildConfigFromFlags("", kubeconfig)
			if loadErr != nil {
				logrus.Fatal(errors.Wrap(loadErr, "loading kubeconfig"))
			}

			if finishErr := finish(ctx, cfg, rootOpts.dir); finishErr != nil {
				logrus.Fatal(finishErr)
			}
		},
	}
}
6 changes: 3 additions & 3 deletions docs/user/aws/install.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ Step 3: Download the Installer.
? SSH Public Key /home/user_id/.ssh/id_rsa.pub
? Platform aws
? Region us-east-1
? Base Domain openshiftcorp.com
? Base Domain example.com
? Cluster Name test
? Pull Secret [? for help]
```
Expand All @@ -21,15 +21,15 @@ Step 3: Download the Installer.

```console
[~]$ openshift-install-linux-amd64 create cluster
INFO Waiting up to 30m0s for the Kubernetes API...
INFO Waiting up to 30m0s for the Kubernetes API at api.test.example.com...
INFO API v1.11.0+85a0623 up
INFO Waiting up to 30m0s for the bootstrap-complete event...
INFO Destroying the bootstrap resources...
INFO Waiting up to 10m0s for the openshift-console route to be created...
INFO Install complete!
INFO Run 'export KUBECONFIG=/home/user/auth/kubeconfig' to manage the cluster with 'oc', the OpenShift CLI.
INFO The cluster is ready when 'oc login -u kubeadmin -p XXXX' succeeds (wait a few minutes).
INFO Access the OpenShift web-console here: https://console-openshift-console.apps.test.openshiftcorp.com
INFO Access the OpenShift web-console here: https://console-openshift-console.apps.test.example.com
INFO Login to the console with user: kubeadmin, password: XXXX
```

Expand Down
4 changes: 2 additions & 2 deletions docs/user/customization.md
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ For example:
INFO Consuming "Master Machines" from target directory
INFO Consuming "Common Manifests" from target directory
INFO Creating cluster...
INFO Waiting up to 30m0s for the Kubernetes API...
INFO Waiting up to 30m0s for the Kubernetes API at api.test-cluster.example.com...
...
```

Expand All @@ -184,7 +184,7 @@ For example:
02-master-kubelet 2.2.0 137m
01-worker-container-runtime 3.11.0-744-g5b05d9d3-dirty 2.2.0 137m
01-worker-kubelet 3.11.0-744-g5b05d9d3-dirty 2.2.0 137m
99-master-3c81ffa3-3b8d-11e9-ac1e-52fdfc072182-registries 3.11.0-744-g5b05d9d3-dirty 133m
99-master-3c81ffa3-3b8d-11e9-ac1e-52fdfc072182-registries 3.11.0-744-g5b05d9d3-dirty 133m
99-worker-3c83a226-3b8d-11e9-ac1e-52fdfc072182-registries 3.11.0-744-g5b05d9d3-dirty 133m
master-55491738d7cd1ad6c72891e77c35e024 3.11.0-744-g5b05d9d3-dirty 2.2.0 137m
worker-edab0895c59dba7a566f4b955d87d964 3.11.0-744-g5b05d9d3-dirty 2.2.0 137m
Expand Down

0 comments on commit 45fbb29

Please sign in to comment.