diff --git a/pkg/buildinfo/version.go b/pkg/buildinfo/version.go index e050ad150..f9bc6c1d8 100644 --- a/pkg/buildinfo/version.go +++ b/pkg/buildinfo/version.go @@ -20,13 +20,13 @@ limitations under the License. package buildinfo // Version is the current version of Sonobuoy, set by the go linker's -X flag at build time -var Version = "v0.17.2" +var Version = "v0.18.0" // GitSHA is the actual commit that is being built, set by the go linker's -X flag at build time. var GitSHA string // MinimumKubeVersion is the lowest API version of Kubernetes this release of Sonobuoy supports. -var MinimumKubeVersion = "1.15.0" +var MinimumKubeVersion = "1.16.0" // MaximumKubeVersion is the highest API version of Kubernetes this release of Sonobuoy supports. -var MaximumKubeVersion = "1.17.99" +var MaximumKubeVersion = "1.18.99" diff --git a/pkg/image/manifest.go b/pkg/image/manifest.go index 1032349bd..8ef119064 100644 --- a/pkg/image/manifest.go +++ b/pkg/image/manifest.go @@ -35,6 +35,7 @@ const ( googleContainerRegistry = "gcr.io/google-containers" invalidRegistry = "invalid.com/invalid" privateRegistry = "gcr.io/k8s-authenticated-test" + promoterE2eRegistry = "us.gcr.io/k8s-artifacts-prod/e2e-test-images" quayIncubator = "quay.io/kubernetes_incubator" quayK8sCSI = "quay.io/k8scsi" sampleRegistry = "gcr.io/google-samples" @@ -52,6 +53,7 @@ type RegistryList struct { GoogleContainerRegistry string `yaml:"googleContainerRegistry,omitempty"` InvalidRegistry string `yaml:"invalidRegistry,omitempty"` PrivateRegistry string `yaml:"privateRegistry,omitempty"` + PromoterE2eRegistry string `yaml:"promoterE2eRegistry"` QuayIncubator string `yaml:"quayIncubator,omitempty"` QuayK8sCSI string `yaml:"quayK8sCSI,omitempty"` SampleRegistry string `yaml:"sampleRegistry,omitempty"` @@ -125,6 +127,8 @@ func (r *RegistryList) getImageConfigs() (map[string]Config, error) { return r.v1_16(), nil case 17: return r.v1_17(), nil + case 18: + return r.v1_18(), nil } } return map[string]Config{}, fmt.Errorf("No matching configuration for k8s version: %v", r.K8sVersion) @@ -239,6 +243,23 @@ func GetDefaultImageRegistries(version string) (*RegistryList, error) { // GcAuthenticatedRegistry: gcAuthenticatedRegistry, // PrivateRegistry: privateRegistry, }, nil + case 18: + return &RegistryList{ + E2eRegistry: e2eRegistry, + DockerLibraryRegistry: dockerLibraryRegistry, + GcRegistry: gcRegistry, + GoogleContainerRegistry: googleContainerRegistry, + DockerGluster: dockerGluster, + QuayIncubator: quayIncubator, + PromoterE2eRegistry: promoterE2eRegistry, + + // The following keys are used in the v1.18 registry list however their images + // cannot be pulled as they are used as part of tests for checking image pull + // behavior. They are omitted from the resulting config. + // InvalidRegistry: invalidRegistry, + // GcAuthenticatedRegistry: gcAuthenticatedRegistry, + // PrivateRegistry: privateRegistry, + }, nil } } return nil, fmt.Errorf("No matching configuration for k8s version: %v", v) diff --git a/pkg/image/v1.18.go b/pkg/image/v1.18.go new file mode 100644 index 000000000..6c1650655 --- /dev/null +++ b/pkg/image/v1.18.go @@ -0,0 +1,74 @@ +/* +Copyright 2017 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: This is manually replicated from: https://github.com/kubernetes/kubernetes/blob/v1.18.0/test/utils/image/manifest.go#L208-L248 + +package image + +func (r *RegistryList) v1_18() map[string]Config { + + e2eRegistry := r.E2eRegistry + dockerLibraryRegistry := r.DockerLibraryRegistry + gcRegistry := r.GcRegistry + gcAuthenticatedRegistry := r.GcAuthenticatedRegistry + googleContainerRegistry := r.GoogleContainerRegistry + invalidRegistry := r.InvalidRegistry + privateRegistry := r.PrivateRegistry + dockerGluster := r.DockerGluster + quayIncubator := r.QuayIncubator + promoterE2eRegistry := r.PromoterE2eRegistry + + configs := map[string]Config{} + configs["Agnhost"] = Config{promoterE2eRegistry, "agnhost", "2.12"} + configs["AgnhostPrivate"] = Config{privateRegistry, "agnhost", "2.6"} + configs["AuthenticatedAlpine"] = Config{gcAuthenticatedRegistry, "alpine", "3.7"} + configs["AuthenticatedWindowsNanoServer"] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"} + configs["APIServer"] = Config{e2eRegistry, "sample-apiserver", "1.17"} + configs["AppArmorLoader"] = Config{e2eRegistry, "apparmor-loader", "1.0"} + configs["BusyBox"] = Config{dockerLibraryRegistry, "busybox", "1.29"} + configs["CheckMetadataConcealment"] = Config{e2eRegistry, "metadata-concealment", "1.2"} + configs["CudaVectorAdd"] = Config{e2eRegistry, "cuda-vector-add", "1.0"} + configs["CudaVectorAdd2"] = Config{e2eRegistry, "cuda-vector-add", "2.0"} + configs["EchoServer"] = Config{e2eRegistry, "echoserver", "2.2"} + configs["Etcd"] = Config{gcRegistry, "etcd", "3.4.3"} + configs["GlusterDynamicProvisioner"] = Config{dockerGluster, "glusterdynamic-provisioner", "v1.0"} + configs["Httpd"] = Config{dockerLibraryRegistry, "httpd", "2.4.38-alpine"} + configs["HttpdNew"] = Config{dockerLibraryRegistry, "httpd", "2.4.39-alpine"} + configs["InvalidRegistryImage"] = Config{invalidRegistry, "alpine", "3.1"} + configs["IpcUtils"] = Config{e2eRegistry, "ipc-utils", "1.0"} + configs["JessieDnsutils"] = Config{e2eRegistry, "jessie-dnsutils", "1.0"} + configs["Kitten"] = Config{e2eRegistry, "kitten", "1.0"} + configs["Mounttest"] = Config{e2eRegistry, "mounttest", "1.0"} + configs["MounttestUser"] = Config{e2eRegistry, "mounttest-user", "1.0"} + configs["Nautilus"] = Config{e2eRegistry, "nautilus", "1.0"} + configs["NFSProvisioner"] = Config{quayIncubator, "nfs-provisioner", "v2.2.2"} + configs["Nginx"] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"} + configs["NginxNew"] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"} + configs["Nonewprivs"] = Config{e2eRegistry, "nonewprivs", "1.0"} + configs["NonRoot"] = Config{e2eRegistry, "nonroot", "1.0"} + // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go + configs["Pause"] = Config{gcRegistry, "pause", "3.2"} + configs["Perl"] = Config{dockerLibraryRegistry, "perl", "5.26"} + configs["PrometheusDummyExporter"] = Config{gcRegistry, "prometheus-dummy-exporter", "v0.1.0"} + configs["PrometheusToSd"] = Config{gcRegistry, "prometheus-to-sd", "v0.5.0"} + configs["Redis"] = Config{dockerLibraryRegistry, "redis", "5.0.5-alpine"} 
+ configs["RegressionIssue74839"] = Config{e2eRegistry, "regression-issue-74839-amd64", "1.0"} + configs["ResourceConsumer"] = Config{e2eRegistry, "resource-consumer", "1.5"} + configs["SdDummyExporter"] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"} + configs["StartupScript"] = Config{googleContainerRegistry, "startup-script", "v1"} + configs["VolumeNFSServer"] = Config{e2eRegistry, "volume/nfs", "1.0"} + configs["VolumeISCSIServer"] = Config{e2eRegistry, "volume/iscsi", "2.0"} + configs["VolumeGlusterServer"] = Config{e2eRegistry, "volume/gluster", "1.0"} + configs["VolumeRBDServer"] = Config{e2eRegistry, "volume/rbd", "1.0.1"} + return configs +} diff --git a/site/_config.yml b/site/_config.yml index dbd86e41d..9c3795991 100644 --- a/site/_config.yml +++ b/site/_config.yml @@ -52,6 +52,12 @@ defaults: version: master gh: https://github.com/vmware-tanzu/sonobuoy/tree/master layout: "docs" + - scope: + path: docs/v0.18.0 + values: + version: v0.18.0 + gh: https://github.com/vmware-tanzu/sonobuoy/tree/v0.18.0 + layout: "docs" - scope: path: docs/v0.17.2 values: @@ -179,9 +185,10 @@ collections: - plugin-list versioning: true -latest: v0.17.2 +latest: v0.18.0 versions: - master +- v0.18.0 - v0.17.2 - v0.17.1 - v0.17.0 diff --git a/site/_data/toc-mapping.yml b/site/_data/toc-mapping.yml index 273f7f2aa..8679d0ed6 100644 --- a/site/_data/toc-mapping.yml +++ b/site/_data/toc-mapping.yml @@ -5,6 +5,7 @@ # Below is a commented out example of what this may look like: master: master-toc +v0.18.0: v0-18-0toc v0.17.2: v0-17-2toc v0.17.1: v0-17-1toc v0.17.0: v0-17-0toc diff --git a/site/_data/v0-18-0toc.yml b/site/_data/v0-18-0toc.yml new file mode 100644 index 000000000..373ba0b93 --- /dev/null +++ b/site/_data/v0-18-0toc.yml @@ -0,0 +1,32 @@ +toc: + - title: Basics + subfolderitems: + - page: Overview + url: /index.html + - page: Checking Results + url: /results + - title: Plugins + subfolderitems: + - page: Overview + url: /plugins + - page: E2E & Conformance + url: /e2eplugin + - page: Examples + url: /examples + github: true + - title: Advanced + subfolderitems: + - page: Detailed result contents + url: /snapshot + - page: Configuration Options + url: /sonobuoy-config + - page: Custom Registries & Airgap Testing + url: /airgap + - page: Using Private Images + url: /pullsecrets + - page: Advanced Customization + url: /gen + - title: Resources + subfolderitems: + - page: Frequently Asked Questions + url: /faq diff --git a/site/docs/v0.18.0/README.md b/site/docs/v0.18.0/README.md new file mode 100644 index 000000000..cdaf70d97 --- /dev/null +++ b/site/docs/v0.18.0/README.md @@ -0,0 +1,173 @@ +# Sonobuoy logo [![CircleCI](https://circleci.com/gh/vmware-tanzu/sonobuoy.svg?style=svg)](https://circleci.com/gh/vmware-tanzu/sonobuoy) + +## [Overview][oview] + +Sonobuoy is a diagnostic tool that makes it easier to understand the +state of a Kubernetes cluster by running a set of plugins (including [Kubernetes][k8s] conformance +tests) in an accessible and non-destructive manner. It is a customizable, +extendable, and cluster-agnostic way to generate clear, informative reports +about your cluster. + +Its selective data dumps of Kubernetes resource objects and cluster nodes allow +for the following use cases: + +* Integrated end-to-end (e2e) [conformance-testing][e2ePlugin] +* Workload debugging +* Custom data collection via extensible plugins + +Sonobuoy supports 3 Kubernetes minor versions: the current release and 2 minor versions before. 
Sonobuoy is currently versioned to track the Kubernetes minor version to clarify the support matrix. For example, Sonobuoy v0.14.x would support Kubernetes 1.14.x, 1.13.x, and 1.12.x. + +> Note: You can skip this version enforcement by running Sonobuoy with the `--skip-preflight` flag. + +## Prerequisites + +* Access to an up-and-running Kubernetes cluster. If you do not have a cluster, + we recommend following the [AWS Quickstart for Kubernetes][quickstart] instructions. + +* An admin `kubeconfig` file, and the KUBECONFIG environment variable set. + +* For some advanced workflows it may be required to have `kubectl` installed. See [installing via Homebrew (MacOS)][brew] or [building + the binary (Linux)][linux]. + +* The `sonobuoy images` subcommand requires [Docker](https://www.docker.com) to be installed. See [installing Docker](docker). + +## Installation + +1. Download the [latest release][releases] for your client platform. +2. Extract the tarball: + + ``` + tar -xvf .tar.gz + ``` + + Move the extracted `sonobuoy` executable to somewhere on your `PATH`. + +## Getting Started + +To launch conformance tests (ensuring [CNCF][cncf] conformance) and wait until they are finished run: + +```bash +sonobuoy run --wait +``` + +> Note: Using `--mode quick` will significantly shorten the runtime of Sonobuoy. It runs just a single test, helping to quickly validate your Sonobuoy and Kubernetes configuration. + +Get the results from the plugins (e.g. e2e test results): + +```bash +results=$(sonobuoy retrieve) +``` + +Inspect results for test failures. This will list the number of tests failed and their names: + +```bash +sonobuoy results $results +``` + +> Note: The `results` command has lots of useful options for various situations. See the [results page][results] for more details. + +You can also extract the entire contents of the file to get much more [detailed data][snapshot] about your cluster. + +Sonobuoy creates a few resources in order to run and expects to run within its +own namespace. + +Deleting Sonobuoy entails removing its namespace as well as a few cluster +scoped resources. + +```bash +sonobuoy delete --wait +``` + +> Note: The --wait option ensures the Kubernetes namespace is deleted, avoiding conflicts if another Sonobuoy run is started quickly. + +### Other Tests + +By default, `sonobuoy run` runs the Kubernetes conformance tests but this can easily be configured. The same plugin that has the conformance tests has all the Kubernetes end-to-end tests which include other tests such as: + +* tests for specific storage features +* performance tests +* scaling tests +* provider specific tests +* and many more + +To modify which tests you want to run, checkout our page on the [e2e plugin][e2ePlugin]. + +If you want to run other tests or tools which are not a part of the Kubernetes end-to-end suite, refer to our documentation on [custom plugins][customPlugins]. + +### Monitoring Sonobuoy during a run + +You can check on the status of each of the plugins running with: + +```bash +sonobuoy status +``` + +You can also inspect the logs of all Sonobuoy containers: + +```bash +sonobuoy logs +``` + +## Troubleshooting + +If you encounter any problems that the documentation does not address, [file an +issue][issue]. + +## Known Issues + +### Leaked End-to-end namespaces + +There are some Kubernetes e2e tests that may leak resources. 
Sonobuoy can +help clean those up as well by deleting all namespaces prefixed with `e2e`: + +```bash +sonobuoy delete --all +``` + +### Run on Google Cloud Platform (GCP) + +Sonobuoy requires admin permissions which won't be automatic if you are running via Google Kubernetes Engine (GKE) cluster. You must first create an admin role for the user under which you run Sonobuoy: + +```bash +kubectl create clusterrolebinding --clusterrole=cluster-admin --user= +``` + +## Contributing + +Thanks for taking the time to join our community and start contributing! We +welcome pull requests. Feel free to dig through the [issues][issue] and jump in. + +### Before you start + +* Please familiarize yourself with the [Code of Conduct][coc] before + contributing. +* See [CONTRIBUTING.md][contrib] for instructions on the developer certificate + of origin that we require. +* There is a [Slack channel][slack] if you want to + interact with other members of the community + +## Changelog + +See [the list of releases][releases] to find out about feature changes. + +[airgap]: airgap +[brew]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos +[cncf]: https://github.com/cncf/k8s-conformance#certified-kubernetes +[coc]: https://github.com/vmware-tanzu/sonobuoy/blob/master/CODE_OF_CONDUCT.md +[contrib]: https://github.com/vmware-tanzu/sonobuoy/blob/master/CONTRIBUTING.md +[docker]: https://docs.docker.com/install +[docs]: https://sonobuoy.io/docs/v0.18.0 +[e2ePlugin]: e2eplugin +[customPlugins]: plugins +[gen]: gen +[issue]: https://github.com/vmware-tanzu/sonobuoy/issues +[k8s]: https://github.com/kubernetes/kubernetes +[linux]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1 +[oview]: https://youtu.be/k-P4hXdruRs?t=9m27s +[plugins]: plugins +[quickstart]: https://aws.amazon.com/quickstart/architecture/vmware-kubernetes/ +[releases]: https://github.com/vmware-tanzu/sonobuoy/releases +[results]: results +[slack]: https://kubernetes.slack.com/messages/sonobuoy +[snapshot]:snapshot +[sonobuoyconfig]: sonobuoy-config diff --git a/site/docs/v0.18.0/airgap.md b/site/docs/v0.18.0/airgap.md new file mode 100644 index 000000000..b1ea749ba --- /dev/null +++ b/site/docs/v0.18.0/airgap.md @@ -0,0 +1,125 @@ +# Custom registries and air-gapped testing + +In air-gapped deployments where there is no access to the public Docker registries Sonobuoy supports running the end-to-end tests with custom registries. +This enables you to test your air-gapped deployment once you've loaded the necessary images into a registry that is reachable by your cluster. + +You will need to make the Sonobuoy image available as well as the images for any plugins you wish to run. +Below, you will find the details of how to use the Sonobuoy image, as well as the images for the `e2e` and `systemd-logs` plugins in this kind of deployment. + +## Sonobuoy Image +To run any Sonobuoy plugin in an air-gapped deployment, you must ensure that the Sonobuoy image is available in a registry that is reachable by your cluster. +You will need to pull, tag, and then push the image as follows: + +``` +PRIVATE_REG= +SONOBUOY_VERSION= + +docker pull sonobuoy/sonobuoy:$SONOBUOY_VERSION +docker tag sonobuoy/sonobuoy:$SONOBUOY_VERSION $PRIVATE_REG/sonobuoy:$SONOBUOY_VERSION +docker push $PRIVATE_REG/sonobuoy:$SONOBUOY_VERSION +``` + +By default, Sonobuoy will attempt to use the image available in the public registry. 
+To use the image in your own registry, you will need to override it when using the `gen` or `run` command with the `--sonobuoy-image` flag as follows: + +``` +sonobuoy run --sonobuoy-image $PRIVATE_REG/sonobuoy:$SONOBUOY_VERSION +``` + +## E2E Plugin + +To use the `e2e` plugin, the conformance test image and the images the tests use must be available in your registry. + +### Conformance Image +The process for making the conformance image available in your registry is the same as the Sonobuoy image. +You need to pull, tag, and then push the image. +To ensure you use the correct version of the conformance image, check your server version using `kubectl version`. + + +``` +PRIVATE_REG= +CLUSTER_VERSION= + +docker pull gcr.io/google-containers/conformance:$CLUSTER_VERSION +docker tag gcr.io/google-containers/conformance:$CLUSTER_VERSION $PRIVATE_REG/conformance:$CLUSTER_VERSION +docker push $PRIVATE_REG/conformance:$CLUSTER_VERSION +``` + +To use the conformance image in your registry, you will need to override the default when using the `gen` or `run` commands with the `--kube-conformance-image` flag as follows: + +``` +sonobuoy run --kube-conformance-image $PRIVATE_REG/conformance:$CLUSTER_VERSION +``` + +### Test Images + +The end-to-end tests use a number of different images across multiple registries. +When running the `e2e` plugin, you must provide a mapping that details which custom registries should be used instead of the public registries. + +This mapping is a YAML file which maps the registry category to the corresponding registry URL. +The keys in this file are specified in the Kubernetes test framework. +The tests for each minor version of Kubernetes use a different set of registries so the mapping you create will depend on which Kubernetes version you are testing against. + +To create this mapping, you can use the `gen default-image-config` command to provide the mapping with the default registry values for your cluster version. +The following is an example of using this command with a v1.16 cluster: + +``` +$ sonobuoy gen default-image-config +dockerLibraryRegistry: docker.io/library +e2eRegistry: gcr.io/kubernetes-e2e-test-images +gcRegistry: k8s.gcr.io +googleContainerRegistry: gcr.io/google-containers +sampleRegistry: gcr.io/google-samples +``` + +You can save this output to a file and modify it to specify your own registries instead. +You can modify all of the registry values or just a subset. +If you specify only a subset, the defaults will be used instead. + +Sonobuoy provides the command `images` to help you easily pull the test images and push them to your own custom registries. +First, you must pull the images to your local machine using the following command: + +``` +sonobuoy images pull +``` + +> **NOTE:** Some versions of Kubernetes reference images that do not exist or cannot be pulled without authentication. +> You may see these errors when running the above command. This is expected behaviour. +> These images are referenced by some end-to-end tests, but **not** by the conformance tests. + +To push the images, you must provide the mapping using the `--e2e-repo-config` flag as follows: + +``` +sonobuoy images push --e2e-repo-config +``` + +Sonobuoy will read the mapping config and will push the images to the repositories defined in that mapping. 
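For illustration, a customized mapping for a v1.16 cluster might look like the following. The `registry.example.internal` host and the `custom-repos.yaml` filename are placeholders for your own values; any keys you leave out keep the defaults shown above.

```
# custom-repos.yaml (hypothetical) -- override only the registries you mirror
dockerLibraryRegistry: registry.example.internal/library
e2eRegistry: registry.example.internal/kubernetes-e2e-test-images
gcRegistry: registry.example.internal/k8s
googleContainerRegistry: registry.example.internal/google-containers
sampleRegistry: registry.example.internal/google-samples
```
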
+ +When running the `e2e` plugin, you will need to provide this file using the same flag as follows: + +``` +sonobuoy run --e2e-repo-config +``` + +## systemd-logs plugin + +If you want to run the `systemd-logs` plugin you will again need to pull, tag, and push the image. + + +``` +PRIVATE_REG= + +docker pull gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest +docker tag gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest $PRIVATE_REG/sonobuoy-plugin-systemd-logs:latest +docker push $PRIVATE_REG/sonobuoy-plugin-systemd-logs:latest +``` + +To use the image in your own registry, you will need to override the default when using the `gen` or `run` commands with the `--systemd-logs-image` flag as follows: + +``` +sonobuoy run --systemd-logs-image $PRIVATE_REG/sonobuoy-plugin-systemd-logs:latest +``` + +If you do not wish to run this plugin, you can remove it from the list of [plugins][plugins] to be run within the manifest, or you can explicitly specify which plugin you with to run with the `--plugin` flag. + +[plugins]: plugins.md#choosing-which-plugins-to-run diff --git a/site/docs/v0.18.0/e2eplugin.md b/site/docs/v0.18.0/e2eplugin.md new file mode 100644 index 000000000..d074cd2c6 --- /dev/null +++ b/site/docs/v0.18.0/e2eplugin.md @@ -0,0 +1,74 @@ +# The Kubernetes End-To-End Testing Plugin + +The Kubernetes end-to-end testing plugin (the e2e plugin) is used to run tests which are maintained by the upstream Kubernetes community in the [kubernetes/kubernetes][kubernetesRepo] repo. + +There are numerous ways to run this plugin in order to meet your testing needs. + +## Choosing Which Tests To Run + +The most common point of customization is changing the set of tests to run. This is controlled by two environment variables the test image recognizes: + +* E2E_FOCUS +* E2E_SKIP + +Each of these is a regular expression describing which tests to run or skip. The "E2E_FOCUS" value is applied first and the "E2E_SKIP" value then further restricts that list. These can be set using Sonobuoy flags: + +``` +sonobuoy run \ + --e2e-focus= \ + --e2e-skip= +``` + +> Note: These flags are just special cases of the more general flag `--plugin-env`. For instance, you could set the env vars by using the flag `--plugin-env e2e.E2E_SKIP=` + +# Built-In Configurations + +There are a few commonly run configurations which Sonobuoy hard-codes for convenience: + +* non-disruptive-conformance + +This is the default mode and will run all the tests in the `e2e` plugin which are marked `Conformance` which are known to not be disruptive to other workloads in your cluster. This mode is ideal for checking that an existing cluster continues to behave is conformant manner. + +> NOTE: The length of time it takes to run conformance can vary based on the size of your cluster---the timeout can be adjusted in the Server.timeoutseconds field of the Sonobuoy `config.json` or on the CLI via the `--timeout` flag. + +* quick + +This mode will run a single test from the `e2e` test suite which is known to be simple and fast. Use this mode as a quick check that the cluster is responding and reachable. + +* certified-conformance + +This mode runs all of the `Conformance` tests and is the mode used when applying for the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance). Some of these tests may be disruptive to other workloads so it is not recommended that you run this mode on production clusters. In those situations, use the default "non-disruptive-conformance" mode. 
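Whichever built-in configuration you choose, it is selected with the `--mode` flag. For example, a certified run could be started as follows (drop `--wait` if you prefer not to block until the run completes):

```
sonobuoy run --mode certified-conformance --wait
```
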
+ +> NOTE: The length of time it takes to run conformance can vary based on the size of your cluster---the timeout can be adjusted in the Server.timeoutseconds field of the Sonobuoy `config.json` or on the CLI via the `--timeout` flag. + +## Dry Run + +When specifying your own focus/skip values, it may be useful to set the run to operate in dry run mode: + +``` +sonobuoy run \ + --plugin-env e2e.E2E_FOCUS=pods \ + --plugin-env e2e.E2E_DRYRUN=true +``` + +By setting `E2E_DRYRUN`, the run will execute and produce results like normal except that the actual test code won't execute, just the test selection. Each test that _would have been run_ will be reported as passing. This can help you fine-tune your focus/skip values to target just the tests you want without wasting hours on test runs which target unnecessary tests. + +## Why Conformance Matters + +With such a [wide array][configs] of Kubernetes distributions available, *conformance tests* help ensure that a Kubernetes cluster meets the minimal set of features. They are a subset of end-to-end (e2e) tests that should pass on any Kubernetes cluster. + +A conformance-passing cluster provides the following guarantees: + +* **Best practices**: Your Kubernetes is properly configured. This is useful to know whether you are running a distribution out of the box or handling your own custom setup. + +* **Predictability**: All your cluster behavior is well-documented. Available features in the official Kubernetes documentation can be taken as a given. Unexpected bugs should be rare, because distribution-specific issues are weeded out during the conformance tests. + +* **Interoperability**: Workloads from other conforming clusters can be ported into your cluster, or vice versa. This standardization of Kubernetes is a key advantage of open source software, and allows you to avoid vendor lock-in. + +Individual Kubernetes distributions may offer additional features beyond conformance testing, but if you change distributions, these features can't be expected to be provided. + +See the [official documentation][conformanceDocs] for Kubernetes's existing conformance tests. + +[configs]: https://docs.google.com/spreadsheets/d/1LxSqBzjOxfGx3cmtZ4EbB_BGCxT_wlxW_xgHVVa23es/edit#gid=0 +[conformanceDocs]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md#conformance-tests +[kubernetesRepo]: https://github.com/kubernetes/kubernetes/tree/master/cluster/images/conformance \ No newline at end of file diff --git a/site/docs/v0.18.0/faq.md b/site/docs/v0.18.0/faq.md new file mode 100644 index 000000000..7c39c06ea --- /dev/null +++ b/site/docs/v0.18.0/faq.md @@ -0,0 +1,172 @@ +# Frequently Asked Questions + +## Kubernetes Conformance and end-to-end testing +### Why were so many tests skipped? +When running the `e2e` plugin on Sonobuoy, you will notice that a large number of tests are skipped by default. +The reason for this is that the image used by Sonobuoy to run the Kubernetes conformance tests contains all the end-to-end tests for Kubernetes. +However, only a subset of those tests are required to check conformance. +For example, the v1.16 Kubernetes test image contains over 4000 tests however only 215 of those are conformance tests. + +The default mode for the e2e plugin (`non-disruptive-conformance`) will run all tests which contain the tag `[Conformance]` and exclude those that with the `[Disruptive]` tag. +This is to help prevent you from accidentally running tests which may disrupt workloads on your cluster. 
+To run all the conformance tests, use the `certified-conformance` mode. + +Please refer to our [documentation for the `e2e` plugin][e2ePlugin] for more details of the built-in configurations. + +### How do I determine why my tests failed? +Before debugging test failures, we recommend isolating any failures to verify that they are genuine and are not spurious or transient. +Unfortunately, such failures can be common in complex, distributed systems. +To do this, you can make use of the `--e2e-focus` flag when using the `run` command. +This flag accepts a regex which will be used to find and run only the tests matching that regex. +For example, you can provide the name of a test to run only that test: + +``` +sonobuoy run --e2e-focus "should update pod when spec was updated and update strategy is RollingUpdate" +``` + +If the test continues to fail and it appears to be a genuine failure, the next step would be to read the logs to understand why the test failed. +To read the logs for a test failure, you can find the log file within the results tarball from Sonobuoy (`plugins/e2e/results/global/e2e.log`) or you can use the `results` command to show details of test failures. +For example, the following commands retrieve the results tarball and then use [jq][jq] to return an object for each test failure with the failure message and the associated stdout. + +``` +outfile=$(sonobuoy retrieve) && \ + sonobuoy results --mode detailed --plugin e2e $outfile | jq '. | select(.status == "failed") | .details' +``` + +Carefully read the test logs to see if anything stands out which could be the cause of the failure. +For example: Were there difficulties when contacting a particular service? Are there any commonalities in the failed tests due to a particular feature? +Often, the test logs will provide enough detail to allow you to determine why a test failed. + +If you need more information, Sonobuoy also queries the cluster upon completion of plugins. +The details collected allow you to see the state of the cluster and whether there were any issues. +For example: Did any of the nodes have memory pressure? Did the scheduler pod go down? + +As a final resort, you can also read the upstream test code to determine what actions were being performed at the point when the test failed. +If you decide to take this approach, you must ensure that you are reading the version of the test code that corresponds to your test image. +You can verify which version of the test image was used by inspecting the plugin definition which is available in the results tarball in `plugins/e2e/definition.json` under the key `Definition.spec.image`. +For example, if the test image was `gcr.io/google-containers/conformance:v1.15.3`, you should read the code at the corresponding [v1.15.3 tag in GitHub][kubernetes-1.15.3]. +All the tests can be found within the `test/e2e` directory in the Kubernetes repository. + +### How can I run the E2E tests with certain test framework options set? What are the available options? +How you provide options to the E2E test framework and determining which options you can set depends on which version of Kubernetes you are testing. 
+ +To view the available options that you can set when running the tests, you can run the test executable for the conformance image you will be using as follows: + +``` +KUBE_VERSION= +docker run -it gcr.io/google-containers/conformance:$KUBE_VERSION ./e2e.test --help +``` + +You can also view the definitions of these test framework flags in the [Kubernetes repository][framework-flags]. + +If you are running Kubernetes v1.16.0 or greater, a new feature was included in this release which makes it easier to specify your own options. +This new feature allows arbitrary options to be specified when the tests are invoked. +To use this, you must ensure the environment variable `E2E_USE_GO_RUNNER=true` is set. +This is the default behavior from Sonobuoy v0.16.1 in the CLI and only needs to be manually set if working with a Sonobuoy manifest generated by an earlier version. +If this is enabled, then you can provide your options with the flag `--plugin-env=e2e.E2E_EXTRA_ARGS`. +For example, the following allows you set provider specific flags for running on GCE: + +``` +sonobuoy run --plugin-env=e2e.E2E_USE_GO_RUNNER=true \ + --plugin-env=e2e.E2E_PROVIDER=gce \ + --plugin-env=e2e.E2E_EXTRA_ARGS="--gce-zone=foo --gce-region=bar" +``` + +Before this version, it was necessary to build your own custom image which could execute the tests with the desired options. + +For details on the two different approaches that you can take, please refer to [our blog post][custom-e2e-image] which describes in more detail how to use the new v1.16.0 Go test runner and how to build your own custom images. + + +### Some of the registries required for the tests are blocked with my test infrastructure. Can I still run the tests? +Yes! Sonobuoy can be configured to use custom registries so that you can run the tests in airgapped environments. + +For more information and details on how to configure your environment, please refer to [our documentation for custom registries and air-gapped environments][airgap]. + +### We have some nodes with custom taints in our cluster and the tests won't start. How can I run the tests? +Although Sonobuoy plugins can be adapted to use [custom Kubernetes PodSpecs][custom-podspecs] where tolerations for custom taints can be specified, these settings do not apply to workloads started by the Kubernetes end-to-end testing framework as part of running the `e2e` plugin. + +The end-to-end test framework checks the status of the cluster before beginning to run the tests. +One of the checks that it runs, is checking that all of the nodes are schedulable and ready to accept workloads. +This check deems any nodes with a taint other than the master node taint (`node-role.kubernetes.io/master`) to be unschedulable. +This means that any node with a different taint will not be considered ready for testing and will block the tests from starting. + +With the release of Kubernetes v1.17.0, you will be able to whitelist node taints so that any node with a whitelisted taint will be deemed schedulable as part of the pre-test checks. +This will ensure that these nodes will not block the tests from starting. +If you are running Kubernetes v1.17.0 or greater, you will be able to specify the taints to whitelist using the flag `--non-blocking-taints` which takes a comma-separated list of taints. +To find out how to set this flag via Sonobuoy, please refer to our previous answer on how to set test framework options. + +This solution does not enable workloads created by the tests to run on these nodes. 
+This is still an [open issue in Kubernetes][support-custom-taints]. +The workloads created by the end-to-end tests will continue to run only on untainted nodes. + +For all versions of Kubernetes prior to v1.17.0, there are two approaches that you may be able to take to allow the tests to run. + +The first is adjusting the number of nodes the test framework allows to be "not-ready". +By default, the test framework will wait for all nodes to be ready. +However, if only a subset of your nodes are tainted and the rest are otherwise suitable for accepting test workloads, you could provide the test framework flag `--allowed-not-ready-nodes` specifying the number of tainted nodes you have. +By setting this, the test framework will allow for your tainted nodes to be in a "not-ready" state. +This does not guarantee that your tests will start however as a node in your cluster may not be ready for another reason. +Also, this approach will only work if there are untainted nodes as some will still need to be available for the tests to run on. + +The only other approach is to untaint the nodes for the purposes of testing. + +### What tests can I run? How can I figure out what tests/tags I can select? +The `e2e` plugin has a number of preconfigured modes for running tests, with the default mode running all conformance tests which are non-disruptive. +It is possible to [configure the plugin][e2ePlugin] to provide a specific set of E2E tests to run instead. + +Which tests you can run depends on the version of Kubernetes you are testing as the list of tests changes with each release. + +A list of the conformance tests is maintained in the [Kubernetes repository][kubernetes-conformance]. +Within the GitHub UI, you can change the branch to the tag that matches your Kubernetes version to see all the tests for that version. +This list provides each test name as well where you can find the test in the repository. +You can include these test names in the `E2E_FOCUS` or `E2E_SKIP` environment variables when [running the plugin][e2ePlugin]. + +Although the default behavior is to run the Conformance tests, you can run any of the other Kubernetes E2E tests with Sonobuoy. +These are not required for checking that your cluster is conformant and we only recommend running these if there is specific behavior you wish to check. + +There are a large number of E2E tests available (over 4000 as of v1.16.0). +Many of these tests have "tags" which show that they belong to a specific group, or have a particular trait. +There isn't a definitive list of these tags, however below are some of the most commonly seen tags: + +- Conformance +- NodeConformance +- Slow +- Serial +- Disruptive +- Flaky +- LinuxOnly +- Feature:* (there are numerous feature tags) + +There are also specific tags for tests that belong to a particular [Special Interest Group (SIG)][sig-list]. 
+The following SIG tags exist within the E2E tests: + +- [sig-api-machinery] +- [sig-apps] +- [sig-auth] +- [sig-autoscaling] +- [sig-cli] +- [sig-cloud-provider] +- [sig-cloud-provider-gcp] +- [sig-cluster-lifecycle] +- [sig-instrumentation] +- [sig-network] +- [sig-node] +- [sig-scheduling] +- [sig-service-catalog] +- [sig-storage] +- [sig-ui] +- [sig-windows] + + +[kubernetes-podspec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#podspec-v1-core +[custom-e2e-image]: https://sonobuoy.io/custom-e2e-image/ +[custom-podspecs]: https://sonobuoy.io/customizing-plugin-podspecs/ +[sig-list]: https://github.com/kubernetes/community/blob/master/sig-list.md +[jq]: https://stedolan.github.io/jq/ +[kubernetes-1.15.3]: https://github.com/kubernetes/kubernetes/tree/v1.15.3 +[kubernetes-conformance]: https://github.com/kubernetes/kubernetes/blob/master/test/conformance/testdata/conformance.txt +[airgap]: airgap.md +[e2ePlugin]: e2eplugin.md +[customPlugins]: plugins.md +[support-custom-taints]: https://github.com/kubernetes/kubernetes/issues/83329 +[framework-flags]: https://github.com/kubernetes/kubernetes/blob/master/test/e2e/framework/test_context.go diff --git a/site/docs/v0.18.0/gen.md b/site/docs/v0.18.0/gen.md new file mode 100644 index 000000000..74de0858c --- /dev/null +++ b/site/docs/v0.18.0/gen.md @@ -0,0 +1,24 @@ +# Customization + +Sonobuoy provides many flags to customize your run but sometimes you have a special use case that isn't supported yet. For these cases, Sonobuoy provides `sonobuoy gen`. + +The command `sonobuoy gen` will print the YAML for your run to stdout instead of actually creating it. It accepts all of the relevant flags for customizing the run just like `sonobuoy run` would. You can then edit it yourself and apply it as if Sonobuoy had run it. + +Output the YAML Sonobuoy would create to a file: + +``` +sonobuoy gen --e2e-focus="sig-networking" --e2e-skip="Alpha" > sonobuoy.yaml +``` + +Then manually modify it as necessary. Maybe you need special options for plugins or want your own sidecar to be running with the images. + +Finally, create the resources yourself via kubectl. + +``` +kubectl apply -f sonobuoy.yaml +``` + +> Note: If you find that you need this flow to accomplish your work, talk to us about it in our [Slack][slack] channel or file an [issue][issue] in Github. Others may have the same need and we'd love to help support you. + +[slack]: https://kubernetes.slack.com/messages/sonobuoy +[issue]: https://github.com/vmware-tanzu/sonobuoy/issues \ No newline at end of file diff --git a/site/docs/v0.18.0/img/README.md b/site/docs/v0.18.0/img/README.md new file mode 100644 index 000000000..f44575760 --- /dev/null +++ b/site/docs/v0.18.0/img/README.md @@ -0,0 +1 @@ +Some of these diagrams (for instance sonobuoy-plugins.png), have been created on [draw.io](https://www.draw.io), using the "Include a copy of my diagram" option. If you want to make changes to these diagrams, try importing them into draw.io, and you should have access to the original shapes/text that went into the originals. 
diff --git a/site/docs/v0.18.0/img/plugin-contract.png b/site/docs/v0.18.0/img/plugin-contract.png new file mode 100755 index 000000000..d18ec66d2 Binary files /dev/null and b/site/docs/v0.18.0/img/plugin-contract.png differ diff --git a/site/docs/v0.18.0/img/scanner.png b/site/docs/v0.18.0/img/scanner.png new file mode 100644 index 000000000..e3c5ebd6d Binary files /dev/null and b/site/docs/v0.18.0/img/scanner.png differ diff --git a/site/docs/v0.18.0/img/snapshot-00-overview.png b/site/docs/v0.18.0/img/snapshot-00-overview.png new file mode 100644 index 000000000..379331dfb Binary files /dev/null and b/site/docs/v0.18.0/img/snapshot-00-overview.png differ diff --git a/site/docs/v0.18.0/img/snapshot-10-resources.png b/site/docs/v0.18.0/img/snapshot-10-resources.png new file mode 100644 index 000000000..c3c4ec24f Binary files /dev/null and b/site/docs/v0.18.0/img/snapshot-10-resources.png differ diff --git a/site/docs/v0.18.0/img/snapshot-20-hosts.png b/site/docs/v0.18.0/img/snapshot-20-hosts.png new file mode 100644 index 000000000..166f2a2a9 Binary files /dev/null and b/site/docs/v0.18.0/img/snapshot-20-hosts.png differ diff --git a/site/docs/v0.18.0/img/snapshot-30-podlogs.png b/site/docs/v0.18.0/img/snapshot-30-podlogs.png new file mode 100644 index 000000000..9de295723 Binary files /dev/null and b/site/docs/v0.18.0/img/snapshot-30-podlogs.png differ diff --git a/site/docs/v0.18.0/img/snapshot-40-plugins.png b/site/docs/v0.18.0/img/snapshot-40-plugins.png new file mode 100644 index 000000000..dd1b59169 Binary files /dev/null and b/site/docs/v0.18.0/img/snapshot-40-plugins.png differ diff --git a/site/docs/v0.18.0/img/snapshot-50-meta.png b/site/docs/v0.18.0/img/snapshot-50-meta.png new file mode 100644 index 000000000..efbb9be92 Binary files /dev/null and b/site/docs/v0.18.0/img/snapshot-50-meta.png differ diff --git a/site/docs/v0.18.0/img/sonobuoy-logo.png b/site/docs/v0.18.0/img/sonobuoy-logo.png new file mode 100644 index 000000000..edd5379b6 Binary files /dev/null and b/site/docs/v0.18.0/img/sonobuoy-logo.png differ diff --git a/site/docs/v0.18.0/img/sonobuoy-plugins.png b/site/docs/v0.18.0/img/sonobuoy-plugins.png new file mode 100644 index 000000000..0fcea8160 Binary files /dev/null and b/site/docs/v0.18.0/img/sonobuoy-plugins.png differ diff --git a/site/docs/v0.18.0/plugins.md b/site/docs/v0.18.0/plugins.md new file mode 100644 index 000000000..302380344 --- /dev/null +++ b/site/docs/v0.18.0/plugins.md @@ -0,0 +1,171 @@ +# Sonobuoy Plugins + +## Overview + +The main function of Sonobuoy is running plugins; each plugin may run tests or gather data in the cluster. + +When you first run Sonobuoy, an aggregator pod is created in the cluster which reads the configuration you've chosen and launches each plugin. + +The aggregator then waits for each plugin to report results back to it. If the plugin fails to launch correctly or does not report results within the timeout period, an error is recorded. + +## Plugin Types + +There are two types of plugins: + +* Job plugins + +Job plugins are plugins which only need to run once. The Sonobuoy aggregator will create a single pod for this type of plugin. The Kubernetes E2E plugin is a job-type plugin. + +* Daemonset plugins + +Daemonset plugins are plugins which need to run on every node, even control-plane nodes. The systemd-logs gatherer is a daemonset-type plugin. 
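The plugin type is declared through the `driver` field of the plugin's `sonobuoy-config`. As a rough sketch, a daemonset-type plugin definition might look like the following; the plugin name, image, and command are placeholders, and the complete default definition (including its `podSpec`) is shown later under "Customizing PodSpec options".

```yaml
sonobuoy-config:
  driver: DaemonSet
  plugin-name: my-node-checker
  result-type: my-node-checker
spec:
  command:
  - ./run.sh
  image: my-node-checker:latest
  name: plugin
  resources: {}
  volumeMounts:
  - mountPath: /tmp/results
    name: results
```
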
+ +## Built-in Plugins + +Two plugins are included in the Sonobuoy source code by default: + +* Kubernetes end-to-end tests (the e2e plugin) + +The upstream Kubernetes repo contains the code for this [image][conformance]. The test image includes all the pieces necessary to run the end-to-end tests (which includes, but is not limited to, the conformance tests). This is the most common plugin run by Sonobuoy and can be tweaked in numerous ways to run the set of tests that you need. See more details about how to use this plugin [here][e2ePlugin]. + +* systemd-logs gathering plugin + +Gathers the latest system logs from each node, using systemd's `journalctl` command. The image this plugin uses is built from the [heptio/sonobuoy-plugin-systemd-logs][systemd-repo] repo. + +## Specifying Which Plugins To Run + +By default both the `e2e` and `systemd-logs` plugin are run. If you set `--mode=quick` only the `e2e` plugin is run. + +Otherwise, you can specify the plugins to run (including custom plugins) by using the `--plugin` flag. This can accept the path to a plugin defintion file or the name of a built-in plugin. For example: + +``` +# Run just the e2e plugin +$ sonobuoy run --plugin e2e + +# Run your custom plugin and the systemd-logs gatherer +$ sonobuoy run --plugin customPlugin.yaml --plugin systemd-logs +``` + +> Note: All of the CLI options impact the generated YAML. If you would like to edit the YAML directly or see the impact your options have on the YAML, use `sonobuoy gen `. + +## How Plugins Work + +A plugin consists of two parts: + +* the core logic which runs tests/gathers data (typically a single container) +* a sidecar added by Sonobuoy which reports the data back to the aggregator + +After your container completes its work, it needs to signal to Sonobuoy that +it's done by writing out the name of the results file into a "done file". The default +value is `/tmp/results/done`, which you can configure with the `ResultsDir` value +in the Sonobuoy config. + +The Sonobuoy sidecar waits for the `done` file to be present, then transmits the indicated +file back to the aggregator. + +![sonobuoy plugins diagram][diagram] +[diagram]: img/plugin-contract.png + +### Writing your own plugin + +Use the `sonobuoy gen plugin` command to help generate the YAML for your plugin definition. Once you've saved that YAML locally, you can run your plugin via: + +``` +sonobuoy run --plugin myPlugin.yaml +``` + +For a thorough walkthrough of how to build a custom plugin from scratch, see our [blog post][customPluginsBlog] and our [example plugins][examplePlugins]. + +## Plugin Result Types + +When results get transmitted back to the aggregator, Sonobuoy inspects the results in order +to present results metadata to the end user such as the number of passed/failed tests or +the number of files gathered. + +This inspection process is informed by the YAML that described the plugin defintion. The +`result-type` field can be set to either `raw` or `junit`. + +When set to `junit`, Sonobuoy will look for XML files and process them as junit test results. + +When set to `raw`, Sonobuoy will simply inspect all the files and record the number of files generated. 
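Whichever result type a plugin declares, its side of the contract described above is the same: write the output into the results directory, then write the done file. A minimal sketch of the tail end of a plugin's run script, assuming the default `/tmp/results` location (the results filename here is illustrative):

```
# stage the output where the Sonobuoy worker sidecar can find it
cp ./junit-results.xml /tmp/results/

# signal completion by writing the results path into the done file
echo -n /tmp/results/junit-results.xml > /tmp/results/done
```
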
+ +The data that Sonobuoy gathers during this step makes it possible for a user to do a few different tasks: + +* get high-level results without even downloading the results tarball via `sonobuoy status --json` +* get summary information about the run via `sonobuoy results $tarball` +* get detailed information about each test/file via `sonobuoy results $tarball --mode=detailed` + +To see more information about how Sonobuoy can process and present your results, see the [results][results] page and our previous [blog post][resultsBlog]. + +### Customizing PodSpec options + +By default, Sonobuoy will determine how to create and run the resources required for your plugin. +When creating your own plugins however, you may want additional control over how the plugin is run within your cluster. +To enable this, you can customize the [PodSpec][kubernetes-podspecs] used by Sonobuoy when creating the plugin's Pods or DaemonSets by supplying a `podSpec` object within your plugin defition. +The `podSpec` object corresponds directly to a Kubernetes [PodSpec][kubernetes-podspecs] so any fields that are available there can be set by your plugins. + +If a `podSpec` is provided, Sonobuoy will use it as is, only adding what is necessary for Sonobuoy to run your plugin (such as a Sonobuoy worker container). +Sonobuoy will only ever _add_ to your `podSpec` definition, it will not remove or override settings within it. +If you don't need to provide any additional settings, you can omit this object and Sonobuoy will use the defaults. + +#### Providing your own PodSpec +We recommend starting with the default `podSpec` used by Sonobuoy and then making any necessary modifications. +To view the default `podSpec`, you can use the flag `--show-default-podspec` with the `gen` and `gen plugin` commands. + +When creating a new plugin, you can include the default `podSpec` in the generated definition as follows: + +``` +sonobuoy gen plugin --show-default-podspec -n my-plugin -i my-plugin:latest +``` + +This will produce the following plugin definition: + +```yaml +podSpec: + containers: [] + restartPolicy: Never + serviceAccountName: sonobuoy-serviceaccount + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - key: CriticalAddonsOnly + operator: Exists +sonobuoy-config: + driver: Job + plugin-name: my-plugin + result-type: my-plugin +spec: + command: + - ./run.sh + image: my-plugin:latest + name: plugin + resources: {} + volumeMounts: + - mountPath: /tmp/results + name: results +``` + +You are then free to make modifications to the `podSpec` object as necessary. + +If you already have an existing plugin which you would like to customize, you can take the default `podSpec`, add it to your plugin definition and use it as the basis for customization. + +> **NOTE:** The default `podSpec` differs for Job and DaemonSet plugins. +To be sure you are using the appropriate defaults as your starting point, be sure to provide the `--type` flag when using `sonobuoy gen plugin`. + +You can also modify the `podSpec` from within a Sonobuoy manifest. +By providing the flag `--show-default-podspec` to `sonobuoy gen`, the default `podSpec` for each plugin will be included within the `sonobuoy-plugins-cm` ConfigMap in the manifest. + +> **NOTE:** Modifications to the `podSpec` are only persisted within that generated manifest. +If you generate a new manifest by running `sonobuoy gen` again, you will need to reapply any changes made. 
+We recommend adding your desired customizations to the plugin definition itself. + +[systemd-repo]: https://github.com/heptio/sonobuoy-plugin-systemd-logs +[e2e]: https://github.com/vmware-tanzu/sonobuoy/blob/master/examples/plugins.d/heptio-e2e.yaml +[conformance]: https://github.com/kubernetes/kubernetes/tree/master/cluster/images/conformance +[e2ePlugin]: e2eplugin.md +[kubernetes-podspecs]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#podspec-v1-core +[customPluginsBlog]: https://blogs.vmware.com/cloudnative/2019/04/30/sonobuoy-plugins-custom-testing/ +[examplePlugins]: https://github.com/vmware-tanzu/sonobuoy/tree/master/examples/plugins +[results]: results.md +[resultsBlog]: https://sonobuoy.io/simplified-results-reporting-with-sonobuoy/ \ No newline at end of file diff --git a/site/docs/v0.18.0/pullsecrets.md b/site/docs/v0.18.0/pullsecrets.md new file mode 100644 index 000000000..40b4b64da --- /dev/null +++ b/site/docs/v0.18.0/pullsecrets.md @@ -0,0 +1,42 @@ +# Using a Private Sonobuoy Image with ImagePullSecrets + +This document describes how to use the ImagePullSecrets option in order to run Sonobuoy using a private Sonobuoy image. + +## Setting ImagePullSecrets + +The name of the secret to use when pulling the image can be set easily in the configuration file passed to `sonobuoy run` or `sonobuoy gen`: + +``` +echo '{"ImagePullSecrets":"mysecret"}' > secretconfig.json +sonobuoy gen --config secretconfig.json +``` + +Doing this properly passes the value and places it into the YAML for the Sonobuoy aggregator pod and all the pods for each plugin. + +## Creating the Secret + +The main complication for this flow is that secrets can only be referenced from within their own namespace. As a result we need to create the secret at the same time we create the initial resources. + +Sonobuoy does not have built in support for this, but it can be manually achieved via the following process: + - Manually create the YAML for the secret + - Insert the YAML into the output from `sonobuoy gen --config secretconfig.json` + - Run with `kubectl apply -f ...` + +As an example of how to create the secret you can follow the instructions [here][dockersecret] in order to create a secret in the default namespace. + +Then use copy most of its YAML via: + +``` +kubectl get secret -o yaml > secret.json +``` + +Manually edit the file and remove/adjust the metadata as appropriate. The namespace should be adjusted to your desired Sonobuoy namespace (default: heptio-sonobuoy) and the following fields can be removed: + - annotations + - creationTimestamp + - resourceVersion + - selfLink + - uid + +Then just insert that YAML into the output from `sonobuoy gen` and run with `kubectl apply -f ...` + +[dockersecret]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ diff --git a/site/docs/v0.18.0/release.md b/site/docs/v0.18.0/release.md new file mode 100644 index 000000000..2b4d13516 --- /dev/null +++ b/site/docs/v0.18.0/release.md @@ -0,0 +1,142 @@ +# Release + +## Preparing a new release + +1. Update the version defined in the code to the new version number. + As of the time of writing, the version is defined in `pkg/buildinfo/version.go`. +1. Generate a new set of [versioned docs][gendocs] for this release. +1. If there an a Kubernetes release coming soon, do the following to ensure the upstream conformance script is +working appropriately: + * Build the kind images for this new version. + 1. Checkout K8s locally at the tag in question + 1. 
Run `make check-kind-env` to ensure the repo/tag are correct + 1. Run `make kind_images` + 1. Run `make push_kind_images` + * Update our CI build our kind cluster with the new image. +1. If the new release corresponds to a new Kubernetes release, the following steps must be performed: + * Add the new list of E2E test images. + For an example of the outcome of this process, see the [change corresponding to the Kubernetes v1.14 release](https://github.com/vmware-tanzu/sonobuoy/commit/68f15a260e60a288f91bc40347c817b382a3d45c). + 1. Within `pkg/image/`, copy the latest `v1.x.go` file to a file which corresponds to the new Kubernetes release number. + For example, if the new Sonobuoy release corresponds to Kubernetes `v1.15`, copy the `v1.14.go` file to `v1.15.go`. + ``` + cp pkg/image/v1.{14,15}.go + ``` + This file will contain a function to return the list of test images for this new release. + 1. Update the name of the function in the newly created file. + For example, if the file is for the v1.15 release, ensure the function name is `v1_15`. + 1. Replace the map of images within the previously mentioned function with the map of images for the new release. + To do this, copy the equivalent map entries for the release from the Kubernetes repository. + For an example, see the entries [from the v1.15.0 release](https://github.com/kubernetes/kubernetes/blob/v1.15.0/test/utils/image/manifest.go#L202-L252). + Within the new function, remove any entries in the `config` map and replace with those copied from the Kubernetes repository. + The entries from the Kubernetes repository use an `int` as the key in the map however in the Sonobuoy repository the keys are strings. + Convert the new key names to strings. + 1. To make use of these new images, update the `GetImageConfigs` function within `pkg/image/manifest.go`. + Add a new case to the minor version check which will be the minor version of the new Kubernetes release. + In this new case, call the newly created function (e.g. `r.v1_15()`). + * Add the new default image registry configuration. + Once the images for the release have been added, update the function `GetDefaultImageRegistries` within `pkg/image/manifest.go` to return the default image registries for the new version. + To do this, add a new case to the minor version check which will be the minor version of the new Kubernetes release. + Within this case, return a new `RegistryList` object which includes only the registry keys used within the registry config for that version. + Some registries are not applicable to include in this object as they are there to test specific image pull behavior such as pulling from a private or non-existent registry. + This object should only include registries that can be successfully pulled from. + The other registries are not used within the end-to-end tests. + For an example, see the addition [from the v1.17.0 release](https://github.com/vmware-tanzu/sonobuoy/commit/93f63ef51e135dccf22407a0cdbf22f6c4a2cd26#diff-655c3323e53de3dff85eadd7592ca218R173-R188). + * Update the minimum and maximum Kubernetes API versions that Sonobuoy supports. + Edit `pkg/buildinfo/version.go` and update the `MinimumKubeVersion` to be 2 minor version below the new Kubernetes release version and update the `MaximumKubeVersion` to support future point releases. + For example, for the Kubernetes 1.15.0 release, the `MinimumKubeVersion` would become `1.13.0` and the `MaximumKubeVersion` would become `1.15.99`. +1. Commit and open/merge a pull request with these changes. +1. 
+1. Create an annotated tag for the commit once the changes are merged:
+   ```
+   git tag -a v0.x.y -m "Release v0.x.y"
+   ```
+
+   > NOTE: Tag the new tip of master, not the branch you just merged.
+
+1. Push the tag to the [`github.com/vmware-tanzu/sonobuoy`](https://github.com/vmware-tanzu/sonobuoy/) repository.
+    * To ensure that the tag is pushed to the correct repository, check which remote corresponds to that repository using the following command:
+      ```
+      git remote -v
+      ```
+      The output of this command should include at least two configured remotes, typically `origin`, which refers to your personal fork, and `upstream`, which refers to the upstream Sonobuoy repository.
+      For example:
+      ```
+      origin    git@github.com:<username>/sonobuoy.git (fetch)
+      origin    git@github.com:<username>/sonobuoy.git (push)
+      upstream  https://github.com/vmware-tanzu/sonobuoy (fetch)
+      upstream  https://github.com/vmware-tanzu/sonobuoy (push)
+      ```
+      For the following steps, use the remote configured for the `vmware-tanzu/sonobuoy` repository.
+      The following instructions will use `upstream`.
+    * Push the tag with the following command.
+      > NOTE: This will push all tags.
+
+      ```
+      git push upstream --tags
+      ```
+      To push just one tag, use the following command format (replacing `v0.x.y` with the tag created in the previous step):
+      ```
+      git push upstream refs/tags/v0.x.y
+      ```
+      If there is a problem and you need to remove the tag, run the following commands:
+      ```
+      git tag -d v0.x.y
+      git push upstream :refs/tags/v0.x.y
+      ```
+      > NOTE: The `:` preceding the tag ref is necessary to delete the tag from the remote repository.
+      > Git refspecs have the format `<+><src>:<dst>`.
+      > By pushing an empty `src` to the remote `dst`, it makes the destination ref empty, effectively deleting it.
+      > For more details, see the [`git push` documentation](https://git-scm.com/docs/git-push) or [this concise explanation on Stack Overflow](https://stackoverflow.com/a/7303710).
+
+
+## Validation
+1. Open a browser tab and go to https://circleci.com/gh/vmware-tanzu/sonobuoy and verify the GoReleaser job for tag v0.x.y completes successfully.
+1. Upon successful completion of the build job above, check the [releases tab of Sonobuoy](https://github.com/vmware-tanzu/sonobuoy/releases) and verify the artifacts and changelog were published correctly.
+1. Run the following command to make sure the image was pushed correctly to [Docker Hub][dockerhub]:
+   ```
+   docker run -it sonobuoy/sonobuoy:v0.x.y /sonobuoy version
+   ```
+   The `Sonobuoy Version` in the output should match the release tag above.
+1. Go to the [GitHub release page](https://github.com/vmware-tanzu/sonobuoy/releases), download the release binaries, and make sure the version matches the expected values.
+1. Run a [Kind](https://github.com/kubernetes-sigs/kind) cluster locally and ensure that you can run `sonobuoy run --mode quick`.
+   If this release corresponds to a new Kubernetes release as well, ensure:
+   - you're testing with the new Kind images by checking the output from:
+     ```
+     export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+     kubectl version --short
+     ```
+     and verifying that the server version matches the intended Kubernetes version.
+   - you can run `sonobuoy images` and get a list of test images as expected.
+1. Update the release notes if desired on GitHub by editing the newly created release.
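+
+Taken together, a quick local pass over these validation steps might look like the following sketch. Replace `v0.x.y` with the actual release tag; the `--wait` flag is an optional convenience and not part of the steps above:
+
+```
+# Confirm the published image reports the expected version.
+docker run -it sonobuoy/sonobuoy:v0.x.y /sonobuoy version
+
+# Confirm the local kind cluster is running the intended Kubernetes version.
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+kubectl version --short
+
+# Smoke-test the release and confirm the test image list resolves.
+sonobuoy run --mode quick --wait
+sonobuoy images
+```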
+
+### Generating a new set of versioned docs
+The changes for this can almost all be completed by running the command:
+```
+./scripts/update_docs.sh v0.x.y
+```
+
+This will copy the current master docs into the given version and update
+a few of the links in the README to be correct. It will also update
+the website config to add the new version and consider it the newest
+version of the docs.
+
+### Notes
+1. Before releasing, ensure all parties are available to resolve any issues that come up. If not, delay the release.
+2. If you are building a Windows release, you must currently build/push the Windows image outside of CI and push the manifest to also include it. To do this you must:
+
+   - Have built the Windows binaries (can be done on a Linux box and should be the default now)
+   - Have a Windows machine available for the build. The steps below assume a `docker context` (named `2019-box` here) which points to a Windows machine.
+   - (Recommended) Build the sample Windows plugin (in our examples directory) to test the image
+   - (Recommended) Have a cluster with Windows nodes available for testing
+
+```
+docker context use default
+make build/windows/amd64/sonobuoy.exe
+docker context use 2019-box
+make windows_containers
+PUSH_WINDOWS=true make push
+```
+
+[gendocs]: #generating-a-new-set-of-versioned-docs
+[dockerhub]: https://cloud.docker.com/u/sonobuoy/repository/docker/sonobuoy/sonobuoy/tags
\ No newline at end of file
diff --git a/site/docs/v0.18.0/results.md b/site/docs/v0.18.0/results.md
new file mode 100644
index 000000000..a5796ebd4
--- /dev/null
+++ b/site/docs/v0.18.0/results.md
@@ -0,0 +1,80 @@
+# Viewing Plugin Results
+
+The `sonobuoy results` command can be used to print the results of a plugin without first having to extract the files from the tarball.
+
+## Canonical Data Format
+
+Plugin results undergo post-processing on the server to produce a tree-like file which contains information about the tests run (or files generated) by the plugin. This is the file which enables `sonobuoy results` to present reports to the user and navigate the tarball effectively.
+
+Currently, plugins are specified as either producing `junit` results (like the `e2e` plugin) or `raw` results (like the `systemd-logs` plugin).
+
+To see this file directly you can either open the tarball and look for `plugins/<plugin-name>/sonobuoy_results.yaml` or run:
+
+```
+sonobuoy results $tarball --mode=dump
+```
+
+## Working with any Plugin
+
+By default, the command produces a human-readable report corresponding to the `e2e` plugin. However, you can specify other plugins by name. For example:
+
+```
+$ sonobuoy results $tarball --plugin systemd-logs
+Plugin: systemd-logs
+Status: passed
+Total: 1
+Passed: 1
+Failed: 0
+Skipped: 0
+```
+
+> In the above output, notice that even though the `systemd-logs` plugin doesn't run "tests" per se, each file produced by the plugin is reported on: a readable file is reported as a success.
+
+## Detailed Results
+
+If you would like to view or script around the individual tests/files, use the `--mode detailed` flag. In the case of junit tests, it will write a list of JSON objects which can be piped to other commands or saved to another file.
+
+To see the passed tests, one approach would be:
+
+```
+$ sonobuoy results $tarball --mode=detailed | jq 'select(.status=="passed")'
+```
+
+To list the conformance tests, one approach would be:
+
+```
+$ sonobuoy results $tarball --mode=detailed | jq 'select(.name | contains("[Conformance]"))'
+```
+
+When dealing with non-junit plugins, the `--mode detailed` results will print the file output with a prefix that reports on the nature/location of the file:
+
+```
+$ sonobuoy results $tarball --mode=detailed --plugin systemd-logs | head -n1
+systemd-logs|kind-control-plane|systemd_logs {"_HOSTNAME":"kind-control-plane",...}
+```
+
+The prefix tells you that this result came from the "systemd-logs" plugin, was from the "kind-control-plane" node, and the filename was "systemd_logs".
+
+If you have multiple nodes, you can look at just one by adding the `--node` flag. It walks the result tree and returns only results rooted at the given node:
+
+```
+$ sonobuoy results $tarball --mode=detailed --plugin systemd-logs --node=kind-control-plane | head -n1
+kind-control-plane|systemd_logs {"_HOSTNAME":"kind-control-plane",...}
+```
+
+Now if you want to script around the actual file output (in this case it is JSON), you won't want to keep that prefix around. Just add the `--skip-prefix` flag to get only the raw file output so that you can manipulate it easily:
+
+```
+$ sonobuoy results $tarball --mode=detailed --plugin systemd-logs --node=kind-control-plane --skip-prefix | head -n1 | jq .MESSAGE
+{"_HOSTNAME":"kind-control-plane",...}
+```
+
+## Summary
+
+ - `sonobuoy results` can show you the results of a plugin without extracting the tarball
+ - Plugins are currently either `junit` or `raw` type
+ - When viewing `junit` results, JSON data is dumped for each test
+ - When viewing `raw` results, file contents are dumped directly
+ - Use the `--mode` flag to see report, detailed, or dump level data
+ - Use the `--node` flag to view results rooted at a different location
+ - Use the `--skip-prefix` flag to print only file output
\ No newline at end of file
diff --git a/site/docs/v0.18.0/snapshot.md b/site/docs/v0.18.0/snapshot.md
new file mode 100644
index 000000000..1d10d2427
--- /dev/null
+++ b/site/docs/v0.18.0/snapshot.md
@@ -0,0 +1,128 @@
+# Sonobuoy Snapshot Layout
+
+- [Retrieving results](#retrieving-results)
+- [Filename](#filename)
+- [Contents](#contents)
+  - [/hosts](#hosts)
+  - [/meta](#meta)
+  - [/plugins](#plugins)
+  - [/podlogs](#podlogs)
+  - [/resources](#resources)
+  - [/servergroups.json](#servergroupsjson)
+  - [/serverversion.json](#serverversionjson)
+
+This document describes retrieving the Sonobuoy results tarball, its layout, how it is formatted, and how data is named and laid out.
+
+## Retrieving results
+
+To view the output, copy the results tarball from the aggregator Sonobuoy pod to
+your local machine (and save the name of the file to a variable for reference):
+
+```
+output=$(sonobuoy retrieve)
+```
+
+The results of plugins can be inspected without being extracted. By default, this gives you a human-readable report about the tests, but there are also options to list detailed information and even print raw files generated by the plugin. See the [results page][results] for more details.
+
+```
+sonobuoy results $output [--plugin <plugin>] [--mode report|detailed|dump]
+```
+
+> Note: There is also an older `sonobuoy e2e $output` command which can inspect the results of the `e2e` plugin and even rerun failures.
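+
+For example, a typical flow is to retrieve the tarball and then print the default report for the `e2e` plugin; the flags shown here are the same ones documented on the [results page][results]:
+
+```
+output=$(sonobuoy retrieve)
+sonobuoy results $output --plugin e2e --mode report
+```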
+
+You can also extract the output locally so that you can view the other
+information Sonobuoy gathered as well:
+ - detailed plugin results
+ - pod logs
+ - query results about the contents/state of your cluster
+
+```
+mkdir ./results; tar xzf $output -C ./results
+```
+
+## Filename
+
+A Sonobuoy snapshot is a gzipped tarball, named `YYYYmmDDHHMM_sonobuoy_<UUID>.tar.gz`,
+where `YYYYmmDDHHMM` is a timestamp containing the year, month, day, hour, and minute of the run. The `<UUID>` string is an RFC 4122 UUID, consisting of lowercase hexadecimal characters and dashes (e.g. "dfe30ebc-f635-42f4-9608-8abcd6311916"). This UUID should match the UUID from the snapshot's [meta/config.json][1], stored at the root of the tarball.
+
+## Contents
+
+The top-level directories in the results tarball look like this:
+
+![tarball overview screenshot][3]
+
+### /hosts
+
+The `/hosts` directory contains the information gathered about each host in the system by directly querying their HTTP endpoints.
+This is different from what you find in `/resources/cluster/Nodes.json` -- it contains items that aren't part of the Kubernetes API objects:
+
+- `/hosts/<hostname>/configz.json` - Contains the output of querying the `/configz` endpoint for this host -- that is, the component configuration for the host.
+- `/hosts/<hostname>/healthz.json` - Contains a JSON-formatted representation of the result of querying `/healthz` for this host, for example `{"status":200}`
+
+This looks like the following:
+
+![tarball hosts screenshot][5]
+
+### /meta
+
+The `/meta` directory contains metadata about this Sonobuoy run, including configuration and query runtime.
+
+- `/meta/query-time.json` - Contains metadata about how long each query took, for example: `{"queryobj":"Pods","time":"12.345ms"}`
+- `/meta/config.json` - A copy of the Sonobuoy configuration that was set up when this run was created, but with unspecified values filled in with explicit defaults, and with a `UUID` field in the root JSON, set to a randomly generated UUID created for that Sonobuoy run.
+
+This looks like the following:
+
+![tarball meta screenshot][8]
+
+### /plugins
+
+The `/plugins` directory contains output for each plugin selected for this Sonobuoy run:
+
+- `/plugins/<plugin-name>/results/` - For plugins that run on an arbitrary node to collect cluster-wide data, for example using the Job driver. Contains the results for the plugin.
+
+- `/plugins/<plugin-name>/results/<node-name>/` - For plugins that run once on every node to collect node-specific data, for example using the DaemonSet driver. Contains the results for the plugin, for each node.
+
+- `/plugins/<plugin-name>/sonobuoy_results.yaml` - A file generated by the server by post-processing the plugin results. This is the file that `sonobuoy results` relies on.
+
+This looks like the following:
+
+![tarball plugins screenshot][7]
+
+### /podlogs
+
+The `/podlogs` directory contains logs for each pod found during the Sonobuoy run, similar to what you get with `kubectl logs <pod> -n <namespace>`.
+
+- `/podlogs/<namespace>/<pod-name>/<container-name>.log` - Contains the logs for each container, for each pod in each namespace.
+
+This looks like the following:
+
+![tarball podlogs screenshot][6]
+
+### /resources
+
+The `/resources` directory lists JSON-serialized Kubernetes objects, taken from querying the Kubernetes REST API. The directory has the following structure:
+
+- `/resources/ns/<namespace>/<resource-type>.json` - For all resources that belong to a namespace, where `<namespace>` is the namespace of that resource (eg. `kube-system`), and `<resource-type>` is the type of resource, pluralized (eg. `Pods`).
+- `/resources/cluster/<resource-type>.json` - For all resources that don't belong to a namespace, where `<resource-type>` is the type of resource, pluralized (eg. `Nodes`).
+
+This looks like the following:
+
+![tarball resources screenshot][4]
+
+### /servergroups.json
+
+`/servergroups.json` lists the Kubernetes APIs that the cluster supports.
+
+### /serverversion.json
+
+`/serverversion.json` contains the output from querying the server's version, including the major and minor version, git commit, etc.
+
+[1]: #meta
+[3]: img/snapshot-00-overview.png
+[4]: img/snapshot-10-resources.png
+[5]: img/snapshot-20-hosts.png
+[6]: img/snapshot-30-podlogs.png
+[7]: img/snapshot-40-plugins.png
+[8]: img/snapshot-50-meta.png
+[results]: results.md
diff --git a/site/docs/v0.18.0/sonobuoy-config.md b/site/docs/v0.18.0/sonobuoy-config.md
new file mode 100644
index 000000000..a955d89e9
--- /dev/null
+++ b/site/docs/v0.18.0/sonobuoy-config.md
@@ -0,0 +1,98 @@
+# Sonobuoy Config
+
+The commands "run" and "gen" both accept a parameter for a Sonobuoy config file which allows you to customize multiple aspects of the run.
+
+We provide a command to generate the default JSON configuration so that it is easier to edit for your runs. Run the command:
+
+```
+sonobuoy gen config
+```
+
+and you will see the default configuration. Below is a description of each of the values.
+
+## General options
+
+Description
+ - A string which provides consumers a way to add extra context to a configuration that may be in memory or saved to disk. Unused by Sonobuoy itself.
+
+UUID
+ - A unique identifier used to identify the run of this configuration. Used in a few places, including the name of the results file.
+
+Namespace
+ - The namespace in which to run Sonobuoy.
+
+WorkerImage
+ - The image for the Sonobuoy worker container, which runs as a sidecar alongside the plugins. Responsible for reporting results back to the Sonobuoy aggregator.
+
+ImagePullPolicy
+ - The image pull policy to set on the Sonobuoy worker sidecars as well as each of the plugins.
+
+ResultsDir
+ - The location on the Sonobuoy aggregator where the results are placed.
+
+Version
+ - The version of Sonobuoy which created the configuration file.
+
+
+## Plugin options
+
+Plugins
+ - An array of plugin selection objects specifying the plugins you want to run. When running custom plugins (or avoiding running a particular plugin), this value needs to be modified.
+
+PluginSearchPath
+ - The aggregator pod looks for plugin configurations in these locations. You shouldn't need to edit this unless you are doing development work on the aggregator itself.
+
+## Query options
+
+Resources
+ - A list of resources which Sonobuoy will query for in every namespace in which it runs queries. In the namespace in which Sonobuoy is running, PodLogs, Events, and HorizontalPodAutoscalers are also added.
+
+Filters
+ - Namespace
+   - A regexp which specifies which namespaces to run queries against.
+ - LabelSelector
+   - A Kubernetes [label selector][labelselector] which will be added to every query run.
+
+Limits
+ - Options for limiting the scope of the response.
+ - **Limits.PodLogs** limits the scope when getting logs from pods. The supported parameters are:
+   - **Namespaces**: string
+     - A regular expression for the targeted namespaces.
+     - Default is the empty string
+     - To get logs from all namespaces use ".*"
+   - **SonobuoyNamespace**: bool
+     - If set to true, get pod logs from the namespace Sonobuoy is running in. Can be set along with the `Namespaces` field or on its own.
+     - Default value is true
+   - **FieldSelectors**: []string
+     - A list of field selectors, combined with OR logic.
+     - For example, to get logs from two specified namespaces:
+       `FieldSelectors = ["metadata.namespace=default","metadata.namespace=heptio-sonobuoy"]`
+     - Each field selector contains one or more chained operators, combined with AND logic.
+     - For example, to get logs from a specified pod:
+       `FieldSelectors = ["metadata.namespace=default,metadata.name=pod1"]`
+     - Each field selector follows the same format as
+       `k8s.io/apimachinery/pkg/apis/meta/v1/types/ListOptions/FieldSelector`
+     - Can be set along with a Namespaces/SonobuoyNamespace field or on its own.
+   - **LabelSelector**: string
+     - Filters candidate pods by their labels.
+     - Uses the same format as
+       `k8s.io/apimachinery/pkg/apis/meta/v1/types/ListOptions/LabelSelector`
+     - For example:
+       `LabelSelector = "app=nginx,layer in (frontend, backend)"`
+     - When set together with other fields, the scope of pods is defined by:
+       ```
+       (Namespaces OR SonobuoyNamespace OR FieldSelectors) AND LabelSelector
+       ```
+
+ - For each candidate pod, the format and size of logs is defined by the other fields. These are passed on to the Kubernetes [PodLogOptions][podlogopts]:
+   - Previous: bool
+   - SinceSeconds: int
+   - SinceTime: string. RFC3339 format.
+   - Timestamps: bool
+   - TailLines: int
+   - LimitBytes: int
+
+For a worked example combining these fields, see the sketch at the end of this page.
+
+[labelselector]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+[podlogopts]: https://godoc.org/k8s.io/api/core/v1#PodLogOptions
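+
+As a rough illustration of the query options above, the following sketch writes a partial configuration that limits pod log collection and passes it to `sonobuoy gen`. The file name `podlogs-config.json` is just an example, and the exact key nesting should be confirmed against the output of `sonobuoy gen config`:
+
+```
+cat <<EOF > podlogs-config.json
+{
+  "Limits": {
+    "PodLogs": {
+      "SonobuoyNamespace": true,
+      "FieldSelectors": ["metadata.namespace=default"],
+      "LabelSelector": "app=nginx",
+      "TailLines": 100
+    }
+  }
+}
+EOF
+sonobuoy gen --config podlogs-config.json
+```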