diff --git a/_data/concepts.yml b/_data/concepts.yml index 087455eb36ac6..1d62f48195aa9 100644 --- a/_data/concepts.yml +++ b/_data/concepts.yml @@ -47,7 +47,6 @@ toc: - docs/concepts/workloads/controllers/replicationcontroller.md - docs/concepts/workloads/controllers/deployment.md - docs/concepts/workloads/controllers/statefulset.md - - docs/concepts/workloads/controllers/petset.md - docs/concepts/workloads/controllers/daemonset.md - docs/concepts/workloads/controllers/garbage-collection.md - docs/concepts/workloads/controllers/jobs-run-to-completion.md diff --git a/_redirects b/_redirects index a9aea70922731..eb3e73df92dec 100644 --- a/_redirects +++ b/_redirects @@ -41,7 +41,6 @@ /docs/api /docs/concepts/overview/kubernetes-api 301 /docs/concepts/abstractions/controllers/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301 -/docs/concepts/abstractions/controllers/petsets /docs/concepts/workloads/controllers/petset 301 /docs/concepts/abstractions/controllers/statefulsets /docs/concepts/workloads/controllers/statefulset 301 /docs/concepts/abstractions/init-containers /docs/concepts/workloads/pods/init-containers 301 /docs/concepts/abstractions/overview /docs/concepts/overview/working-with-objects/kubernetes-objects 301 @@ -192,8 +191,6 @@ /docs/user-guide/node-selection/ /docs/concepts/configuration/assign-pod-node 301 /docs/user-guide/persistent-volumes/ /docs/concepts/storage/persistent-volumes 301 /docs/user-guide/persistent-volumes/walkthrough /docs/tasks/configure-pod-container/configure-persistent-volume-storage 301 -/docs/user-guide/petset /docs/concepts/workloads/controllers/petset 301 -/docs/user-guide/petset/bootstrapping/ /docs/concepts/workloads/controllers/petset 301 /docs/user-guide/pod-preset/ /docs/tasks/inject-data-application/podpreset 301 /docs/user-guide/pod-security-policy/ /docs/concepts/policy/pod-security-policy 301 /docs/user-guide/pod-states /docs/concepts/workloads/pods/pod-lifecycle 301 diff --git 
a/cn/docs/concepts/workloads/controllers/petset.yaml b/cn/docs/concepts/workloads/controllers/petset.yaml deleted file mode 100644 index 5c29237c4897d..0000000000000 --- a/cn/docs/concepts/workloads/controllers/petset.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# A headless service to create DNS records -apiVersion: v1 -kind: Service -metadata: - name: nginx - labels: - app: nginx -spec: - ports: - - port: 80 - name: web - # *.nginx.default.svc.cluster.local - clusterIP: None - selector: - app: nginx ---- -apiVersion: apps/v1alpha1 -kind: PetSet -metadata: - name: web -spec: - serviceName: "nginx" - replicas: 2 - template: - metadata: - labels: - app: nginx - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - terminationGracePeriodSeconds: 0 - containers: - - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 - ports: - - containerPort: 80 - name: web - volumeMounts: - - name: www - mountPath: /usr/share/nginx/html - volumeClaimTemplates: - - metadata: - name: www - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi - diff --git a/cn/docs/concepts/workloads/controllers/statefulset.md b/cn/docs/concepts/workloads/controllers/statefulset.md index d52a8c1e1f2e3..fe6f85cf22296 100644 --- a/cn/docs/concepts/workloads/controllers/statefulset.md +++ b/cn/docs/concepts/workloads/controllers/statefulset.md @@ -11,9 +11,7 @@ title: StatefulSets {% capture overview %} **StatefulSets are a beta feature in 1.7. This feature replaces the -PetSets feature from 1.4. 
Users of PetSets are referred to the 1.5 -[Upgrade Guide](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) -for further information on how to upgrade existing PetSets to StatefulSets.** +PetSets feature from 1.4.** {% include templates/glossary/snippet.md term="statefulset" length="long" %} {% endcapture %} diff --git a/docs/concepts/workloads/controllers/petset.md b/docs/concepts/workloads/controllers/petset.md deleted file mode 100644 index 42c90cfd96365..0000000000000 --- a/docs/concepts/workloads/controllers/petset.md +++ /dev/null @@ -1,441 +0,0 @@ ---- -approvers: -- bprashanth -- enisoc -- erictune -- foxish -- janetkuo -- kow3ns -- smarterclayton -title: PetSets ---- - -__Warning:__ Starting in Kubernetes version 1.5, PetSet has been renamed to [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets). To use (or continue to use) PetSet in Kubernetes 1.5, you _must_ [migrate](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) your existing PetSets to StatefulSets. For information on working with StatefulSet, see the tutorial on [how to run replicated stateful applications](/docs/tutorials/stateful-application/run-replicated-stateful-application). - -__This document has been deprecated__, but can still apply if you're using - Kubernetes version 1.4 or earlier. - -* TOC -{:toc} - -__Terminology__ - -Throughout this doc you will see a few terms that are sometimes used interchangeably elsewhere, that might cause confusion. This section attempts to clarify them. - -* Node: A single virtual or physical machine in a Kubernetes cluster. -* Cluster: A group of nodes in a single failure domain, unless mentioned otherwise. -* Persistent Volume Claim (PVC): A request for storage, typically a [persistent volume](/docs/user-guide/persistent-volumes/walkthrough/). -* Host name: The hostname attached to the UTS namespace of the pod, i.e. the output of `hostname` in the pod. 
-* DNS/Domain name: A *cluster local* domain name resolvable using standard methods (e.g.: [gethostbyname](http://linux.die.net/man/3/gethostbyname)). -* Ordinality: the property of being "ordinal", or occupying a position in a sequence. -* Pet: a single member of a PetSet; more generally, a stateful application. -* Peer: a process running a server, capable of communicating with other such processes. - -__Prerequisites__ - -This doc assumes familiarity with the following Kubernetes concepts: - -* [Pods](/docs/user-guide/pods/single-container/) -* [Cluster DNS](/docs/concepts/services-networking/dns-pod-service/) -* [Headless Services](/docs/user-guide/services/#headless-services) -* [Persistent Volumes](/docs/concepts/storage/volumes/) -* [Persistent Volume Provisioning](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/staging/persistent-volume-provisioning/README.md) - -You need a working Kubernetes cluster at version >= 1.3, with a healthy DNS [cluster addon](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/README.md) at version >= 15. You cannot use PetSet on a hosted Kubernetes provider that has disabled `alpha` resources. - -## What is a PetSet? - -In Kubernetes, most pod management abstractions group them into disposable units of work that compose a micro service. Replication controllers for example, are designed with a weak guarantee - that there should be N replicas of a particular pod template. The pods are treated as stateless units, if one of them is unhealthy or superseded by a newer version, the system just disposes it. - -``` - foo.default.svc.cluster.local - |service| - / \ - | pod-asdf | | pod-zxcv | -``` - -A PetSet, in contrast, is a group of stateful pods that require a stronger notion of identity. The document refers to these as "clustered applications". 
- -``` - *.foo.default.svc.cluster.local - | mysql-0 | <-> | mysql-1 | - [pv 0] [pv 1] -``` - -The co-ordinated deployment of clustered applications is notoriously hard. They require stronger notions of identity and membership, which they use in opaque internal protocols, and are especially prone to race conditions and deadlock. Traditionally administrators have deployed these applications by leveraging nodes as stable, long-lived entities with persistent storage and static ips. - -The goal of PetSet is to decouple this dependency by assigning identities to individual instances of an application that are not anchored to the underlying physical infrastructure. For the rest of this document we will refer to these entities as "Pets". Our use of this term is predated by the "Pets vs Cattle" analogy. - -__Relationship between Pets and Pods__: PetSet requires there be {0..N-1} Pets. Each Pet has a deterministic name - PetSetName-Ordinal, and a unique identity. Each Pet has at most one pod, and each PetSet has at most one Pet with a given identity. - -## When to use PetSet? - -A PetSet ensures that a specified number of "pets" with unique identities are running at any given time. The identity of a Pet is comprised of: - -* a stable hostname, available in DNS -* an ordinal index -* stable storage: linked to the ordinal & hostname - -These properties are useful in deploying stateful applications. However most stateful applications are also clustered, meaning they form groups with strict membership requirements that rely on stored state. PetSet also helps with the 2 most common problems encountered managing such clustered applications: - -* discovery of peers for quorum -* startup/teardown ordering - -Only use PetSet if your application requires some or all of these properties. Managing pods as stateless replicas is vastly easier. 
- -Example workloads for PetSet: - -* Databases like MySQL or PostgreSQL that require a single instance attached to an NFS persistent volume at any time -* Clustered software like Zookeeper, Etcd, or Elasticsearch that require stable membership. - -## Alpha limitations - -Before you start deploying applications as PetSets, there are a few limitations you should understand. - -* PetSet is an *alpha* resource, not available in any Kubernetes release prior to 1.3. -* As with all alpha/beta resources, it can be disabled through the `--runtime-config` option passed to the apiserver, and in fact most likely will be disabled on hosted offerings of Kubernetes. -* The only updatable field on a PetSet is `replicas`. -* The storage for a given pet must either be provisioned by a [persistent volume provisioner](https://github.com/kubernetes/examples/tree/{{page.githubbranch}}/staging/persistent-volume-provisioning/README.md) based on the requested `storage class`, or pre-provisioned by an admin. Note that persistent volume provisioning is also currently in alpha. -* Deleting and/or scaling a PetSet down will *not* delete the volumes associated with the PetSet. This is done to ensure safety first, your data is more valuable than an auto purge of all related PetSet resources. **Deleting the Persistent Volume Claims will result in a deletion of the associated volumes**. -* All PetSets currently require a "governing service", or a Service responsible for the network identity of the pets. The user is responsible for this Service. -* Updating an existing PetSet is currently a manual process, meaning you either need to deploy a new PetSet with the new image version, or orphan Pets one by one, update their image, and join them back to the cluster. - -## Example PetSet - -We'll create a basic PetSet to demonstrate how Pets are assigned unique and "sticky" identities. 
- -{% include code.html language="yaml" file="petset.yaml" ghlink="/docs/concepts/workloads/controllers/petset.yaml" %} - -Saving this config into `petset.yaml` and submitting it to a Kubernetes cluster should create the defined PetSet and Pets it manages: - -```shell -$ kubectl create -f petset.yaml -service "nginx" created -petset "web" created -``` - -## Pet Identity - -The identity of a Pet sticks to it, regardless of which node it's (re) scheduled on. We can examine the identity of the pets we just created. - -### Ordinal index - -you should see 2 pods with predictable names formatted thus: `$(petset name)-$(ordinal index assigned by petset controller)` - -```shell -$ kubectl get po -NAME READY STATUS RESTARTS AGE -web-0 1/1 Running 0 10m -web-1 1/1 Running 0 10m -``` - -### Stable storage - -2 persistent volumes, one per pod. This is auto created by the PetSet based on the `volumeClaimTemplate` field - -```shell -$ kubectl get pv -NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE -pvc-90234946-3717-11e6-a46e-42010af00002 1Gi RWO Bound default/www-web-0 11m -pvc-902733c2-3717-11e6-a46e-42010af00002 1Gi RWO Bound default/www-web-1 11m -``` - -### Network identity - -The network identity has 2 parts. First, we created a headless Service that controls the domain within which we create Pets. The domain managed by this Service takes the form: `$(service name).$(namespace).svc.cluster.local`, where "cluster.local" is the [cluster domain](/docs/concepts/services-networking/dns-pod-service/). As each pet is created, it gets a matching DNS subdomain, taking the form: `$(petname).$(governing service domain)`, where the governing service is defined by the `serviceName` field on the PetSet. 
- -Here are some examples of choices for Cluster Domain, Service name, PetSet name, and how that affects the DNS names for the Pets and the hostnames in the Pet's pods: - -Cluster Domain | Service (ns/name) | PetSet (ns/name) | PetSet Domain | Pet DNS | Pet Hostname | --------------- | ----------------- | ----------------- | -------------- | ------- | ------------ | - cluster.local | default/nginx | default/web | nginx.default.svc.cluster.local | web-{0..N-1}.nginx.default.svc.cluster.local | web-{0..N-1} | - cluster.local | foo/nginx | foo/web | nginx.foo.svc.cluster.local | web-{0..N-1}.nginx.foo.svc.cluster.local | web-{0..N-1} | - kube.local | foo/nginx | foo/web | nginx.foo.svc.kube.local | web-{0..N-1}.nginx.foo.svc.kube.local | web-{0..N-1} | - -Note that Cluster Domain will be set to `cluster.local` unless [otherwise configured](https://github.com/kubernetes/kubernetes/blob/master/examples/cluster-dns/README.md). - -Let's verify our assertion with a simple test. - -```shell -$ kubectl get svc -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -nginx None 80/TCP 12m -... -``` - -First, the PetSet provides a stable hostname: - -```shell -$ for i in 0 1; do kubectl exec web-$i -- sh -c 'hostname'; done -web-0 -web-1 -``` - -And the hostname is linked to the in-cluster DNS address: - -```shell -$ kubectl run -i --tty --image busybox dns-test --restart=Never /bin/sh -dns-test # nslookup web-0.nginx -Server: 10.0.0.10 -Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local - -Name: web-0.nginx -Address 1: 10.180.3.5 - -dns-test # nslookup web-1.nginx -Server: 10.0.0.10 -Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local - -Name: web-1.nginx -Address 1: 10.180.0.9 -``` - -The containers are running nginx webservers, which by default will look for an index.html file in `/usr/share/nginx/html/index.html`. That directory is backed by a `PersistentVolume` created by the PetSet. 
So let's write our hostname there: - -```shell -$ for i in 0 1; do - kubectl exec web-$i -- sh -c 'echo $(hostname) > /usr/share/nginx/html/index.html'; -done -``` - -And verify each webserver serves its own hostname: - -```shell -$ for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done -web-0 -web-1 -``` - -Now delete all pods in the petset: - -```shell -$ kubectl delete po -l app=nginx -pod "web-0" deleted -pod "web-1" deleted -``` - -Wait for them to come back up, and try to retrieve the previously written hostname through the DNS name of the peer. They match, because the storage, DNS name, and hostname stick to the Pet no matter where it gets scheduled: - -```shell -$ kubectl exec -it web-1 -- curl web-0.nginx -web-0 -$ kubectl exec -it web-0 -- curl web-1.nginx -web-1 -``` - -## Peer discovery - -A pet can piece together its own identity: - -1. Use the [downward api](/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information/) to find its pod name -2. Run `hostname` to find its DNS name -3. Run `mount` or `df` to find its volumes (usually this is unnecessary) - -It's not necessary to "discover" the governing Service of a PetSet, since it's known at creation time you can simply pass it down through an [environment variable](/docs/user-guide/environment-guide). - -Usually pets also need to find their peers. In the previous nginx example, we just used `kubectl` to get the names of existing pods, and as humans, we could tell which ones belonged to a given PetSet. Another way to find peers is by contacting the API server, just like `kubectl`, but that has several disadvantages (you end up implementing a Kubernetes specific init system that runs as pid 1 in your application container). - -PetSet gives you a way to discover your peers using DNS records. To illustrate this we can use the previous example (note: one usually doesn't `apt-get` in a container). 
- -```shell -$ kubectl exec -it web-0 /bin/sh -web-0 # apt-get update && apt-get install -y dnsutils -... - -web-0 # nslookup -type=srv nginx.default -Server: 10.0.0.10 -Address: 10.0.0.10#53 - -nginx.default.svc.cluster.local service = 10 50 0 web-1.ub.default.svc.cluster.local. -nginx.default.svc.cluster.local service = 10 50 0 web-0.ub.default.svc.cluster.local. -``` - -## Updating a PetSet - -You cannot update any field of the PetSet except `spec.replicas` and the `containers` in the podTemplate. Updating `spec.replicas` will scale the PetSet, updating `containers` will not have any effect till a Pet is deleted, at which time it is recreated with the modified podTemplate. - -## Scaling a PetSet - -You can scale a PetSet by updating the "replicas" field. Note however that the controller will only: - -1. Create one pet at a time, in order from {0..N-1}, and wait till each one is in [Running and Ready](/docs/user-guide/pod-states) before creating the next -2. Delete one pet at a time, in reverse order from {N-1..0}, and wait till each one is completely shutdown (past its [terminationGracePeriodSeconds](/docs/concepts/workloads/pods/pod/#termination-of-pods) before deleting the next - -```shell -$ kubectl get po -NAME READY STATUS RESTARTS AGE -web-0 1/1 Running 0 30s -web-1 1/1 Running 0 36s - -$ kubectl patch petset web -p '{"spec":{"replicas":3}}' -"web" patched - -$ kubectl get po -NAME READY STATUS RESTARTS AGE -web-0 1/1 Running 0 40s -web-1 1/1 Running 0 46s -web-2 1/1 Running 0 8s -``` - -You can also use the `kubectl scale` command: - -```shell -$ kubectl get petset -NAME DESIRED CURRENT AGE -web 3 3 24m - -$ kubectl scale petset web --replicas=5 -petset "web" scaled - -$ kubectl get po --watch-only -NAME READY STATUS RESTARTS AGE -web-0 1/1 Running 0 10m -web-1 1/1 Running 0 27m -web-2 1/1 Running 0 10m -web-3 1/1 Running 0 3m -web-4 0/1 ContainerCreating 0 48s - -$ kubectl get petset web -NAME DESIRED CURRENT AGE -web 5 5 30m -``` - -Note however, that 
scaling up to N and back down to M *will not* delete the volumes of the M-N pets, as described in the section on [deletion](#deleting-a-petset), i.e. scaling back up to M creates new pets that use the same volumes. To see this in action, scale the PetSet back down to 3: - -```shell -$ kubectl get po --watch-only -web-4 1/1 Terminating 0 4m -web-4 1/1 Terminating 0 4m -web-3 1/1 Terminating 0 6m -web-3 1/1 Terminating 0 6m -``` - -Note that we still have 5 pvcs: - -```shell -$ kubectl get pvc -NAME STATUS VOLUME CAPACITY ACCESSMODES AGE -www-web-0 Bound pvc-42ca5cef-8113-11e6-82f6-42010af00002 1Gi RWO 32m -www-web-1 Bound pvc-42de30af-8113-11e6-82f6-42010af00002 1Gi RWO 32m -www-web-2 Bound pvc-ba416413-8115-11e6-82f6-42010af00002 1Gi RWO 14m -www-web-3 Bound pvc-ba45f19c-8115-11e6-82f6-42010af00002 1Gi RWO 14m -www-web-4 Bound pvc-ba47674a-8115-11e6-82f6-42010af00002 1Gi RWO 14m -``` - -This allows you to upgrade the image of a petset and have it come back up with the same data, as described in the next section. - -## Image upgrades - -PetSet currently *does not* support automated image upgrade as noted in the section on [limitations](#alpha-limitations), however you can update the `image` field of any container in the podTemplate and delete Pets one by one, the PetSet controller will recreate it with the new image. 
- -Edit the image on the PetSet to `gcr.io/google_containers/nginx-slim:0.7` and delete 1 Pet: - -```shell{% raw %} -$ for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done -gcr.io/google_containers/nginx-slim:0.8 -gcr.io/google_containers/nginx-slim:0.8 -gcr.io/google_containers/nginx-slim:0.8 - -$ kubectl delete po web-0 -pod "web-0" deleted - -$ for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done -gcr.io/google_containers/nginx-slim:0.7 -gcr.io/google_containers/nginx-slim:0.8 -gcr.io/google_containers/nginx-slim:0.8 -{% endraw %}``` - -Delete the remaining 2: - -```shell -$ kubectl delete po web-1 web-2 -pod "web-1" deleted -pod "web-2" deleted -``` - -Wait till the PetSet is stable and check the images: - -```shell{% raw %} -$ for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done -gcr.io/google_containers/nginx-slim:0.7 -gcr.io/google_containers/nginx-slim:0.7 -gcr.io/google_containers/nginx-slim:0.7 -{% endraw %}``` - -## Deleting a PetSet - -Deleting a PetSet through kubectl will scale it down to 0, thereby deleting all the Pets. If you wish to delete just the PetSet and not the Pets, use `--cascade=false`: - -```shell -$ kubectl delete -f petset.yaml --cascade=false -petset "web" deleted - -$ kubectl get po -l app=nginx -NAME READY STATUS RESTARTS AGE -web-0 1/1 Running 0 21h -web-1 1/1 Running 0 21h - -$ kubectl delete po -l app=nginx -pod "web-0" deleted -pod "web-1" deleted -``` - -Deleting the pods will *not* delete the volumes. Until we finalize the recycle policy for these volumes they will have to get cleaned up by an admin. This is to ensure that you have the chance to copy data off the volume before deleting it. 
Simply deleting the PVC after the pods have left the [terminating state](/docs/concepts/workloads/pods/pod/#termination-of-pods) should trigger deletion of the backing Persistent Volumes. - -**Note: you will lose all your data once the PVC is deleted, do this with caution.** - -```shell -$ kubectl get po -l app=nginx -$ kubectl get pvc -l app=nginx -NAME STATUS VOLUME CAPACITY ACCESSMODES AGE -www-web-0 Bound pvc-62d271cd-3822-11e6-b1b7-42010af00002 0 21h -www-web-1 Bound pvc-62d6750e-3822-11e6-b1b7-42010af00002 0 21h - -$ kubectl delete pvc -l app=nginx -$ kubectl get pv -``` - -If you simply want to clean everything: - -```shell{% raw %} -$ grace=$(kubectl get po web-0 --template '{{.spec.terminationGracePeriodSeconds}}') -$ kubectl delete petset,po -l app=nginx -$ sleep $grace -$ kubectl delete pvc -l app=nginx -{% endraw %} -``` - -## Troubleshooting - -You might have noticed an `annotations` field in all the PetSets shown above. - -```yaml -annotations: - pod.alpha.kubernetes.io/initialized: "true" -``` - -This field is a debugging hook. It pauses any scale up/down operations on the entire PetSet. If you'd like to pause a petset after each pet, set it to `false` in the template, wait for each pet to come up, verify it has initialized correctly, and then set it to `true` using `kubectl edit` on the pet (setting it to `false` on *any pet* is enough to pause the PetSet). If you don't need it, create the PetSet with it set to `true` as shown. This is surprisingly useful in debugging bootstrapping race conditions. - -## Future Work - -There are a LOT of planned improvements since PetSet is still in alpha. - -* Data gravity and local storage -* Richer notification events -* Public network identities -* WAN cluster deployments (multi-AZ/region/cloud provider) -* Image and node upgrades - -This list goes on, if you have examples, ideas or thoughts, please contribute. 
- -## Alternatives - -Deploying one RC of size 1/Service per pod is a popular alternative, as is simply deploying a DaemonSet that utilizes the identity of a Node. - -## Next steps - -* Learn about [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/), - the replacement for PetSet introduced in Kubernetes version 1.5. -* [Migrate your existing PetSets to StatefulSets](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) - when upgrading to Kubernetes version 1.5 or higher. - diff --git a/docs/concepts/workloads/controllers/petset.yaml b/docs/concepts/workloads/controllers/petset.yaml deleted file mode 100644 index 5c29237c4897d..0000000000000 --- a/docs/concepts/workloads/controllers/petset.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# A headless service to create DNS records -apiVersion: v1 -kind: Service -metadata: - name: nginx - labels: - app: nginx -spec: - ports: - - port: 80 - name: web - # *.nginx.default.svc.cluster.local - clusterIP: None - selector: - app: nginx ---- -apiVersion: apps/v1alpha1 -kind: PetSet -metadata: - name: web -spec: - serviceName: "nginx" - replicas: 2 - template: - metadata: - labels: - app: nginx - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - terminationGracePeriodSeconds: 0 - containers: - - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 - ports: - - containerPort: 80 - name: web - volumeMounts: - - name: www - mountPath: /usr/share/nginx/html - volumeClaimTemplates: - - metadata: - name: www - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi - diff --git a/docs/concepts/workloads/controllers/statefulset.md b/docs/concepts/workloads/controllers/statefulset.md index 52168247c7292..b0adb4e2350d8 100644 --- a/docs/concepts/workloads/controllers/statefulset.md +++ b/docs/concepts/workloads/controllers/statefulset.md @@ -11,9 +11,7 @@ title: StatefulSets {% capture overview %} 
**StatefulSets are a beta feature in 1.7. This feature replaces the -PetSets feature from 1.4. Users of PetSets are referred to the 1.5 -[Upgrade Guide](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) -for further information on how to upgrade existing PetSets to StatefulSets.** +PetSets feature from 1.4.** {% include templates/glossary/snippet.md term="statefulset" length="long" %} {% endcapture %} diff --git a/docs/concepts/workloads/pods/pod.md b/docs/concepts/workloads/pods/pod.md index dd7dbd5fa3dfb..44f9725f6ba7e 100644 --- a/docs/concepts/workloads/pods/pod.md +++ b/docs/concepts/workloads/pods/pod.md @@ -150,7 +150,7 @@ Pod is exposed as a primitive in order to facilitate: * clean composition of Kubelet-level functionality with cluster-level functionality — Kubelet is effectively the "pod controller" * high-availability applications, which will expect pods to be replaced in advance of their termination and certainly in advance of deletion, such as in the case of planned evictions, image prefetching, or live pod migration [#3949](http://issue.k8s.io/3949) -There is new first-class support for stateful pods with the [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/) controller (currently in beta). The feature was alpha in 1.4 and was called [PetSet](/docs/concepts/workloads/controllers/petset/). For prior versions of Kubernetes, best practice for having stateful pods is to create a replication controller with `replicas` equal to `1` and a corresponding service, see [this MySQL deployment example](/docs/tutorials/stateful-application/run-stateful-application/). +There is new first-class support for stateful pods with the [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets/) controller (currently in beta). The feature was alpha in 1.4 and was called PetSet. 
For prior versions of Kubernetes, best practice for having stateful pods is to create a replication controller with `replicas` equal to `1` and a corresponding service, see [this MySQL deployment example](/docs/tutorials/stateful-application/run-stateful-application/). ## Termination of Pods diff --git a/docs/tasks/index.md b/docs/tasks/index.md index 86a435eeae719..d52273874de39 100644 --- a/docs/tasks/index.md +++ b/docs/tasks/index.md @@ -52,7 +52,7 @@ Configure components in a cluster federation. #### Managing Stateful Applications -Perform common tasks for managing Stateful applications, including upgrading from PetSets and scaling, deleting, and debugging StatefulSets. +Perform common tasks for managing Stateful applications, including scaling, deleting, and debugging StatefulSets. #### Cluster Daemons diff --git a/docs/tasks/run-application/run-single-instance-stateful-application.md b/docs/tasks/run-application/run-single-instance-stateful-application.md index 6a1c305fa9dc8..115debd9f6310 100644 --- a/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -206,7 +206,7 @@ specific to stateful apps: * Don't scale the app. This setup is for single-instance apps only. The underlying PersistentVolume can only be mounted to one Pod. For clustered stateful apps, see the - [StatefulSet documentation](/docs/concepts/workloads/controllers/petset/). + [StatefulSet documentation](/docs/concepts/workloads/controllers/statefulset/). * Use `strategy:` `type: Recreate` in the Deployment configuration YAML file. This instructs Kubernetes to _not_ use rolling updates. 
Rolling updates will not work, as you cannot have more than diff --git a/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md b/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md deleted file mode 100644 index 1909283b39fd7..0000000000000 --- a/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -approvers: -- bprashanth -- enisoc -- erictune -- foxish -- janetkuo -- kow3ns -- smarterclayton -title: Upgrade from PetSets to StatefulSets ---- - -{% capture overview %} -This page shows how to upgrade from PetSets (Kubernetes version 1.3 or 1.4) to *StatefulSets* (Kubernetes version 1.5 or later). -{% endcapture %} - -{% capture prerequisites %} - -* If you don't have PetSets in your current cluster, or you don't plan to upgrade - your master to Kubernetes 1.5 or later, you can skip this task. - -{% endcapture %} - -{% capture steps %} - -## Differences between alpha PetSets and beta StatefulSets - -PetSet was introduced as an alpha resource in Kubernetes release 1.3, and was renamed to StatefulSet as a beta resource in 1.5. -Here are some notable changes: - -* **StatefulSet is the new PetSet**: PetSet is no longer available in Kubernetes release 1.5 or later. It becomes beta StatefulSet. To understand why the name was changed, see this [discussion thread](https://github.com/kubernetes/kubernetes/issues/27430). -* **StatefulSet guards against split brain**: StatefulSets guarantee at most one Pod for a given ordinal index can be running anywhere in a cluster, to guard against split brain scenarios with distributed applications. *TODO: Link to doc about fencing.* -* **Flipped debug annotation behavior**: The default value of the debug annotation (`pod.alpha.kubernetes.io/initialized`) is now `true`. The absence of this annotation will pause PetSet operations, but will NOT pause StatefulSet operations. In most cases, you no longer need this annotation in your StatefulSet manifests. 
- - -## Upgrading from PetSets to StatefulSets - -Note that these steps need to be done in the specified order. You **should -NOT upgrade your Kubernetes master, nodes, or `kubectl` to Kubernetes version -1.5 or later**, until told to do so. - -### Find all PetSets and their manifests - -First, find all existing PetSets in your cluster: - -```shell -kubectl get petsets --all-namespaces -``` - -If you don't find any existing PetSets, you can safely upgrade your cluster to -Kubernetes version 1.5 or later. - -If you find existing PetSets and you have all their manifests at hand, you can continue to the next step to prepare StatefulSet manifests. - -Otherwise, you need to save their manifests so that you can recreate them as StatefulSets later. -Here's an example command for you to save all existing PetSets as one file. - -```shell -# Save all existing PetSets in all namespaces into a single file. Only needed when you don't have their manifests at hand. -kubectl get petsets --all-namespaces -o yaml > all-petsets.yaml -``` - -### Prepare StatefulSet manifests - -Now, for every PetSet manifest you have, prepare a corresponding StatefulSet manifest: - -1. Change `apiVersion` from `apps/v1alpha1` to `apps/v1beta1`. -2. Change `kind` from `PetSet` to `StatefulSet`. -3. If you have the debug hook annotation `pod.alpha.kubernetes.io/initialized` set to `true`, you can remove it because it's redundant. If you don't have this annotation, you should add one, with the value set to `false`, to pause StatefulSets operations. - -It's recommended that you keep both PetSet manifests and StatefulSet manifests, so that you can safely roll back and recreate your PetSets, -if you decide not to upgrade your cluster. - -### Delete all PetSets without cascading - -If you find existing PetSets in your cluster in the previous step, you need to delete all PetSets *without cascading*. You can do this from `kubectl` with `--cascade=false`. 
-Note that if the flag isn't set, **cascading deletion will be performed by default**, and all Pods managed by your PetSets will be gone. - -Delete those PetSets by specifying file names. This only works when -the files contain only PetSets, but not other resources such as Services: - -```shell -# Delete all existing PetSets without cascading -# Note that should only contain PetSets that you want to delete, but not any other resources -kubectl delete -f --cascade=false -``` - -Alternatively, delete them by specifying resource names: - -```shell -# Alternatively, delete them by name and namespace without cascading -kubectl delete petsets -n= --cascade=false -``` - -Make sure you've deleted all PetSets in the system: - -```shell -# Get all PetSets again to make sure you deleted them all -# This should return nothing -kubectl get petsets --all-namespaces -``` - -At this moment, you've deleted all PetSets in your cluster, but not their Pods, Persistent Volumes, or Persistent Volume Claims. -However, since the Pods are not managed by PetSets anymore, they will be vulnerable to node failures until you finish the master upgrade and recreate StatefulSets. - -### Upgrade your master to Kubernetes version 1.5 or later - -Now, you can [upgrade your Kubernetes master](/docs/admin/cluster-management/#upgrading-a-cluster) to Kubernetes version 1.5 or later. -Note that **you should NOT upgrade Nodes at this time**, because the Pods -(that were once managed by PetSets) are now vulnerable to node failures. - -### Upgrade kubectl to Kubernetes version 1.5 or later - -Upgrade `kubectl` to Kubernetes version 1.5 or later, following [the steps for installing and setting up -kubectl](/docs/tasks/kubectl/install/). 
- -### Create StatefulSets - -Make sure you have both master and `kubectl` upgraded to Kubernetes version 1.5 -or later before continuing: - -```shell -kubectl version -``` - -The output is similar to this: - -```shell -Client Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.0", GitCommit:"0776eab45fe28f02bbeac0f05ae1a203051a21eb", GitTreeState:"clean", BuildDate:"2016-11-24T22:35:03Z", GoVersion:"go1.7.3", Compiler:"gc", Platform:"linux/amd64"} -Server Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.0", GitCommit:"0776eab45fe28f02bbeac0f05ae1a203051a21eb", GitTreeState:"clean", BuildDate:"2016-11-24T22:30:23Z", GoVersion:"go1.7.3", Compiler:"gc", Platform:"linux/amd64"} -``` - -If both `Client Version` (`kubectl` version) and `Server Version` (master -version) are 1.5 or later, you are good to go. - -Create StatefulSets to adopt the Pods belonging to the deleted PetSets with the -StatefulSet manifests generated in the previous step: - -```shell -kubectl create -f -``` - -Make sure all StatefulSets are created and running as expected in the -newly-upgraded cluster: - -```shell -kubectl get statefulsets --all-namespaces -``` - -### Upgrade nodes to Kubernetes version 1.5 or later (optional) - -You can now [upgrade Kubernetes nodes](/docs/admin/cluster-management/#upgrading-a-cluster) -to Kubernetes version 1.5 or later. This step is optional, but needs to be done after all StatefulSets -are created to adopt PetSets' Pods. - -You should be running Node version >= 1.1.0 to run StatefulSets safely. Older versions do not support features which allow the StatefulSet to guarantee that at any time, there is **at most** one Pod with a given identity running in a cluster. - -{% endcapture %} - -{% capture whatsnext %} - -Learn more about [scaling a StatefulSet](/docs/tasks/manage-stateful-set/scale-stateful-set/). 
- -{% endcapture %} - -{% include templates/task.md %} diff --git a/docs/user-guide/petset.yaml b/docs/user-guide/petset.yaml deleted file mode 100644 index 5c29237c4897d..0000000000000 --- a/docs/user-guide/petset.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# A headless service to create DNS records -apiVersion: v1 -kind: Service -metadata: - name: nginx - labels: - app: nginx -spec: - ports: - - port: 80 - name: web - # *.nginx.default.svc.cluster.local - clusterIP: None - selector: - app: nginx ---- -apiVersion: apps/v1alpha1 -kind: PetSet -metadata: - name: web -spec: - serviceName: "nginx" - replicas: 2 - template: - metadata: - labels: - app: nginx - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - terminationGracePeriodSeconds: 0 - containers: - - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 - ports: - - containerPort: 80 - name: web - volumeMounts: - - name: www - mountPath: /usr/share/nginx/html - volumeClaimTemplates: - - metadata: - name: www - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi - diff --git a/docs/user-guide/petset/OWNERS b/docs/user-guide/petset/OWNERS deleted file mode 100644 index 53954192933e0..0000000000000 --- a/docs/user-guide/petset/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -approvers: -- aledbf -- bprashanth -- foxish - diff --git a/docs/user-guide/petset/bootstrapping/petset_peers.yaml b/docs/user-guide/petset/bootstrapping/petset_peers.yaml deleted file mode 100644 index 197ef9d2dca59..0000000000000 --- a/docs/user-guide/petset/bootstrapping/petset_peers.yaml +++ /dev/null @@ -1,101 +0,0 @@ -# A headless service to create DNS records -apiVersion: v1 -kind: Service -metadata: - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - name: nginx - labels: - app: nginx -spec: - ports: - - port: 80 - name: web - # *.nginx.default.svc.cluster.local - clusterIP: None - selector: - app: 
nginx ---- -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: web -spec: - serviceName: "nginx" - replicas: 2 - template: - metadata: - labels: - app: nginx - annotations: - pod.beta.kubernetes.io/init-containers: '[ - { - "name": "peerfinder", - "image": "gcr.io/google_containers/peer-finder:0.1", - "args": [ - "-on-start=\"\"echo ` - readarray PEERS; - echo $(hostname) > /usr/share/nginx/html/index.html; - if [ 1 = ${#PEERS[@]} ]; then - echo \"events{} http { server{ } }\"; - else - echo \"events{} http { server{ location / { proxy_pass http://${PEERS[0]}; } } }\"; - fi;` > /conf/nginx.conf\"\"", - "-service=nginx" - ], - "env": [ - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - } - ], - "volumeMounts": [ - { - "name": "www", - "mountPath": "/usr/share/nginx/html" - }, - { - "name": "conf", - "mountPath": "/conf" - } - ] - } - ]' - spec: - containers: - - name: nginx - image: gcr.io/google_containers/nginx-slim:0.8 - ports: - - containerPort: 80 - name: web - command: - - nginx - args: - - -g - - "daemon off;" - - -c - - "/conf/nginx.conf" - volumeMounts: - - name: www - mountPath: /usr/share/nginx/html - - name: conf - mountPath: /conf - volumes: - - name: conf - emptyDir: {} - volumeClaimTemplates: - - metadata: - name: www - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi - diff --git a/docs/user-guide/petset/bootstrapping/petset_vm.yaml b/docs/user-guide/petset/bootstrapping/petset_vm.yaml deleted file mode 100644 index 402a17022f935..0000000000000 --- a/docs/user-guide/petset/bootstrapping/petset_vm.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# A headless service to create DNS records -apiVersion: v1 -kind: Service -metadata: - name: ub - labels: - app: ub -spec: - ports: - - port: 80 - name: web - # *.ub.default.svc.cluster.local - clusterIP: None - selector: - app: ub 
---- -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: vm -spec: - serviceName: "ub" - replicas: 2 - template: - metadata: - labels: - app: ub - annotations: - pod.beta.kubernetes.io/init-containers: '[ - { - "name": "rootfs", - "image": "ubuntu:15.10", - "command": [ - "/bin/sh", - "-c", - "for d in usr lib etc; do cp -vnpr /$d/* /${d}mnt; done;" - ], - "volumeMounts": [ - { - "name": "usr", - "mountPath": "/usrmnt" - }, - { - "name": "lib", - "mountPath": "/libmnt" - }, - { - "name": "etc", - "mountPath": "/etcmnt" - } - ] - } - ]' - spec: - containers: - - name: ub - image: ubuntu:15.10 - ports: - - containerPort: 80 - name: web - command: - - /bin/sh - - -c - - 'while true; do sleep 10; done' - volumeMounts: - - name: usr - mountPath: /usr - - name: lib - mountPath: /lib - - name: etc - mountPath: /etc - volumeClaimTemplates: - - metadata: - name: usr - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 300Mi - - metadata: - name: lib - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi - - metadata: - name: etc - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 150Mi - diff --git a/skip_toc_check.txt b/skip_toc_check.txt index 46d01b0ec2978..19fbc75e87c60 100644 --- a/skip_toc_check.txt +++ b/skip_toc_check.txt @@ -96,8 +96,6 @@ docs/user-guide/namespaces.md docs/user-guide/networkpolicies.md docs/user-guide/node-selection/index.md docs/user-guide/persistent-volumes/index.md -docs/user-guide/petset.md -docs/user-guide/petset/bootstrapping/index.md docs/user-guide/pod-preset/index.md docs/user-guide/pod-security-policy/index.md docs/user-guide/pod-states.md diff --git a/test/examples_test.go b/test/examples_test.go index 011a68d4bd84e..ae9664db8c56e 100644 --- 
a/test/examples_test.go +++ b/test/examples_test.go @@ -282,7 +282,6 @@ func TestExampleObjectSchemas(t *testing.T) { "nginx-probe-deployment": {&extensions.Deployment{}}, "nginx-secure-app": {&api.Service{}, &extensions.Deployment{}}, "nginx-svc": {&api.Service{}}, - "petset": {&api.Service{}, nil}, "pod": {&api.Pod{}}, "pod-w-message": {&api.Pod{}}, "redis-deployment": {&extensions.Deployment{}},