diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 795b340ba33c3..785257142deae 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -14,9 +14,11 @@ Use the default base branch, “master”, if you're documenting existing features in the English localization. - If you're working on a different localization (not English), or you - are documenting a feature that will be part of a future release, see + If you're working on a different localization (not English), see https://kubernetes.io/docs/contribute/new-content/overview/#choose-which-git-branch-to-use for advice. + If you're documenting a feature that will be part of a future release, see + https://kubernetes.io/docs/contribute/new-content/new-features/ for advice. + --> diff --git a/Makefile b/Makefile index 523775cd173fb..e8576459a4ff8 100644 --- a/Makefile +++ b/Makefile @@ -65,10 +65,10 @@ container-image: --build-arg HUGO_VERSION=$(HUGO_VERSION) container-build: module-check - $(CONTAINER_RUN) $(CONTAINER_IMAGE) hugo --minify + $(CONTAINER_RUN) --read-only $(CONTAINER_IMAGE) hugo --minify container-serve: module-check - $(CONTAINER_RUN) --mount type=tmpfs,destination=/src/resources,tmpfs-mode=0777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 + $(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir test-examples: scripts/test_examples.sh install diff --git a/config.toml b/config.toml index 943363c9f153b..bf24f39d28f8a 100644 --- a/config.toml +++ b/config.toml @@ -33,6 +33,23 @@ enableGitInfo = true # Hindi is disabled because it's currently in development. disableLanguages = ["hi", "no"] +[caches] + [caches.assets] + dir = ":cacheDir/_gen" + maxAge = -1 + [caches.getcsv] + dir = ":cacheDir/:project" + maxAge = "60s" + [caches.getjson] + dir = ":cacheDir/:project" + maxAge = "60s" + [caches.images] + dir = ":cacheDir/_images" + maxAge = -1 + [caches.modules] + dir = ":cacheDir/modules" + maxAge = -1 + [markup] [markup.goldmark] [markup.goldmark.extensions] @@ -66,6 +83,10 @@ date = ["date", ":filename", "publishDate", "lastmod"] [permalinks] blog = "/:section/:year/:month/:day/:slug/" +[sitemap] + filename = "sitemap.xml" + priority = 0.75 + # Be explicit about the output formats. We (currently) only want an RSS feed for the home page. [outputs] home = [ "HTML", "RSS", "HEADERS" ] diff --git a/content/de/docs/tasks/tools/install-kubectl.md b/content/de/docs/tasks/tools/install-kubectl.md index d7fb7aa759a49..2354fad25f5a7 100644 --- a/content/de/docs/tasks/tools/install-kubectl.md +++ b/content/de/docs/tasks/tools/install-kubectl.md @@ -334,7 +334,7 @@ Sie müssen nun sicherstellen, dass das kubectl-Abschlussskript in allen Ihren S ``` {{< note >}} -bash-completion bezieht alle Verfollständigungsskripte aus `/etc/bash_completion.d`. +bash-completion bezieht alle Vervollständigungsskripte aus `/etc/bash_completion.d`. {{< /note >}} Beide Ansätze sind gleichwertig. Nach dem erneuten Laden der Shell sollte kubectl autocompletion funktionieren. 
diff --git a/content/en/_index.html b/content/en/_index.html index 452c7ea325f28..c2ec627065fde 100644 --- a/content/en/_index.html +++ b/content/en/_index.html @@ -2,6 +2,8 @@ title: "Production-Grade Container Orchestration" abstract: "Automated container deployment, scaling, and management" cid: home +sitemap: + priority: 1.0 --- {{< blocks/section id="oceanNodes" >}} diff --git a/content/en/blog/_posts/2020-10-12-steering-committee-results.md b/content/en/blog/_posts/2020-10-12-steering-committee-results.md new file mode 100644 index 0000000000000..2acbb6d6e4a7f --- /dev/null +++ b/content/en/blog/_posts/2020-10-12-steering-committee-results.md @@ -0,0 +1,43 @@ +--- +layout: blog +title: "Announcing the 2020 Steering Committee Election Results" +date: 2020-10-12 +slug: steering-committee-results-2020 +--- + +**Author**: Kaslin Fields + +The [2020 Steering Committee Election](https://github.com/kubernetes/community/tree/master/events/elections/2020) is now complete. In 2019, the committee arrived at its final allocation of 7 seats, 3 of which were up for election in 2020. Incoming committee members serve a term of 2 years, and all members are elected by the Kubernetes Community. + +This community body is significant since it oversees the governance of the entire Kubernetes project. With that great power comes great responsibility. You can learn more about the steering committee’s role in their [charter](https://github.com/kubernetes/steering/blob/master/charter.md). + +## Results + +Congratulations to the elected committee members whose two year terms begin immediately (listed in alphabetical order by GitHub handle): + +* **Davanum Srinivas ([@dims](https://github.com/dims)), VMware** +* **Jordan Liggitt ([@liggitt](https://github.com/liggitt)), Google** +* **Bob Killen ([@mrbobbytables](https://github.com/mrbobbytables)), Google** + +They join continuing members Christoph Blecker ([@cblecker](https://github.com/cblecker)), Red Hat; Derek Carr ([@derekwaynecarr](https://github.com/derekwaynecarr)), Red Hat; Nikhita Raghunath ([@nikhita](https://github.com/nikhita)), VMware; and Paris Pittman ([@parispittman](https://github.com/parispittman)), Apple. Davanum Srinivas is returning for his second term on the committee. + +## Big Thanks! + +* Thank you and congratulations on a successful election to this round’s election officers: + * Jaice Singer DuMars ([@jdumars](https://github.com/jdumars)), Apple + * Ihor Dvoretskyi ([@idvoretskyi](https://github.com/idvoretskyi)), CNCF + * Josh Berkus ([@jberkus](https://github.com/jberkus)), Red Hat +* Thanks to the Emeritus Steering Committee Members. Your prior service is appreciated by the community: + * Aaron Crickenberger ([@spiffxp](https://github.com/spiffxp)), Google + * and Lachlan Evenson ([@lachie8e](https://github.com/lachie8e)), Microsoft +* And thank you to all the candidates who came forward to run for election. As [Jorge Castro put it](https://twitter.com/castrojo/status/1315718627639820288?s=20): we are spoiled with capable, kind, and selfless volunteers who put the needs of the project first. + +## Get Involved with the Steering Committee + +This governing body, like all of Kubernetes, is open to all. You can follow along with Steering Committee [backlog items](https://github.com/kubernetes/steering/projects/1) and weigh in by filing an issue or creating a PR against their [repo](https://github.com/kubernetes/steering).
They have an open meeting on [the first Monday of the month at 6pm UTC](https://github.com/kubernetes/steering) and regularly attend Meet Our Contributors. They can also be contacted at their public mailing list steering@kubernetes.io. + +You can see what the Steering Committee meetings are all about by watching past meetings on the [YouTube Playlist](https://www.youtube.com/playlist?list=PL69nYSiGNLP1yP1B_nd9-drjoxp0Q14qM). + +---- + +_This post was written by the [Upstream Marketing Working Group](https://github.com/kubernetes/community/tree/master/communication/marketing-team#contributor-marketing). If you want to write stories about the Kubernetes community, learn more about us._ diff --git a/content/en/docs/_index.md b/content/en/docs/_index.md index e06ebf76a5fea..dc42c2d1581b3 100644 --- a/content/en/docs/_index.md +++ b/content/en/docs/_index.md @@ -1,4 +1,6 @@ --- linktitle: Kubernetes Documentation title: Documentation +sitemap: + priority: 1.0 --- diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index b1e1f0dc24827..180b4ca9c46e7 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -261,7 +261,7 @@ a Lease object. #### Reliability - In most cases, node controller limits the eviction rate to + In most cases, the node controller limits the eviction rate to `--node-eviction-rate` (default 0.1) per second, meaning it won't evict pods from more than 1 node per 10 seconds. diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md index efbef476bca48..d6abd186b9206 100644 --- a/content/en/docs/concepts/configuration/configmap.md +++ b/content/en/docs/concepts/configuration/configmap.md @@ -115,7 +115,8 @@ metadata: spec: containers: - name: demo - image: game.example/demo-game + image: alpine + command: ["sleep", "3600"] env: # Define the environment variable - name: PLAYER_INITIAL_LIVES # Notice that the case is different here diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md index 9eb31cb91575e..d267b83dd2ccf 100644 --- a/content/en/docs/concepts/configuration/manage-resources-containers.md +++ b/content/en/docs/concepts/configuration/manage-resources-containers.md @@ -47,6 +47,13 @@ Limits can be implemented either reactively (the system intervenes once it sees or by enforcement (the system prevents the container from ever exceeding the limit). Different runtimes can have different ways to implement the same restrictions. +{{< note >}} +If a Container specifies its own memory limit, but does not specify a memory request, Kubernetes +automatically assigns a memory request that matches the limit. Similarly, if a Container specifies its own +CPU limit, but does not specify a CPU request, Kubernetes automatically assigns a CPU request that matches +the limit. +{{< /note >}} + ## Resource types *CPU* and *memory* are each a *resource type*. A resource type has a base unit. diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index 09d5530a29244..fa74d94221cef 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -38,7 +38,7 @@ No parameters are passed to the handler. 
This hook is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention and others. A call to the preStop hook fails if the container is already in terminated or completed state. It is blocking, meaning it is synchronous, -so it must complete before the call to delete the container can be sent. +so it must complete before the signal to stop the container can be sent. No parameters are passed to the handler. A more detailed description of the termination behavior can be found in @@ -56,7 +56,8 @@ Resources consumed by the command are counted against the Container. ### Hook handler execution When a Container lifecycle management hook is called, -the Kubernetes management system executes the handler in the Container registered for that hook.  +the Kubernetes management system executes the handler according to the hook action: +`exec` and `tcpSocket` are executed in the container, and `httpGet` is executed by the kubelet process. Hook handler calls are synchronous within the context of the Pod containing the Container. This means that for a `PostStart` hook, @@ -64,10 +65,21 @@ the Container ENTRYPOINT and hook fire asynchronously. However, if the hook takes too long to run or hangs, the Container cannot reach a `running` state. -The behavior is similar for a `PreStop` hook. -If the hook hangs during execution, -the Pod phase stays in a `Terminating` state and is killed after `terminationGracePeriodSeconds` of pod ends. -If a `PostStart` or `PreStop` hook fails, +`PreStop` hooks are not executed asynchronously from the signal +to stop the Container; the hook must complete its execution before +the signal can be sent. +If a `PreStop` hook hangs during execution, +the Pod's phase will be `Terminating` and remain there until the Pod is +killed after its `terminationGracePeriodSeconds` expires. +This grace period applies to the total time it takes for both +the `PreStop` hook to execute and for the Container to stop normally. +If, for example, `terminationGracePeriodSeconds` is 60, and the hook +takes 55 seconds to complete, and the Container takes 10 seconds to stop +normally after receiving the signal, then the Container will be killed +before it can stop normally, since `terminationGracePeriodSeconds` is +less than the total time (55+10) it takes for these two things to happen. + +If either a `PostStart` or `PreStop` hook fails, it kills the Container. Users should make their hook handlers as lightweight as possible. @@ -121,4 +133,3 @@ Events: * Get hands-on experience [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). - diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index b32bce83dd1f4..0384754e35521 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -11,21 +11,17 @@ weight: 10 -{{< feature-state state="alpha" >}} -{{< caution >}}Alpha features can change rapidly. {{< /caution >}} - Network plugins in Kubernetes come in a few flavors: -* CNI plugins: adhere to the appc/CNI specification, designed for interoperability.
+* CNI plugins: adhere to the [Container Network Interface](https://github.com/containernetworking/cni) (CNI) specification, designed for interoperability. + * Kubernetes follows the [v0.4.0](https://github.com/containernetworking/cni/blob/spec-v0.4.0/SPEC.md) release of the CNI specification. * Kubenet plugin: implements basic `cbr0` using the `bridge` and `host-local` CNI plugins - - ## Installation -The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it finds, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for Docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins: +The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it finds, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for Docker, as CRI manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins: * `cni-bin-dir`: Kubelet probes this directory for plugins on startup * `network-plugin`: The network plugin to use from `cni-bin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply "cni". @@ -166,9 +162,4 @@ This option is provided to the network-plugin; currently **only kubenet supports * `--network-plugin=kubenet` specifies that we use the `kubenet` network plugin with CNI `bridge` and `host-local` plugins placed in `/opt/cni/bin` or `cni-bin-dir`. * `--network-plugin-mtu=9001` specifies the MTU to use, currently only used by the `kubenet` network plugin. - - ## {{% heading "whatsnext" %}} - - - diff --git a/content/en/docs/concepts/overview/_index.md b/content/en/docs/concepts/overview/_index.md index a52c47044685f..fb6351ec67756 100755 --- a/content/en/docs/concepts/overview/_index.md +++ b/content/en/docs/concepts/overview/_index.md @@ -2,4 +2,6 @@ title: "Overview" weight: 20 description: Get a high-level outline of Kubernetes and the components it is built from. +sitemap: + priority: 0.9 --- diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index 4338c932d5e47..7580287fb3428 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -41,6 +41,7 @@ The Kubernetes API server serves an OpenAPI spec via the `/openapi/v2` endpoint. You can request the response format using request headers as follows: + @@ -68,7 +69,6 @@ You can request the response format using request headers as follows: -
Valid request header values for OpenAPI v2 queries
Headerserves application/json
Valid request header values for OpenAPI v2 queries
Kubernetes implements an alternative Protobuf based serialization format that @@ -102,13 +102,22 @@ to ensure that the API presents a clear, consistent view of system resources and behavior, and to enable controlling access to end-of-life and/or experimental APIs. -Refer to [API versions reference](/docs/reference/using-api/api-overview/#api-versioning) -for more details on the API version level definitions. - To make it easier to evolve and to extend its API, Kubernetes implements [API groups](/docs/reference/using-api/api-overview/#api-groups) that can be [enabled or disabled](/docs/reference/using-api/api-overview/#enabling-or-disabling). +API resources are distinguished by their API group, resource type, namespace +(for namespaced resources), and name. The API server may serve the same +underlying data through multiple API version and handle the conversion between +API versions transparently. All these different versions are actually +representations of the same resource. For example, suppose there are two +versions `v1` and `v1beta1` for the same resource. An object created by the +`v1beta1` version can then be read, updated, and deleted by either the +`v1beta1` or the `v1` versions. + +Refer to [API versions reference](/docs/reference/using-api/api-overview/#api-versioning) +for more details on the API version level definitions. + ## API Extension The Kubernetes API can be extended in one of two ways: diff --git a/content/en/docs/concepts/overview/what-is-kubernetes.md b/content/en/docs/concepts/overview/what-is-kubernetes.md index 418ee3d64481b..6df252ede2b9b 100644 --- a/content/en/docs/concepts/overview/what-is-kubernetes.md +++ b/content/en/docs/concepts/overview/what-is-kubernetes.md @@ -10,6 +10,8 @@ weight: 10 card: name: concepts weight: 10 +sitemap: + priority: 0.9 --- diff --git a/content/en/docs/concepts/overview/working-with-objects/namespaces.md b/content/en/docs/concepts/overview/working-with-objects/namespaces.md index 004c18ad2cd2f..f078cb86360d8 100644 --- a/content/en/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/en/docs/concepts/overview/working-with-objects/namespaces.md @@ -28,9 +28,6 @@ resource can only be in one namespace. Namespaces are a way to divide cluster resources between multiple users (via [resource quota](/docs/concepts/policy/resource-quotas/)). -In future versions of Kubernetes, objects in the same namespace will have the same -access control policies by default. - It is not necessary to use multiple namespaces just to separate slightly different resources, such as different versions of the same software: use [labels](/docs/concepts/overview/working-with-objects/labels) to distinguish diff --git a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md index 9d6ab31889cdd..66ec279ad8495 100644 --- a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -20,7 +20,7 @@ The kube-scheduler can be configured to enable bin packing of resources along wi ## Enabling Bin Packing using RequestedToCapacityRatioResourceAllocation -Before Kubernetes 1.15, Kube-scheduler used to allow scoring nodes based on the request to capacity ratio of primary resources like CPU and Memory. 
Kubernetes 1.16 added a new parameter to the priority function that allows the users to specify the resources along with weights for each resource to score nodes based on the request to capacity ratio. This allows users to bin pack extended resources by using appropriate parameters improves the utilization of scarce resources in large clusters. The behavior of the `RequestedToCapacityRatioResourceAllocation` priority function can be controlled by a configuration option called `requestedToCapacityRatioArguments`. This argument consists of two parameters `shape` and `resources`. Shape allows the user to tune the function as least requested or most requested based on `utilization` and `score` values. Resources +Before Kubernetes 1.15, Kube-scheduler used to allow scoring nodes based on the request to capacity ratio of primary resources like CPU and Memory. Kubernetes 1.16 added a new parameter to the priority function that allows the users to specify the resources along with weights for each resource to score nodes based on the request to capacity ratio. This allows users to bin pack extended resources by using appropriate parameters and improves the utilization of scarce resources in large clusters. The behavior of the `RequestedToCapacityRatioResourceAllocation` priority function can be controlled by a configuration option called `requestedToCapacityRatioArguments`. This argument consists of two parameters `shape` and `resources`. Shape allows the user to tune the function as least requested or most requested based on `utilization` and `score` values. Resources consists of `name` which specifies the resource to be considered during scoring and `weight` specify the weight of each resource. Below is an example configuration that sets `requestedToCapacityRatioArguments` to bin packing behavior for extended resources `intel.com/foo` and `intel.com/bar` diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md index a4b821104334c..1a55acf3d81bf 100644 --- a/content/en/docs/concepts/services-networking/dns-pod-service.md +++ b/content/en/docs/concepts/services-networking/dns-pod-service.md @@ -181,7 +181,7 @@ When you set `setHostnameAsFQDN: true` in the Pod spec, the kubelet writes the P {{< note >}} In Linux, the hostname field of the kernel (the `nodename` field of `struct utsname`) is limited to 64 characters. -If a Pod enables this feature and its FQDN is longer than 64 character, it will fail to start. The Pod will remain in `Pending` status (`ContainerCreating` as seen by `kubectl`) generating error events, such as Failed to construct FQDN from pod hostname and cluster domain, FQDN `long-FDQN` is too long (64 characters is the max, 70 characters requested). One way of improving user experience for this scenario is to create an [admission webhook controller](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) to control FQDN size when users create top level objects, for example, Deployment. +If a Pod enables this feature and its FQDN is longer than 64 character, it will fail to start. The Pod will remain in `Pending` status (`ContainerCreating` as seen by `kubectl`) generating error events, such as Failed to construct FQDN from pod hostname and cluster domain, FQDN `long-FQDN` is too long (64 characters is the max, 70 characters requested). 
One way of improving user experience for this scenario is to create an [admission webhook controller](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) to control FQDN size when users create top level objects, for example, Deployment. {{< /note >}} ### Pod's DNS Policy diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index ded1451d801f9..83b7850364d98 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -881,6 +881,10 @@ There are other annotations to manage Classic Elastic Load Balancers that are de # health check. This value must be less than the service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval # value. Defaults to 5, must be between 2 and 60 + service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-53fae93f" + # A list of existing security groups to be added to ELB created. Unlike the annotation + # service.beta.kubernetes.io/aws-load-balancer-extra-security-groups, this replaces all other security groups previously assigned to the ELB. + service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" # A list of additional security groups to be added to the ELB diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index b73f78712ecbd..3eedc64d57ff8 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -13,7 +13,7 @@ weight: 10 -A _Deployment_ provides declarative updates for {{< glossary_tooltip text="Pods" term_id="pod" >}} +A _Deployment_ provides declarative updates for {{< glossary_tooltip text="Pods" term_id="pod" >}} and {{< glossary_tooltip term_id="replica-set" text="ReplicaSets" >}}. You describe a _desired state_ in a Deployment, and the Deployment {{< glossary_tooltip term_id="controller" >}} changes the actual state to the desired state at a controlled rate. You can define Deployments to create new ReplicaSets, or to remove existing Deployments and adopt all their resources with new Deployments. @@ -102,7 +102,7 @@ Follow the steps given below to create the above Deployment: The output is similar to: ``` Waiting for rollout to finish: 2 out of 3 new replicas have been updated... - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` 4. Run the `kubectl get deployments` again a few seconds later. @@ -205,7 +205,7 @@ Follow the steps given below to update your Deployment: ``` or ``` - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` Get more details on your updated Deployment: @@ -857,7 +857,7 @@ kubectl rollout status deployment.v1.apps/nginx-deployment The output is similar to this: ``` Waiting for rollout to finish: 2 of 3 updated replicas are available... 
-deployment.apps/nginx-deployment successfully rolled out +deployment "nginx-deployment" successfully rolled out ``` and the exit status from `kubectl rollout` is 0 (success): ```shell diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index 123eae830ea50..f0ac6e654be7d 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -13,7 +13,8 @@ of its primary containers starts OK, and then through either the `Succeeded` or Whilst a Pod is running, the kubelet is able to restart containers to handle some kind of faults. Within a Pod, Kubernetes tracks different container -[states](#container-states) and handles +[states](#container-states) and determines what action to take to make the Pod +healthy again. In the Kubernetes API, Pods have both a specification and an actual status. The status for a Pod object consists of a set of [Pod conditions](#pod-conditions). @@ -32,7 +33,7 @@ Like individual application containers, Pods are considered to be relatively ephemeral (rather than durable) entities. Pods are created, assigned a unique ID ([UID](/docs/concepts/overview/working-with-objects/names/#uids)), and scheduled to nodes where they remain until termination (according to restart policy) or -deletion. +deletion. If a {{< glossary_tooltip term_id="node" >}} dies, the Pods scheduled to that node are [scheduled for deletion](#pod-garbage-collection) after a timeout period. @@ -140,9 +141,8 @@ and Never. The default value is Always. The `restartPolicy` applies to all containers in the Pod. `restartPolicy` only refers to restarts of the containers by the kubelet on the same node. After containers in a Pod exit, the kubelet restarts them with an exponential back-off delay (10s, 20s, -40s, …), that is capped at five minutes. Once a container has executed with no problems -for 10 minutes without any problems, the kubelet resets the restart backoff timer for -that container. +40s, …), that is capped at five minutes. Once a container has executed for 10 minutes +without any problems, the kubelet resets the restart backoff timer for that container. ## Pod conditions diff --git a/content/en/docs/contribute/_index.md b/content/en/docs/contribute/_index.md index 8616f77afb5e6..fb068856b67de 100644 --- a/content/en/docs/contribute/_index.md +++ b/content/en/docs/contribute/_index.md @@ -13,6 +13,13 @@ card: +*Kubernetes welcomes improvements from all contributors, new and experienced!* + +{{< note >}} +To learn more about contributing to Kubernetes in general, see the +[contributor documentation](https://www.kubernetes.dev/docs/). +{{< /note >}} + This website is maintained by [Kubernetes SIG Docs](/docs/contribute/#get-involved-with-sig-docs). Kubernetes documentation contributors: @@ -22,8 +29,6 @@ Kubernetes documentation contributors: - Translate the documentation - Manage and publish the documentation parts of the Kubernetes release cycle -Kubernetes documentation welcomes improvements from all contributors, new and experienced! - ## Getting started diff --git a/content/en/docs/contribute/generate-ref-docs/kubectl.md b/content/en/docs/contribute/generate-ref-docs/kubectl.md index 80552144dd24c..b216a0a5b7bb0 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubectl.md +++ b/content/en/docs/contribute/generate-ref-docs/kubectl.md @@ -230,11 +230,9 @@ Build the Kubernetes documentation in your local ``.
```shell cd -make docker-serve +git submodule update --init --recursive --depth 1 # if not already done +make container-serve ``` -{{< note >}} -The use of `make docker-serve` is deprecated. Please use `make container-serve` instead. -{{< /note >}} View the [local preview](https://localhost:1313/docs/reference/generated/kubectl/kubectl-commands/). diff --git a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md index f2ec01d8e84c2..251dfe2efed4f 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -182,13 +182,10 @@ Verify the [local preview](http://localhost:1313/docs/reference/generated/kubern ```shell cd -make docker-serve +git submodule update --init --recursive --depth 1 # if not already done +make container-serve ``` -{{< note >}} -The use of `make docker-serve` is deprecated. Please use `make container-serve` instead. -{{< /note >}} - ## Commit the changes In `` run `git add` and `git commit` to commit the change. diff --git a/content/en/docs/contribute/localization.md b/content/en/docs/contribute/localization.md index 74c4f8e091468..5e91d86ee8932 100644 --- a/content/en/docs/contribute/localization.md +++ b/content/en/docs/contribute/localization.md @@ -73,7 +73,9 @@ For an example of adding a label, see the PR for adding the [Italian language la ### Find community -Let Kubernetes SIG Docs know you're interested in creating a localization! Join the [SIG Docs Slack channel](https://kubernetes.slack.com/messages/C1J0BPD2M/). Other localization teams are happy to help you get started and answer any questions you have. +Let Kubernetes SIG Docs know you're interested in creating a localization! Join the [SIG Docs Slack channel](https://kubernetes.slack.com/messages/sig-docs) and the [SIG Docs Localizations Slack channel](https://kubernetes.slack.com/messages/sig-docs-localizations). Other localization teams are happy to help you get started and answer any questions you have. + +Please also consider participating in the [SIG Docs Localization Subgroup meeting](https://github.com/kubernetes/community/tree/master/sig-docs). The mission of the SIG Docs localization subgroup is to work across the SIG Docs localization teams to collaborate on defining and documenting the processes for creating localized contribution guides. In addition, the SIG Docs localization subgroup will look for opportunities for the creation and sharing of common tools across localization teams and also serve to identify new requirements to the SIG Docs Leadership team. If you have questions about this meeting, please inquire on the [SIG Docs Localizations Slack channel](https://kubernetes.slack.com/messages/sig-docs-localizations). You can also create a Slack channel for your localization in the `kubernetes/community` repository. For an example of adding a Slack channel, see the PR for [adding a channel for Persian](https://github.com/kubernetes/community/pull/4980). diff --git a/content/en/docs/contribute/new-content/new-features.md b/content/en/docs/contribute/new-content/new-features.md index 54db84da8f6c8..98823185ff0b4 100644 --- a/content/en/docs/contribute/new-content/new-features.md +++ b/content/en/docs/contribute/new-content/new-features.md @@ -98,7 +98,8 @@ deadlines. 1. Open a pull request against the `dev-{{< skew nextMinorVersion >}}` branch in the `kubernetes/website` repository, with a small commit that you will amend later. -2. 
Use the Prow command `/milestone {{< skew nextMinorVersion >}}` to +2. Edit the pull request description to include links to `k/k` PR(s) and `k/enhancement` issue(s). +3. Use the Prow command `/milestone {{< skew nextMinorVersion >}}` to assign the PR to the relevant milestone. This alerts the docs person managing this release that the feature docs are coming. diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md index efdc9026aa8ea..a97dca823f17e 100644 --- a/content/en/docs/reference/access-authn-authz/authentication.md +++ b/content/en/docs/reference/access-authn-authz/authentication.md @@ -414,6 +414,8 @@ Webhook authentication is a hook for verifying bearer tokens. * `--authentication-token-webhook-config-file` a configuration file describing how to access the remote webhook service. * `--authentication-token-webhook-cache-ttl` how long to cache authentication decisions. Defaults to two minutes. +* `--authentication-token-webhook-version` determines whether to use `authentication.k8s.io/v1beta1` or `authentication.k8s.io/v1` + `TokenReview` objects to send/receive information from the webhook. Defaults to `v1beta1`. The configuration file uses the [kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file format. Within the file, `clusters` refers to the remote service and @@ -447,72 +449,167 @@ contexts: name: webhook ``` -When a client attempts to authenticate with the API server using a bearer token -as discussed [above](#putting-a-bearer-token-in-a-request), -the authentication webhook POSTs a JSON-serialized `authentication.k8s.io/v1beta1` `TokenReview` object containing the token -to the remote service. Kubernetes will not challenge a request that lacks such a header. +When a client attempts to authenticate with the API server using a bearer token as discussed [above](#putting-a-bearer-token-in-a-request), +the authentication webhook POSTs a JSON-serialized `TokenReview` object containing the token to the remote service. -Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/concepts/overview/kubernetes-api/) -as other Kubernetes API objects. Implementers should be aware of looser -compatibility promises for beta objects and check the "apiVersion" field of the -request to ensure correct deserialization. Additionally, the API server must -enable the `authentication.k8s.io/v1beta1` API extensions group (`--runtime-config=authentication.k8s.io/v1beta1=true`). +Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/concepts/overview/kubernetes-api/) as other Kubernetes API objects. +Implementers should check the `apiVersion` field of the request to ensure correct deserialization, +and **must** respond with a `TokenReview` object of the same version as the request. -The POST body will be of the following format: +{{< tabs name="TokenReview_request" >}} +{{% tab name="authentication.k8s.io/v1" %}} +{{< note >}} +The Kubernetes API server defaults to sending `authentication.k8s.io/v1beta1` token reviews for backwards compatibility. +To opt into receiving `authentication.k8s.io/v1` token reviews, the API server must be started with `--authentication-token-webhook-version=v1`. 
+{{< /note >}} -```json +```yaml +{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenReview", + "spec": { + # Opaque bearer token sent to the API server + "token": "014fbff9a07c...", + + # Optional list of the audience identifiers for the server the token was presented to. + # Audience-aware token authenticators (for example, OIDC token authenticators) + # should verify the token was intended for at least one of the audiences in this list, + # and return the intersection of this list and the valid audiences for the token in the response status. + # This ensures the token is valid to authenticate to the server it was presented to. + # If no audiences are provided, the token should be validated to authenticate to the Kubernetes API server. + "audiences": ["https://myserver.example.com", "https://myserver.internal.example.com"] + } +} +``` +{{% /tab %}} +{{% tab name="authentication.k8s.io/v1beta1" %}} +```yaml { "apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview", "spec": { - "token": "(BEARERTOKEN)" + # Opaque bearer token sent to the API server + "token": "014fbff9a07c...", + + # Optional list of the audience identifiers for the server the token was presented to. + # Audience-aware token authenticators (for example, OIDC token authenticators) + # should verify the token was intended for at least one of the audiences in this list, + # and return the intersection of this list and the valid audiences for the token in the response status. + # This ensures the token is valid to authenticate to the server it was presented to. + # If no audiences are provided, the token should be validated to authenticate to the Kubernetes API server. + "audiences": ["https://myserver.example.com", "https://myserver.internal.example.com"] } } ``` +{{% /tab %}} +{{< /tabs >}} -The remote service is expected to fill the `status` field of -the request to indicate the success of the login. The response body's `spec` -field is ignored and may be omitted. A successful validation of the bearer -token would return: +The remote service is expected to fill the `status` field of the request to indicate the success of the login. +The response body's `spec` field is ignored and may be omitted. +The remote service must return a response using the same `TokenReview` API version that it received. +A successful validation of the bearer token would return: -```json +{{< tabs name="TokenReview_response_success" >}} +{{% tab name="authentication.k8s.io/v1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenReview", + "status": { + "authenticated": true, + "user": { + # Required + "username": "janedoe@example.com", + # Optional + "uid": "42", + # Optional group memberships + "groups": ["developers", "qa"], + # Optional additional information provided by the authenticator. + # This should not contain confidential data, as it can be recorded in logs + # or API objects, and is made available to admission webhooks. + "extra": { + "extrafield1": [ + "extravalue1", + "extravalue2" + ] + } + }, + # Optional list audience-aware token authenticators can return, + # containing the audiences from the `spec.audiences` list for which the provided token was valid. + # If this is omitted, the token is considered to be valid to authenticate to the Kubernetes API server. 
+ "audiences": ["https://myserver.example.com"] + } +} +``` +{{% /tab %}} +{{% tab name="authentication.k8s.io/v1beta1" %}} +```yaml { "apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview", "status": { "authenticated": true, "user": { + # Required "username": "janedoe@example.com", + # Optional "uid": "42", - "groups": [ - "developers", - "qa" - ], + # Optional group memberships + "groups": ["developers", "qa"], + # Optional additional information provided by the authenticator. + # This should not contain confidential data, as it can be recorded in logs + # or API objects, and is made available to admission webhooks. "extra": { "extrafield1": [ "extravalue1", "extravalue2" ] } - } + }, + # Optional list audience-aware token authenticators can return, + # containing the audiences from the `spec.audiences` list for which the provided token was valid. + # If this is omitted, the token is considered to be valid to authenticate to the Kubernetes API server. + "audiences": ["https://myserver.example.com"] } } ``` +{{% /tab %}} +{{< /tabs >}} An unsuccessful request would return: -```json +{{< tabs name="TokenReview_response_error" >}} +{{% tab name="authentication.k8s.io/v1" %}} +```yaml +{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenReview", + "status": { + "authenticated": false, + # Optionally include details about why authentication failed. + # If no error is provided, the API will return a generic Unauthorized message. + # The error field is ignored when authenticated=true. + "error": "Credentials are expired" + } +} +``` +{{% /tab %}} +{{% tab name="authentication.k8s.io/v1beta1" %}} +```yaml { "apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview", "status": { - "authenticated": false + "authenticated": false, + # Optionally include details about why authentication failed. + # If no error is provided, the API will return a generic Unauthorized message. + # The error field is ignored when authenticated=true. + "error": "Credentials are expired" } } ``` - -HTTP status codes can be used to supply additional error context. - +{{% /tab %}} +{{< /tabs >}} ### Authenticating Proxy diff --git a/content/en/docs/reference/access-authn-authz/authorization.md b/content/en/docs/reference/access-authn-authz/authorization.md index db668f818acda..7a251726fbc1b 100644 --- a/content/en/docs/reference/access-authn-authz/authorization.md +++ b/content/en/docs/reference/access-authn-authz/authorization.md @@ -52,7 +52,7 @@ Kubernetes reviews only the following API request attributes: * **Resource** - The ID or name of the resource that is being accessed (for resource requests only) -- For resource requests using `get`, `update`, `patch`, and `delete` verbs, you must provide the resource name. * **Subresource** - The subresource that is being accessed (for resource requests only). * **Namespace** - The namespace of the object that is being accessed (for namespaced resource requests only). - * **API group** - The {{< glossary_tooltip text="API Group" term_id="api-group" >}} being accessed (for resource requests only). An empty string designates the [core API group](/docs/concepts/overview/kubernetes-api/). + * **API group** - The {{< glossary_tooltip text="API Group" term_id="api-group" >}} being accessed (for resource requests only). An empty string designates the [core API group](/docs/reference/using-api/api-overview/#api-groups). 
## Determine the Request Verb diff --git a/content/en/docs/reference/glossary/rkt.md b/content/en/docs/reference/glossary/rkt.md deleted file mode 100644 index 165bce5406130..0000000000000 --- a/content/en/docs/reference/glossary/rkt.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: rkt -id: rkt -date: 2019-01-24 -full_link: https://coreos.com/rkt/ -short_description: > - A security-minded, standards-based container engine. - -aka: -tags: -- security -- tool ---- - A security-minded, standards-based container engine. - - - -rkt is an application {{< glossary_tooltip text="container" term_id="container" >}} engine featuring a {{< glossary_tooltip text="Pod" term_id="pod" >}}-native approach, a pluggable execution environment, and a well-defined surface area. rkt allows users to apply different configurations at both the Pod and application level. Each Pod executes directly in the classic Unix process model, in a self-contained, isolated environment. diff --git a/content/en/docs/reference/scheduling/config.md b/content/en/docs/reference/scheduling/config.md index 530e881cbd84f..0dca862fb9f87 100644 --- a/content/en/docs/reference/scheduling/config.md +++ b/content/en/docs/reference/scheduling/config.md @@ -20,10 +20,7 @@ by implementing one or more of these extension points. You can specify scheduling profiles by running `kube-scheduler --config `, using the component config APIs -([`v1alpha1`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1alpha1?tab=doc#KubeSchedulerConfiguration) -or [`v1alpha2`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1alpha2?tab=doc#KubeSchedulerConfiguration)). -The `v1alpha2` API allows you to configure kube-scheduler to run -[multiple profiles](#multiple-profiles). +([`v1beta1`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.19.0/config/v1beta1?tab=doc#KubeSchedulerConfiguration)). A minimal configuration looks as follows: diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index 21a6e628a8716..f84c62c01d20f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -12,6 +12,14 @@ weight: 90 from the community. Please try it out and give us feedback! {{< /caution >}} +## kubeadm alpha certs {#cmd-certs} + +A collection of operations for operating Kubernetes certificates. + +{{< tabs name="tab-certs" >}} +{{< tab name="overview" include="generated/kubeadm_alpha_certs.md" />}} +{{< /tabs >}} + ## kubeadm alpha certs renew {#cmd-certs-renew} You can renew all Kubernetes certificates using the `all` subcommand or renew them selectively. @@ -42,6 +50,15 @@ to enable the automatic copy of certificates when joining additional control-pla {{< tab name="certificate-key" include="generated/kubeadm_alpha_certs_certificate-key.md" />}} {{< /tabs >}} +## kubeadm alpha certs generate-csr {#cmd-certs-generate-csr} + +This command can be used to generate certificate signing requests (CSRs) which +can be submitted to a certificate authority (CA) for signing. + +{{< tabs name="tab-certs-generate-csr" >}} +{{< tab name="certificate-generate-csr" include="generated/kubeadm_alpha_certs_generate-csr.md" />}} +{{< /tabs >}} + ## kubeadm alpha certs check-expiration {#cmd-certs-check-expiration} This command checks expiration for the certificates in the local PKI managed by kubeadm. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index 655f9ec875f14..23dff658e9eee 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -16,6 +16,10 @@ You can use `kubeadm config print` to print the default configuration and `kubea convert your old configuration files to a newer version. `kubeadm config images list` and `kubeadm config images pull` can be used to list and pull the images that kubeadm requires. +For more information, navigate to +[Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) +or [Using kubeadm join with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-join/#config-file). + In Kubernetes v1.13.0 and later to list/pull kube-dns images instead of the CoreDNS image the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) has to be used. diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 289767e1e13cf..21ab7a863d73f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -119,6 +119,17 @@ Use the following phase to configure bootstrap tokens. {{< tab name="bootstrap-token" include="generated/kubeadm_init_phase_bootstrap-token.md" />}} {{< /tabs >}} +## kubeadm init phase kubelet-finalize {#cmd-phase-kubelet-finalize-all} + +Use the following phase to update settings relevant to the kubelet after TLS +bootstrap. You can use the `all` subcommand to run all `kubelet-finalize` +phases. + +{{< tabs name="tab-kubelet-finalize" >}} +{{< tab name="kubelet-finalize" include="generated/kubeadm_init_phase_kubelet-finalize.md" />}} +{{< tab name="kubelet-finalize-all" include="generated/kubeadm_init_phase_kubelet-finalize_all.md" />}} +{{< tab name="kubelet-finalize-cert-rotation" include="generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md" />}} +{{< /tabs >}} ## kubeadm init phase addon {#cmd-phase-addon} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 997240399e435..7a210ba5de257 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -114,16 +114,18 @@ The config file is still considered beta and may change in future versions. It's possible to configure `kubeadm init` with a configuration file instead of command line flags, and some more advanced features may only be available as -configuration file options. This file is passed with the `--config` option. +configuration file options. This file is passed using the `--config` flag and it must +contain a `ClusterConfiguration` structure and optionally more structures separated by `---\n`. +Mixing `--config` with other flags may not be allowed in some cases. The default configuration can be printed out using the [kubeadm config print](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command.
-It is **recommended** that you migrate your old `v1beta1` configuration to `v1beta2` using +If your configuration is not using the latest version, it is **recommended** that you migrate using the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -For more details on each field in the `v1beta2` configuration you can navigate to our -[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2). +For more information on the fields and usage of the configuration, you can navigate to our API reference +page and pick a version from [the list](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories). ### Adding kube-proxy parameters {#kube-proxy} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index 28d489cfb6ae9..0a39f709273f4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -273,15 +273,17 @@ The config file is still considered beta and may change in future versions. It's possible to configure `kubeadm join` with a configuration file instead of command line flags, and some more advanced features may only be available as configuration file options. This file is passed using the `--config` flag and it must -contain a `JoinConfiguration` structure. +contain a `JoinConfiguration` structure. Mixing `--config` with other flags may not be +allowed in some cases. -To print the default values of `JoinConfiguration` run the following command: +The default configuration can be printed out using the +[kubeadm config print](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -```shell -kubeadm config print join-defaults -``` +If your configuration is not using the latest version, it is **recommended** that you migrate using +the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. -For details on individual fields in `JoinConfiguration` see [the godoc](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#JoinConfiguration). +For more information on the fields and usage of the configuration, you can navigate to our API reference +page and pick a version from [the list](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories). ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md index a7f4b6d1a6468..1f712f912cbc3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md @@ -15,6 +15,7 @@ be called on a primary control-plane node.
{{< tabs name="tab-phase" >}} {{< tab name="phase" include="generated/kubeadm_upgrade_node_phase.md" />}} +{{< tab name="preflight" include="generated/kubeadm_upgrade_node_phase_preflight.md" />}} {{< tab name="control-plane" include="generated/kubeadm_upgrade_node_phase_control-plane.md" />}} {{< tab name="kubelet-config" include="generated/kubeadm_upgrade_node_phase_kubelet-config.md" />}} {{< /tabs >}} diff --git a/content/en/docs/setup/best-practices/multiple-zones.md b/content/en/docs/setup/best-practices/multiple-zones.md index 7c2622641b865..501e9546428bf 100644 --- a/content/en/docs/setup/best-practices/multiple-zones.md +++ b/content/en/docs/setup/best-practices/multiple-zones.md @@ -4,401 +4,141 @@ reviewers: - justinsb - quinton-hoole title: Running in multiple zones -weight: 10 +weight: 20 content_type: concept --- -This page describes how to run a cluster in multiple zones. - - +This page describes running Kubernetes across multiple zones. -## Introduction - -Kubernetes 1.2 adds support for running a single cluster in multiple failure zones -(GCE calls them simply "zones", AWS calls them "availability zones", here we'll refer to them as "zones"). -This is a lightweight version of a broader Cluster Federation feature (previously referred to by the affectionate -nickname ["Ubernetes"](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md)). -Full Cluster Federation allows combining separate -Kubernetes clusters running in different regions or cloud providers -(or on-premises data centers). However, many -users simply want to run a more available Kubernetes cluster in multiple zones -of their single cloud provider, and this is what the multizone support in 1.2 allows -(this previously went by the nickname "Ubernetes Lite"). - -Multizone support is deliberately limited: a single Kubernetes cluster can run -in multiple zones, but only within the same region (and cloud provider). Only -GCE and AWS are currently supported automatically (though it is easy to -add similar support for other clouds or even bare metal, by simply arranging -for the appropriate labels to be added to nodes and volumes). - - -## Functionality - -When nodes are started, the kubelet automatically adds labels to them with -zone information. - -Kubernetes will automatically spread the pods in a replication controller -or service across nodes in a single-zone cluster (to reduce the impact of -failures.) With multiple-zone clusters, this spreading behavior is -extended across zones (to reduce the impact of zone failures.) (This is -achieved via `SelectorSpreadPriority`). This is a best-effort -placement, and so if the zones in your cluster are heterogeneous -(e.g. different numbers of nodes, different types of nodes, or -different pod resource requirements), this might prevent perfectly -even spreading of your pods across zones. If desired, you can use -homogeneous zones (same number and types of nodes) to reduce the -probability of unequal spreading. - -When persistent volumes are created, the `PersistentVolumeLabel` -admission controller automatically adds zone labels to them. The scheduler (via the -`VolumeZonePredicate` predicate) will then ensure that pods that claim a -given volume are only placed into the same zone as that volume, as volumes -cannot be attached across zones. 
- -## Limitations - -There are some important limitations of the multizone support: - -* We assume that the different zones are located close to each other in the -network, so we don't perform any zone-aware routing. In particular, traffic -that goes via services might cross zones (even if some pods backing that service -exist in the same zone as the client), and this may incur additional latency and cost. - -* Volume zone-affinity will only work with a `PersistentVolume`, and will not -work if you directly specify an EBS volume in the pod spec (for example). - -* Clusters cannot span clouds or regions (this functionality will require full -federation support). - -* Although your nodes are in multiple zones, kube-up currently builds -a single master node by default. While services are highly -available and can tolerate the loss of a zone, the control plane is -located in a single zone. Users that want a highly available control -plane should follow the [high availability](/docs/setup/production-environment/tools/kubeadm/high-availability/) instructions. - -### Volume limitations -The following limitations are addressed with [topology-aware volume binding](/docs/concepts/storage/storage-classes/#volume-binding-mode). - -* StatefulSet volume zone spreading when using dynamic provisioning is currently not compatible with - pod affinity or anti-affinity policies. - -* If the name of the StatefulSet contains dashes ("-"), volume zone spreading - may not provide a uniform distribution of storage across zones. - -* When specifying multiple PVCs in a Deployment or Pod spec, the StorageClass - needs to be configured for a specific single zone, or the PVs need to be - statically provisioned in a specific zone. Another workaround is to use a - StatefulSet, which will ensure that all the volumes for a replica are - provisioned in the same zone. - -## Walkthrough - -We're now going to walk through setting up and using a multi-zone -cluster on both GCE & AWS. To do so, you bring up a full cluster -(specifying `MULTIZONE=true`), and then you add nodes in additional zones -by running `kube-up` again (specifying `KUBE_USE_EXISTING_MASTER=true`). - -### Bringing up your cluster - -Create the cluster as normal, but pass MULTIZONE to tell the cluster to manage multiple zones; creating nodes in us-central1-a. - -GCE: - -```shell -curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash -``` - -AWS: - -```shell -curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash -``` - -This step brings up a cluster as normal, still running in a single zone -(but `MULTIZONE=true` has enabled multi-zone capabilities). - -### Nodes are labeled - -View the nodes; you can see that they are labeled with zone information. -They are all in `us-central1-a` (GCE) or `us-west-2a` (AWS) so far. 
The -labels are `failure-domain.beta.kubernetes.io/region` for the region, -and `failure-domain.beta.kubernetes.io/zone` for the zone: - -```shell -kubectl get nodes --show-labels -``` - -The output is similar to this: - -```shell -NAME STATUS ROLES AGE VERSION LABELS -kubernetes-master Ready,SchedulingDisabled 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master -kubernetes-minion-87j9 Ready 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9 -kubernetes-minion-9vlv Ready 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -kubernetes-minion-a12q Ready 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q -``` - -### Add more nodes in a second zone - -Let's add another set of nodes to the existing cluster, reusing the -existing master, running in a different zone (us-central1-b or us-west-2b). -We run kube-up again, but by specifying `KUBE_USE_EXISTING_MASTER=true` -kube-up will not create a new master, but will reuse one that was previously -created instead. - -GCE: - -```shell -KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 kubernetes/cluster/kube-up.sh -``` - -On AWS we also need to specify the network CIDR for the additional -subnet, along with the master internal IP address: - -```shell -KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh -``` - - -View the nodes again; 3 more nodes should have launched and be tagged -in us-central1-b: +## Background -```shell -kubectl get nodes --show-labels -``` +Kubernetes is designed so that a single Kubernetes cluster can run +across multiple failure zones, typically where these zones fit within +a logical grouping called a _region_. Major cloud providers define a region +as a set of failure zones (also called _availability zones_) that provide +a consistent set of features: within a region, each zone offers the same +APIs and services. -The output is similar to this: +Typical cloud architectures aim to minimize the chance that a failure in +one zone also impairs services in another zone. 
-```shell -NAME STATUS ROLES AGE VERSION LABELS -kubernetes-master Ready,SchedulingDisabled 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master -kubernetes-minion-281d Ready 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d -kubernetes-minion-87j9 Ready 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9 -kubernetes-minion-9vlv Ready 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -kubernetes-minion-a12q Ready 17m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q -kubernetes-minion-pp2f Ready 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-pp2f -kubernetes-minion-wf8i Ready 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-wf8i -``` +## Control plane behavior -### Volume affinity +All [control plane components](/docs/concepts/overview/components/#control-plane-components) +support running as a pool of interchangable resources, replicated per +component. -Create a volume using the dynamic volume creation (only PersistentVolumes are supported for zone affinity): - -```bash -kubectl apply -f - <}} -For version 1.3+ Kubernetes will distribute dynamic PV claims across -the configured zones. For version 1.2, dynamic persistent volumes were -always created in the zone of the cluster master -(here us-central1-a / us-west-2a); that issue -([#23330](https://github.com/kubernetes/kubernetes/issues/23330)) -was addressed in 1.3+. +Kubernetes does not provide cross-zone resilience for the API server +endpoints. You can use various techniques to improve availability for +the cluster API server, including DNS round-robin, SRV records, or +a third-party load balancing solution with health checking. {{< /note >}} -Now let's validate that Kubernetes automatically labeled the zone & region the PV was created in. - -```shell -kubectl get pv --show-labels -``` - -The output is similar to this: - -```shell -NAME CAPACITY ACCESSMODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE LABELS -pv-gce-mj4gm 5Gi RWO Retain Bound default/claim1 manual 46s failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a -``` - -So now we will create a pod that uses the persistent volume claim. -Because GCE PDs / AWS EBS volumes cannot be attached across zones, -this means that this pod can only be created in the same zone as the volume: - -```yaml -kubectl apply -f - <}} +or {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}) +across different nodes in a cluster. 
This spreading helps +reduce the impact of failures. -The pods should be spread across all 3 zones: +When nodes start up, the kubelet on each node automatically adds +{{< glossary_tooltip text="labels" term_id="label" >}} to the Node object +that represents that specific kubelet in the Kubernetes API. +These labels can include +[zone information](/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesiozone). -```shell -kubectl describe pod -l app=guestbook | grep Node -``` +If your cluster spans multiple zones or regions, you can use node labels +in conjunction with +[Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) +to control how Pods are spread across your cluster among fault domains: +regions, zones, and even specific nodes. +These hints enable the +{{< glossary_tooltip text="scheduler" term_id="kube-scheduler" >}} to place +Pods for better expected availability, reducing the risk that a correlated +failure affects your whole workload. -```shell -Node: kubernetes-minion-9vlv/10.240.0.5 -Node: kubernetes-minion-281d/10.240.0.8 -Node: kubernetes-minion-olsh/10.240.0.11 -``` +For example, you can set a constraint to make sure that the +3 replicas of a StatefulSet are all running in different zones to each +other, whenever that is feasible. You can define this declaratively +without explicitly defining which availability zones are in use for +each workload. -```shell -kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d kubernetes-minion-olsh --show-labels -``` +### Distributing nodes across zones -```shell -NAME STATUS ROLES AGE VERSION LABELS -kubernetes-minion-9vlv Ready 34m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -kubernetes-minion-281d Ready 20m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d -kubernetes-minion-olsh Ready 3m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-f,kubernetes.io/hostname=kubernetes-minion-olsh -``` +Kubernetes' core does not create nodes for you; you need to do that yourself, +or use a tool such as the [Cluster API](https://cluster-api.sigs.k8s.io/) to +manage nodes on your behalf. +Using tools such as the Cluster API you can define sets of machines to run as +worker nodes for your cluster across multiple failure domains, and rules to +automatically heal the cluster in case of whole-zone service disruption. 
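
To make the zone-spreading approach described above concrete, here is a minimal sketch of a StatefulSet whose three replicas are constrained to land in different zones whenever that is feasible. The workload name, labels, and image are hypothetical placeholders, and a matching headless Service is assumed; `topology.kubernetes.io/zone` is the standard zone label set on Nodes.

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zone-spread-demo          # hypothetical name
spec:
  serviceName: zone-spread-demo   # assumes a matching headless Service exists
  replicas: 3
  selector:
    matchLabels:
      app: zone-spread-demo
  template:
    metadata:
      labels:
        app: zone-spread-demo
    spec:
      # Keep the replicas evenly spread across zones; with maxSkew: 1 and three
      # zones available, each replica ends up in a different zone.
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: zone-spread-demo
      containers:
      - name: app
        image: k8s.gcr.io/pause:3.2   # placeholder image
```

Setting `whenUnsatisfiable: DoNotSchedule` keeps Pods Pending rather than violating the constraint; `ScheduleAnyway` would treat the spread as a soft preference instead.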
-Load-balancers span all zones in a cluster; the guestbook-go example -includes an example load-balanced service: +## Manual zone assignment for Pods -```shell -kubectl describe service guestbook | grep LoadBalancer.Ingress -``` - -The output is similar to this: - -```shell -LoadBalancer Ingress: 130.211.126.21 -``` - -Set the above IP: - -```shell -export IP=130.211.126.21 -``` - -Explore with curl via IP: - -```shell -curl -s http://${IP}:3000/env | grep HOSTNAME -``` - -The output is similar to this: - -```shell - "HOSTNAME": "guestbook-44sep", -``` - -Again, explore multiple times: - -```shell -(for i in `seq 20`; do curl -s http://${IP}:3000/env | grep HOSTNAME; done) | sort | uniq -``` - -The output is similar to this: - -```shell - "HOSTNAME": "guestbook-44sep", - "HOSTNAME": "guestbook-hum5n", - "HOSTNAME": "guestbook-ppm40", -``` - -The load balancer correctly targets all the pods, even though they are in multiple zones. - -### Shutting down the cluster - -When you're done, clean up: - -GCE: - -```shell -KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a kubernetes/cluster/kube-down.sh -``` - -AWS: - -```shell -KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh -``` +You can apply [node selector constraints](/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) +to Pods that you create, as well as to Pod templates in workload resources +such as Deployment, StatefulSet, or Job. +## Storage access for zones +When persistent volumes are created, the `PersistentVolumeLabel` +[admission controller](/docs/reference/access-authn-authz/admission-controllers/) +automatically adds zone labels to any PersistentVolumes that are linked to a specific +zone. The {{< glossary_tooltip text="scheduler" term_id="kube-scheduler" >}} then ensures, +through its `NoVolumeZoneConflict` predicate, that pods which claim a given PersistentVolume +are only placed into the same zone as that volume. + +You can specify a {{< glossary_tooltip text="StorageClass" term_id="storage-class" >}} +for PersistentVolumeClaims that specifies the failure domains (zones) that the +storage in that class may use. +To learn about configuring a StorageClass that is aware of failure domains or zones, +see [Allowed topologies](/docs/concepts/storage/storage-classes/#allowed-topologies). + +## Networking + +By itself, Kubernetes does not include zone-aware networking. You can use a +[network plugin](docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) +to configure cluster networking, and that network solution might have zone-specific +elements. For example, if your cloud provider supports Services with +`type=LoadBalancer`, the load balancer might only send traffic to Pods running in the +same zone as the load balancer element processing a given connection. +Check your cloud provider's documentation for details. + +For custom or on-premises deployments, similar considerations apply. 
+{{< glossary_tooltip text="Service" term_id="service" >}} and +{{< glossary_tooltip text="Ingress" term_id="ingress" >}} behavior, including handling +of different failure zones, does vary depending on exactly how your cluster is set up. + +## Fault recovery + +When you set up your cluster, you might also need to consider whether and how +your setup can restore service if all of the failure zones in a region go +off-line at the same time. For example, do you rely on there being at least +one node able to run Pods in a zone? +Make sure that any cluster-critical repair work does not rely +on there being at least one healthy node in your cluster. For example: if all nodes +are unhealthy, you might need to run a repair Job with a special +{{< glossary_tooltip text="toleration" term_id="toleration" >}} so that the repair +can complete enough to bring at least one node into service. + +Kubernetes doesn't come with an answer for this challenge; however, it's +something to consider. + +## {{% heading "whatsnext" %}} + +To learn how the scheduler places Pods in a cluster, honoring the configured constraints, +visit [Scheduling and Eviction](/docs/concepts/scheduling-eviction/). diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index f82e4637c2dcd..daf4aaec1a670 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -139,7 +139,7 @@ is not supported by kubeadm. For more information about `kubeadm init` arguments, see the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm/). -For a complete list of configuration options, see the [configuration file documentation](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). +To configure `kubeadm init` with a configuration file see [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). To customize control plane components, including optional IPv6 assignment to liveness probe for control plane components and etcd server, provide extra arguments to each component as documented in [custom arguments](/docs/setup/production-environment/tools/kubeadm/control-plane-flags/). diff --git a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md index df4b49dc19f71..5b3fd114b0a24 100644 --- a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md @@ -132,31 +132,12 @@ The following file is an Ingress resource that sends traffic to your Service via 1. Create `example-ingress.yaml` from the following file: - ```yaml - apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - name: example-ingress - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$1 - spec: - rules: - - host: hello-world.info - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: web - port: - number: 8080 - ``` + {{< codenew file="service/networking/example-ingress.yaml" >}} 1. 
Create the Ingress resource by running the following command: ```shell - kubectl apply -f example-ingress.yaml + kubectl apply -f https://k8s.io/examples/service/networking/example-ingress.yaml ``` Output: diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index a6c2e217a50e2..3eceb4f6d2fbc 100644 --- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -152,7 +152,7 @@ for database debugging. or ```shell - kubectl port-forward service/redis-master 7000:6379 + kubectl port-forward service/redis-master 7000:redis ``` Any of the above commands works. The output is similar to this: diff --git a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md index e762258c88c1b..8680abad43e6e 100644 --- a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -222,7 +222,7 @@ data: fallthrough in-addr.arpa ip6.arpa } prometheus :9153 - proxy . /etc/resolv.conf + forward . /etc/resolv.conf cache 30 loop reload diff --git a/content/en/docs/tasks/administer-cluster/out-of-resource.md b/content/en/docs/tasks/administer-cluster/out-of-resource.md index 973eda947eacd..908d7155d541d 100644 --- a/content/en/docs/tasks/administer-cluster/out-of-resource.md +++ b/content/en/docs/tasks/administer-cluster/out-of-resource.md @@ -341,4 +341,11 @@ to prevent system OOMs, and promote eviction of workloads so cluster state can r The Pod eviction may evict more Pods than needed due to stats collection timing gap. This can be mitigated by adding the ability to get root container stats on an on-demand basis [(https://github.com/google/cadvisor/issues/1247)](https://github.com/google/cadvisor/issues/1247) in the future. +### active_file memory is not considered as available memory + +On Linux, the kernel tracks the number of bytes of file-backed memory on the active LRU list as the `active_file` statistic. The kubelet treats `active_file` memory areas as not reclaimable. For workloads that make intensive use of block-backed local storage, including ephemeral local storage, kernel-level caches of file and block data mean that many recently accessed cache pages are likely to be counted as `active_file`. If enough of these kernel block buffers are on the active LRU list, the kubelet is liable to observe this as high resource use and taint the node as experiencing memory pressure, triggering Pod eviction. + +For more details, see [https://github.com/kubernetes/kubernetes/issues/43916](https://github.com/kubernetes/kubernetes/issues/43916). + +You can work around that behavior by setting the memory limit and memory request to the same value for containers likely to perform intensive I/O activity. You will need to estimate or measure an optimal memory limit value for that container.
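
As a sketch of the workaround just described, the container below sets its memory request equal to its memory limit. The Pod name, image, and the 2Gi figure are hypothetical; the actual value is the one you estimate or measure for your workload.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: io-heavy-app              # hypothetical name
spec:
  containers:
  - name: worker
    image: k8s.gcr.io/pause:3.2   # placeholder image
    resources:
      # Request and limit are deliberately identical, per the workaround above,
      # so the memory the scheduler reserves (including file-backed cache
      # attributed to the container) matches what the kubelet enforces.
      requests:
        memory: "2Gi"
      limits:
        memory: "2Gi"
```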
diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index 4f00675c3732f..b6249f50efa1a 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -39,22 +39,7 @@ the kubelet command line option `--reserved-cpus` to set an ## Node Allocatable -```text - Node Capacity ---------------------------- -| kube-reserved | -|-------------------------| -| system-reserved | -|-------------------------| -| eviction-threshold | -|-------------------------| -| | -| allocatable | -| (available for pods) | -| | -| | ---------------------------- -``` +![node capacity](/images/docs/node-capacity.svg) `Allocatable` on a Kubernetes node is defined as the amount of compute resources that are available for pods. The scheduler does not over-subscribe diff --git a/content/en/docs/tasks/administer-cluster/running-cloud-controller.md b/content/en/docs/tasks/administer-cluster/running-cloud-controller.md index aa01c902e4e66..b1a7e565480c4 100644 --- a/content/en/docs/tasks/administer-cluster/running-cloud-controller.md +++ b/content/en/docs/tasks/administer-cluster/running-cloud-controller.md @@ -11,7 +11,7 @@ content_type: concept {{< feature-state state="beta" for_k8s_version="v1.11" >}} -Since cloud providers develop and release at a different pace compared to the Kubernetes project, abstracting the provider-specific code to the {{< glossary_tooltip text="`cloud-controller-manager`" term_id="cloud-controller-manager" >}} binary allows cloud vendors to evolve independently from the core Kubernetes code. +Since cloud providers develop and release at a different pace compared to the Kubernetes project, abstracting the provider-specific code to the `{{< glossary_tooltip text="cloud-controller-manager" term_id="cloud-controller-manager" >}}` binary allows cloud vendors to evolve independently from the core Kubernetes code. The `cloud-controller-manager` can be linked to any cloud provider that satisfies [cloudprovider.Interface](https://github.com/kubernetes/cloud-provider/blob/master/cloud.go). For backwards compatibility, the [cloud-controller-manager](https://github.com/kubernetes/kubernetes/tree/master/cmd/cloud-controller-manager) provided in the core Kubernetes project uses the same cloud libraries as `kube-controller-manager`. Cloud providers already supported in Kubernetes core are expected to use the in-tree cloud-controller-manager to transition out of Kubernetes core. diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index ed1b9657c81a1..3f37916660365 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -6,23 +6,21 @@ reviewers: - kow3ns title: Safely Drain a Node while Respecting the PodDisruptionBudget content_type: task +min-kubernetes-server-version: 1.5 --- -This page shows how to safely drain a node, respecting the PodDisruptionBudget you have defined. - +This page shows how to safely drain a {{< glossary_tooltip text="node" term_id="node" >}}, +respecting the PodDisruptionBudget you have defined. ## {{% heading "prerequisites" %}} - -This task assumes that you have met the following prerequisites: - -* You are using Kubernetes release >= 1.5. 
-* Either: +{{% version-check %}} +This task also assumes that you have met the following prerequisites: 1. You do not require your applications to be highly available during the node drain, or - 1. You have read about the [PodDisruptionBudget concept](/docs/concepts/workloads/pods/disruptions/) - and [Configured PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/) for + 1. You have read about the [PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/) concept, + and have [configured PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/) for applications that need them. @@ -35,10 +33,10 @@ You can use `kubectl drain` to safely evict all of your pods from a node before you perform maintenance on the node (e.g. kernel upgrade, hardware maintenance, etc.). Safe evictions allow the pod's containers to [gracefully terminate](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) -and will respect the `PodDisruptionBudgets` you have specified. +and will respect the PodDisruptionBudgets you have specified. {{< note >}} -By default `kubectl drain` will ignore certain system pods on the node +By default `kubectl drain` ignores certain system pods on the node that cannot be killed; see the [kubectl drain](/docs/reference/generated/kubectl/kubectl-commands/#drain) documentation for more details. @@ -78,29 +76,29 @@ The `kubectl drain` command should only be issued to a single node at a time. However, you can run multiple `kubectl drain` commands for different nodes in parallel, in different terminals or in the background. Multiple drain commands running concurrently will still -respect the `PodDisruptionBudget` you specify. +respect the PodDisruptionBudget you specify. For example, if you have a StatefulSet with three replicas and have -set a `PodDisruptionBudget` for that set specifying `minAvailable: -2`. `kubectl drain` will only evict a pod from the StatefulSet if all -three pods are ready, and if you issue multiple drain commands in -parallel, Kubernetes will respect the PodDisruptionBudget and ensure -that only one pod is unavailable at any given time. Any drains that -would cause the number of ready replicas to fall below the specified -budget are blocked. +set a PodDisruptionBudget for that set specifying `minAvailable: 2`, +`kubectl drain` only evicts a pod from the StatefulSet if all three +replicas pods are ready; if then you issue multiple drain commands in +parallel, Kubernetes respects the PodDisruptionBudget and ensure +that only 1 (calculated as `replicas - minAvailable`) Pod is unavailable +at any given time. Any drains that would cause the number of ready +replicas to fall below the specified budget are blocked. -## The Eviction API +## The Eviction API {#eviction-api} If you prefer not to use [kubectl drain](/docs/reference/generated/kubectl/kubectl-commands/#drain) (such as to avoid calling to an external command, or to get finer control over the pod eviction process), you can also programmatically cause evictions using the eviction API. -You should first be familiar with using [Kubernetes language clients](/docs/tasks/administer-cluster/access-cluster-api/#programmatic-access-to-the-api). +You should first be familiar with using [Kubernetes language clients](/docs/tasks/administer-cluster/access-cluster-api/#programmatic-access-to-the-api) to access the API. The eviction subresource of a -pod can be thought of as a kind of policy-controlled DELETE operation on the pod -itself. 
To attempt an eviction (perhaps more REST-precisely, to attempt to -*create* an eviction), you POST an attempted operation. Here's an example: +Pod can be thought of as a kind of policy-controlled DELETE operation on the Pod +itself. To attempt an eviction (more precisely: to attempt to +*create* an Eviction), you POST an attempted operation. Here's an example: ```json { @@ -116,21 +114,19 @@ itself. To attempt an eviction (perhaps more REST-precisely, to attempt to You can attempt an eviction using `curl`: ```bash -curl -v -H 'Content-type: application/json' http://127.0.0.1:8080/api/v1/namespaces/default/pods/quux/eviction -d @eviction.json +curl -v -H 'Content-type: application/json' https://your-cluster-api-endpoint.example/api/v1/namespaces/default/pods/quux/eviction -d @eviction.json ``` The API can respond in one of three ways: -- If the eviction is granted, then the pod is deleted just as if you had sent - a `DELETE` request to the pod's URL and you get back `200 OK`. +- If the eviction is granted, then the Pod is deleted just as if you had sent + a `DELETE` request to the Pod's URL and you get back `200 OK`. - If the current state of affairs wouldn't allow an eviction by the rules set forth in the budget, you get back `429 Too Many Requests`. This is typically used for generic rate limiting of *any* requests, but here we mean that this request isn't allowed *right now* but it may be allowed later. - Currently, callers do not get any `Retry-After` advice, but they may in - future versions. -- If there is some kind of misconfiguration, like multiple budgets pointing at - the same pod, you will get `500 Internal Server Error`. +- If there is some kind of misconfiguration; for example multiple PodDisruptionBudgets + that refer the same Pod, you get a `500 Internal Server Error` response. For a given eviction request, there are two cases: @@ -139,21 +135,25 @@ For a given eviction request, there are two cases: - There is at least one budget. In this case, any of the three above responses may apply. -In some cases, an application may reach a broken state where it will never return anything -other than 429 or 500. This can happen, for example, if the replacement pod created by the -application's controller does not become ready, or if the last pod evicted has a very long -termination grace period. +## Stuck evictions + +In some cases, an application may reach a broken state, one where unless you intervene the +eviction API will never return anything other than 429 or 500. + +For example: this can happen if ReplicaSet is creating Pods for your application but +the replacement Pods do not become `Ready`. You can also see similar symptoms if the +last Pod evicted has a very long termination grace period. In this case, there are two potential solutions: -- Abort or pause the automated operation. Investigate the reason for the stuck application, and restart the automation. -- After a suitably long wait, `DELETE` the pod instead of using the eviction API. +- Abort or pause the automated operation. Investigate the reason for the stuck application, + and restart the automation. +- After a suitably long wait, `DELETE` the Pod from your cluster's control plane, instead + of using the eviction API. Kubernetes does not specify what the behavior should be in this case; it is up to the application owners and cluster owners to establish an agreement on behavior in these cases. 
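
For reference, the budget discussed earlier (three replicas with `minAvailable: 2`) could be expressed with a PodDisruptionBudget like the minimal sketch below; the names and labels are hypothetical, and on Kubernetes v1.21 or later you would use `apiVersion: policy/v1` instead.

```yaml
apiVersion: policy/v1beta1        # use policy/v1 on Kubernetes v1.21+
kind: PodDisruptionBudget
metadata:
  name: example-pdb               # hypothetical name
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: example                # must match the labels on the StatefulSet's Pods
```

With this budget in place, parallel `kubectl drain` commands can evict at most one of the three Pods at any given time, as described above.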
- - ## {{% heading "whatsnext" %}} @@ -162,4 +162,3 @@ application owners and cluster owners to establish an agreement on behavior in t - diff --git a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md index 3afda46609fc9..76c555aace1f9 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md +++ b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md @@ -222,6 +222,13 @@ Container is automatically assigned the default limit. Cluster administrators ca [LimitRange](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#limitrange-v1-core/) to specify a default value for the CPU limit. +## If you specify a CPU limit but do not specify a CPU request + +If you specify a CPU limit for a Container but do not specify a CPU request, Kubernetes automatically +assigns a CPU request that matches the limit. Similarly, if a Container specifies its own memory limit, +but does not specify a memory request, Kubernetes automatically assigns a memory request that matches +the limit. + ## Motivation for CPU requests and limits By configuring the CPU requests and limits of the Containers that run in your diff --git a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index 00c5eb89447f3..f5f2e78ba2db3 100644 --- a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -9,29 +9,29 @@ content_type: concept Resource usage metrics, such as container CPU and memory usage, -are available in Kubernetes through the Metrics API. These metrics can be either accessed directly -by user, for example by using `kubectl top` command, or used by a controller in the cluster, e.g. +are available in Kubernetes through the Metrics API. These metrics can be accessed either directly +by the user with the `kubectl top` command, or by a controller in the cluster, for example Horizontal Pod Autoscaler, to make decisions. ## The Metrics API -Through the Metrics API you can get the amount of resource currently used +Through the Metrics API, you can get the amount of resource currently used by a given node or a given pod. This API doesn't store the metric values, -so it's not possible for example to get the amount of resources used by a +so it's not possible, for example, to get the amount of resources used by a given node 10 minutes ago. The API is no different from any other API: -- it is discoverable through the same endpoint as the other Kubernetes APIs under `/apis/metrics.k8s.io/` path -- it offers the same security, scalability and reliability guarantees +- it is discoverable through the same endpoint as the other Kubernetes APIs under the path: `/apis/metrics.k8s.io/` +- it offers the same security, scalability, and reliability guarantees The API is defined in [k8s.io/metrics](https://github.com/kubernetes/metrics/blob/master/pkg/apis/metrics/v1beta1/types.go) repository. You can find more information about the API there. {{< note >}} -The API requires metrics server to be deployed in the cluster. Otherwise it will be not available. +The API requires the metrics server to be deployed in the cluster. Otherwise it will be not available. {{< /note >}} ## Measuring Resource Usage @@ -49,22 +49,19 @@ The kubelet chooses the window for the rate calculation. 
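
Returning to the CPU note above: the following minimal sketch declares only a CPU limit. The Pod name, image, and the 500m figure are hypothetical; after creating such a Pod, its CPU request is automatically set to the same value as the limit.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: cpu-limit-only-demo       # hypothetical name
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.2   # placeholder image
    resources:
      limits:
        cpu: "500m"
      # No requests block is given, so Kubernetes defaults requests.cpu to 500m.
```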
Memory is reported as the working set, in bytes, at the instant the metric was collected. In an ideal world, the "working set" is the amount of memory in-use that cannot be freed under memory pressure. However, calculation of the working set varies by host OS, and generally makes heavy use of heuristics to produce an estimate. -It includes all anonymous (non-file-backed) memory since kubernetes does not support swap. +It includes all anonymous (non-file-backed) memory since Kubernetes does not support swap. The metric typically also includes some cached (file-backed) memory, because the host OS cannot always reclaim such pages. ## Metrics Server [Metrics Server](https://github.com/kubernetes-incubator/metrics-server) is a cluster-wide aggregator of resource usage data. -It is deployed by default in clusters created by `kube-up.sh` script -as a Deployment object. If you use a different Kubernetes setup mechanism you can deploy it using the provided +By default, it is deployed in clusters created by `kube-up.sh` script +as a Deployment object. If you use a different Kubernetes setup mechanism, you can deploy it using the provided [deployment components.yaml](https://github.com/kubernetes-sigs/metrics-server/releases) file. -Metric server collects metrics from the Summary API, exposed by -[Kubelet](/docs/reference/command-line-tools-reference/kubelet/) on each node. - -Metrics Server is registered with the main API server through +Metrics Server collects metrics from the Summary API, exposed by +[Kubelet](/docs/reference/command-line-tools-reference/kubelet/) on each node, and is registered with the main API server via [Kubernetes aggregator](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). Learn more about the metrics server in [the design doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md). - diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 9c8e1f89515b6..a89fb778a952b 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -114,11 +114,7 @@ Now, we will see how the autoscaler reacts to increased load. We will start a container, and send an infinite loop of queries to the php-apache service (please run it in a different terminal): ```shell -kubectl run -it --rm load-generator --image=busybox /bin/sh - -Hit enter for command prompt - -while true; do wget -q -O- http://php-apache; done +kubectl run -i --tty load-generator --rm --image=busybox --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://php-apache; done" ``` Within a minute or so, we should see the higher CPU load by executing: diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md index c79059d335da1..1da20c5219411 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -319,7 +319,7 @@ For instance if there are 80 replicas and the target has to be scaled down to 10 then during the first step 8 replicas will be reduced. In the next iteration when the number of replicas is 72, 10% of the pods is 7.2 but the number is rounded up to 8. 
On each loop of the autoscaler controller the number of pods to be change is re-calculated based on the number -of current replicas. When the number of replicas falls below 40 the first policy_(Pods)_ is applied +of current replicas. When the number of replicas falls below 40 the first policy _(Pods)_ is applied and 4 replicas will be reduced at a time. `periodSeconds` indicates the length of time in the past for which the policy must hold true. @@ -328,7 +328,7 @@ allows at most 10% of the current replicas to be scaled down in one minute. The policy selection can be changed by specifying the `selectPolicy` field for a scaling direction. By setting the value to `Min` which would select the policy which allows the -smallest change in the replica count. Setting the value to `Disabled` completely disabled +smallest change in the replica count. Setting the value to `Disabled` completely disables scaling in that direction. ### Stabilization Window @@ -405,8 +405,9 @@ behavior: periodSeconds: 60 ``` -To allow a final drop of 5 pods, another policy can be added with a selection -strategy of maximum: +To ensure that no more than 5 Pods are removed per minute, you can add a second scale-down +policy with a fixed size of 5, and set `selectPolicy` to minimum. Setting `selectPolicy` to `Min` means +that the autoscaler chooses the policy that affects the smallest number of Pods: ```yaml behavior: @@ -418,7 +419,7 @@ behavior: - type: Pods value: 5 periodSeconds: 60 - selectPolicy: Max + selectPolicy: Min ``` ### Example: disable scale down @@ -441,4 +442,3 @@ behavior: * kubectl autoscale command: [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale). * Usage example of [Horizontal Pod Autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). - diff --git a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md index e35fddcd467aa..62e5cfc9cfc88 100644 --- a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md +++ b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md @@ -76,11 +76,16 @@ cat <View kubectl Install and Set Up Guide -## Minikube +You can also read the +[`kubectl` reference documentation](/docs/reference/kubectl/). -[Minikube](https://minikube.sigs.k8s.io/) is a tool that lets you run -Kubernetes locally. Minikube runs a single-node Kubernetes cluster on your personal -computer (including Windows, macOS and Linux PCs) so that you can try out Kubernetes, -or for daily development work. +## minikube -You can follow the official [Get Started!](https://minikube.sigs.k8s.io/docs/start/) -guide, or read [Install Minikube](/docs/tasks/tools/install-minikube/) if your focus -is on getting the tool installed. +[`minikube`](https://minikube.sigs.k8s.io/) is a tool that lets you run Kubernetes +locally. `minikube` runs a single-node Kubernetes cluster on your personal +computer (including Windows, macOS and Linux PCs) so that you can try out +Kubernetes, or for daily development work. -Once you have Minikube working, you can use it to +You can follow the official +[Get Started!](https://minikube.sigs.k8s.io/docs/start/) guide if your focus is +on getting the tool installed. + +View minikube Get Started! Guide + +Once you have `minikube` working, you can use it to [run a sample application](/docs/tutorials/hello-minikube/). ## kind -Like Minikube, [kind](https://kind.sigs.k8s.io/docs/) lets you run Kubernetes on -your local computer. 
Unlike Minikube, kind only works with a single container runtime: -it requires that you have [Docker](https://docs.docker.com/get-docker/) installed -and configured. +Like `minikube`, [`kind`](https://kind.sigs.k8s.io/docs/) lets you run Kubernetes on +your local computer. Unlike `minikube`, `kind` only works with a single container +runtime: it requires that you have [Docker](https://docs.docker.com/get-docker/) +installed and configured. + +[Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) shows you what +you need to do to get up and running with `kind`. -[Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) shows you what you -need to do to get up and running with kind. +View kind Quick Start Guide diff --git a/content/en/docs/tasks/tools/install-minikube.md b/content/en/docs/tasks/tools/install-minikube.md deleted file mode 100644 index d8b5b101c4971..0000000000000 --- a/content/en/docs/tasks/tools/install-minikube.md +++ /dev/null @@ -1,262 +0,0 @@ ---- -title: Install Minikube -content_type: task -weight: 20 -card: - name: tasks - weight: 10 ---- - - - -This page shows you how to install [Minikube](/docs/tutorials/hello-minikube), a tool that runs a single-node Kubernetes cluster in a virtual machine on your personal computer. - - - -## {{% heading "prerequisites" %}} - - -{{< tabs name="minikube_before_you_begin" >}} -{{% tab name="Linux" %}} -To check if virtualization is supported on Linux, run the following command and verify that the output is non-empty: -``` -grep -E --color 'vmx|svm' /proc/cpuinfo -``` -{{% /tab %}} - -{{% tab name="macOS" %}} -To check if virtualization is supported on macOS, run the following command on your terminal. -``` -sysctl -a | grep -E --color 'machdep.cpu.features|VMX' -``` -If you see `VMX` in the output (should be colored), the VT-x feature is enabled in your machine. -{{% /tab %}} - -{{% tab name="Windows" %}} -To check if virtualization is supported on Windows 8 and above, run the following command on your Windows terminal or command prompt. -``` -systeminfo -``` -If you see the following output, virtualization is supported on Windows. -``` -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` - -If you see the following output, your system already has a Hypervisor installed and you can skip the next step. -``` -Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed. -``` - - -{{% /tab %}} -{{< /tabs >}} - - - - - -## Installing minikube - -{{< tabs name="tab_with_md" >}} -{{% tab name="Linux" %}} - -### Install kubectl - -Make sure you have kubectl installed. You can install kubectl according to the instructions in [Install and Set Up kubectl](/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux). - -### Install a Hypervisor - -If you do not already have a hypervisor installed, install one of these now: - -• [KVM](https://www.linux-kvm.org/), which also uses QEMU - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -Minikube also supports a `--driver=none` option that runs the Kubernetes components on the host and not in a VM. -Using this driver requires [Docker](https://www.docker.com/products/docker-desktop) and a Linux environment but not a hypervisor. - -If you're using the `none` driver in Debian or a derivative, use the `.deb` packages for -Docker rather than the snap package, which does not work with Minikube. 
-You can download `.deb` packages from [Docker](https://www.docker.com/products/docker-desktop). - -{{< caution >}} -The `none` VM driver can result in security and data loss issues. -Before using `--driver=none`, consult [this documentation](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) for more information. -{{< /caution >}} - -Minikube also supports a `vm-driver=podman` similar to the Docker driver. Podman run as superuser privilege (root user) is the best way to ensure that your containers have full access to any feature available on your system. - -{{< caution >}} -The `podman` driver requires running the containers as root because regular user accounts don't have full access to all operating system features that their containers might need to run. -{{< /caution >}} - -### Install Minikube using a package - -There are *experimental* packages for Minikube available; you can find Linux (AMD64) packages -from Minikube's [releases](https://github.com/kubernetes/minikube/releases) page on GitHub. - -Use your Linux's distribution's package tool to install a suitable package. - -### Install Minikube via direct download - -If you're not installing via a package, you can download a stand-alone -binary and use that. - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && chmod +x minikube -``` - -Here's an easy way to add the Minikube executable to your path: - -```shell -sudo mkdir -p /usr/local/bin/ -sudo install minikube /usr/local/bin/ -``` - -### Install Minikube using Homebrew - -As yet another alternative, you can install Minikube using Linux [Homebrew](https://docs.brew.sh/Homebrew-on-Linux): - -```shell -brew install minikube -``` - -{{% /tab %}} -{{% tab name="macOS" %}} -### Install kubectl - -Make sure you have kubectl installed. You can install kubectl according to the instructions in [Install and Set Up kubectl](/docs/tasks/tools/install-kubectl/#install-kubectl-on-macos). - -### Install a Hypervisor - -If you do not already have a hypervisor installed, install one of these now: - -• [HyperKit](https://github.com/moby/hyperkit) - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -• [VMware Fusion](https://www.vmware.com/products/fusion) - -### Install Minikube -The easiest way to install Minikube on macOS is using [Homebrew](https://brew.sh): - -```shell -brew install minikube -``` - -You can also install it on macOS by downloading a stand-alone binary: - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 \ - && chmod +x minikube -``` - -Here's an easy way to add the Minikube executable to your path: - -```shell -sudo mv minikube /usr/local/bin -``` - -{{% /tab %}} -{{% tab name="Windows" %}} -### Install kubectl - -Make sure you have kubectl installed. You can install kubectl according to the instructions in [Install and Set Up kubectl](/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows). - -### Install a Hypervisor - -If you do not already have a hypervisor installed, install one of these now: - -• [Hyper-V](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) - -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - -{{< note >}} -Hyper-V can run on three versions of Windows 10: Windows 10 Enterprise, Windows 10 Professional, and Windows 10 Education. 
-{{< /note >}} - -### Install Minikube using Chocolatey - -The easiest way to install Minikube on Windows is using [Chocolatey](https://chocolatey.org/) (run as an administrator): - -```shell -choco install minikube -``` - -After Minikube has finished installing, close the current CLI session and restart. Minikube should have been added to your path automatically. - -### Install Minikube using an installer executable - -To install Minikube manually on Windows using [Windows Installer](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal), download [`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest/download/minikube-installer.exe) and execute the installer. - -### Install Minikube via direct download - -To install Minikube manually on Windows, download [`minikube-windows-amd64`](https://github.com/kubernetes/minikube/releases/latest), rename it to `minikube.exe`, and add it to your path. - -{{% /tab %}} -{{< /tabs >}} - -## Confirm Installation - -To confirm successful installation of both a hypervisor and Minikube, you can run the following command to start up a local Kubernetes cluster: - -{{< note >}} - -For setting the `--driver` with `minikube start`, enter the name of the hypervisor you installed in lowercase letters where `` is mentioned below. A full list of `--driver` values is available in [specifying the VM driver documentation](/docs/setup/learning-environment/minikube/#specifying-the-vm-driver). - -{{< /note >}} - -{{< caution >}} -When using KVM, note that libvirt's default QEMU URI under Debian and some other systems is `qemu:///session` whereas Minikube's default QEMU URI is `qemu:///system`. If this is the case for your system, you will need to pass `--kvm-qemu-uri qemu:///session` to `minikube start`. -{{< /caution >}} - -```shell -minikube start --driver= -``` - -Once `minikube start` finishes, run the command below to check the status of the cluster: - -```shell -minikube status -``` - -If your cluster is running, the output from `minikube status` should be similar to: - -``` -host: Running -kubelet: Running -apiserver: Running -kubeconfig: Configured -``` - -After you have confirmed whether Minikube is working with your chosen hypervisor, you can continue to use Minikube or you can stop your cluster. 
To stop your cluster, run: - -```shell -minikube stop -``` - -## Clean up local state {#cleanup-local-state} - -If you have previously installed Minikube, and run: -```shell -minikube start -``` - -and `minikube start` returned an error: -``` -machine does not exist -``` - -then you need to clear minikube's local state: -```shell -minikube delete -``` - -## {{% heading "whatsnext" %}} - - -* [Running Kubernetes Locally via Minikube](/docs/setup/learning-environment/minikube/) diff --git a/content/en/docs/tutorials/configuration/configure-java-microservice/_index.md b/content/en/docs/tutorials/configuration/configure-java-microservice/_index.md new file mode 100755 index 0000000000000..8a5bc5d60471a --- /dev/null +++ b/content/en/docs/tutorials/configuration/configure-java-microservice/_index.md @@ -0,0 +1,5 @@ +--- +title: "Example: Configuring a Java Microservice" +weight: 10 +--- + diff --git a/content/en/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html b/content/en/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html new file mode 100644 index 0000000000000..bb926a1d197d5 --- /dev/null +++ b/content/en/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive.html @@ -0,0 +1,30 @@ +--- +title: "Interactive Tutorial - Configuring a Java Microservice" +weight: 20 +--- + + + + + + + + + + + +
+ +
+
+
+ To interact with the Terminal, please use the desktop/tablet version +
+
+
+
+ +
+ + + diff --git a/content/en/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice.md b/content/en/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice.md new file mode 100644 index 0000000000000..712bc64d55eae --- /dev/null +++ b/content/en/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice.md @@ -0,0 +1,39 @@ +--- +title: "Externalizing config using MicroProfile, ConfigMaps and Secrets" +content_type: tutorial +weight: 10 +--- + + + +In this tutorial you will learn how and why to externalize your microservice’s configuration. Specifically, you will learn how to use Kubernetes ConfigMaps and Secrets to set environment variables and then consume them using MicroProfile Config. + + +## {{% heading "prerequisites" %}} + +### Creating Kubernetes ConfigMaps & Secrets +There are several ways to set environment variables for a Docker container in Kubernetes, including: Dockerfile, kubernetes.yml, Kubernetes ConfigMaps, and Kubernetes Secrets. In the tutorial, you will learn how to use the latter two for setting your environment variables whose values will be injected into your microservices. One of the benefits for using ConfigMaps and Secrets is that they can be re-used across multiple containers, including being assigned to different environment variables for the different containers. + +ConfigMaps are API Objects that store non-confidential key-value pairs. In the Interactive Tutorial you will learn how to use a ConfigMap to store the application's name. For more information regarding ConfigMaps, you can find the documentation [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/). + +Although Secrets are also used to store key-value pairs, they differ from ConfigMaps in that they're intended for confidential/sensitive information and are stored using Base64 encoding. This makes secrets the appropriate choice for storing such things as credentials, keys, and tokens, the former of which you'll do in the Interactive Tutorial. For more information on Secrets, you can find the documentation [here](https://kubernetes.io/docs/concepts/configuration/secret/). + + +### Externalizing Config from Code +Externalized application configuration is useful because configuration usually changes depending on your environment. In order to accomplish this, we'll use Java's Contexts and Dependency Injection (CDI) and MicroProfile Config. MicroProfile Config is a feature of MicroProfile, a set of open Java technologies for developing and deploying cloud-native microservices. + +CDI provides a standard dependency injection capability enabling an application to be assembled from collaborating, loosely-coupled beans. MicroProfile Config provides apps and microservices a standard way to obtain config properties from various sources, including the application, runtime, and environment. Based on the source's defined priority, the properties are automatically combined into a single set of properties that the application can access via an API. Together, CDI & MicroProfile will be used in the Interactive Tutorial to retrieve the externally provided properties from the Kubernetes ConfigMaps and Secrets and get injected into your application code. + +Many open source frameworks and runtimes implement and support MicroProfile Config. Throughout the interactive tutorial, you'll be using Open Liberty, a flexible open-source Java runtime for building and running cloud-native apps and microservices. 
However, any MicroProfile compatible runtime could be used instead. + + +## {{% heading "objectives" %}} + +* Create a Kubernetes ConfigMap and Secret +* Inject microservice configuration using MicroProfile Config + + + + +## Example: Externalizing config using MicroProfile, ConfigMaps and Secrets +### [Start Interactive Tutorial](/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive/) \ No newline at end of file diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md index 901e0063cfbd9..6cc03c2198678 100644 --- a/content/en/docs/tutorials/hello-minikube.md +++ b/content/en/docs/tutorials/hello-minikube.md @@ -136,6 +136,9 @@ Kubernetes [*Service*](/docs/concepts/services-networking/service/). The `--type=LoadBalancer` flag indicates that you want to expose your Service outside of the cluster. + + The application code inside the image `k8s.gcr.io/echoserver` only listens on TCP port 8080. If you used + `kubectl expose` to expose a different port, clients could not connect to that other port. 2. View the Service you just created: @@ -283,4 +286,3 @@ minikube delete * Learn more about [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/). * Learn more about [Service objects](/docs/concepts/services-networking/service/). - diff --git a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index 13d3d99758338..5ac682d7af020 100644 --- a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -72,7 +72,7 @@

Cluster Diagram

The Master is responsible for managing the cluster. The master coordinates all activities in your cluster, such as scheduling applications, maintaining applications' desired state, scaling applications, and rolling out new updates.

-

A node is a VM or a physical computer that serves as a worker machine in a Kubernetes cluster. Each node has a Kubelet, which is an agent for managing the node and communicating with the Kubernetes master. The node should also have tools for handling container operations, such as Docker or rkt. A Kubernetes cluster that handles production traffic should have a minimum of three nodes.

+

A node is a VM or a physical computer that serves as a worker machine in a Kubernetes cluster. Each node has a Kubelet, which is an agent for managing the node and communicating with the Kubernetes master. The node should also have tools for handling container operations, such as containerd or Docker. A Kubernetes cluster that handles production traffic should have a minimum of three nodes.

diff --git a/content/en/examples/application/guestbook/redis-master-service.yaml b/content/en/examples/application/guestbook/redis-master-service.yaml index a484014f1fe3b..65cef2191c493 100644 --- a/content/en/examples/application/guestbook/redis-master-service.yaml +++ b/content/en/examples/application/guestbook/redis-master-service.yaml @@ -8,7 +8,8 @@ metadata: tier: backend spec: ports: - - port: 6379 + - name: redis + port: 6379 targetPort: 6379 selector: app: redis diff --git a/content/en/examples/service/networking/example-ingress.yaml b/content/en/examples/service/networking/example-ingress.yaml new file mode 100644 index 0000000000000..b309d13275105 --- /dev/null +++ b/content/en/examples/service/networking/example-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: example-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: hello-world.info + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: web + port: + number: 8080 \ No newline at end of file diff --git a/content/fr/docs/concepts/services-networking/ingress.md b/content/fr/docs/concepts/services-networking/ingress.md index f445ae25931f9..81018b3ede49d 100644 --- a/content/fr/docs/concepts/services-networking/ingress.md +++ b/content/fr/docs/concepts/services-networking/ingress.md @@ -72,7 +72,7 @@ Assurez-vous de consulter la documentation de votre contrôleur d’Ingress pour Exemple de ressource Ingress minimale : ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: test-ingress @@ -83,9 +83,12 @@ spec: - http: paths: - path: /testpath + pathType: Prefix backend: - serviceName: test - servicePort: 80 + service: + name: test + port: + number: 80 ``` Comme pour toutes les autres ressources Kubernetes, un Ingress (une entrée) a besoin des champs `apiVersion`,` kind` et `metadata`. @@ -126,14 +129,16 @@ Il existe des concepts Kubernetes qui vous permettent d’exposer un seul servic ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: test-ingress spec: - backend: - serviceName: testsvc - servicePort: 80 + defaultBackend: + service: + name: testsvc + port: + number: 80 ``` Si vous le créez en utilisant `kubectl create -f`, vous devriez voir : @@ -166,7 +171,7 @@ foo.bar.com -> 178.91.123.132 -> / foo service1:4200 ceci nécessitera un Ingress défini comme suit : ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: simple-fanout-example @@ -178,13 +183,19 @@ spec: http: paths: - path: /foo + pathType: Prefix backend: - serviceName: service1 - servicePort: 4200 + service: + name: service1 + port: + number: 4200 - path: /bar + pathType: Prefix backend: - serviceName: service2 - servicePort: 8080 + service: + name: service2 + port: + number: 8080 ``` Lorsque vous créez l'ingress avec `kubectl create -f`: @@ -233,7 +244,7 @@ bar.foo.com --| |-> bar.foo.com s2:80 L’Ingress suivant indique au load-balancer de router les requêtes en fonction de [En-tête du hôte](https://tools.ietf.org/html/rfc7230#section-5.4). 
```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: name-virtual-host-ingress @@ -242,21 +253,29 @@ spec: - host: foo.bar.com http: paths: - - backend: - serviceName: service1 - servicePort: 80 + - path: / + pathType: Prefix + backend: + service: + name: service1 + port: + number: 80 - host: bar.foo.com http: paths: - - backend: - serviceName: service2 - servicePort: 80 + - path: / + pathType: Prefix + backend: + service: + name: service2 + port: + number: 80 ``` Si vous créez une ressource Ingress sans aucun hôte défini dans les règles, tout trafic Web à destination de l'adresse IP de votre contrôleur d'Ingress peut être mis en correspondance sans qu'un hôte virtuel basé sur le nom ne soit requis. Par exemple, la ressource Ingress suivante acheminera le trafic demandé pour `first.bar.com` au `service1` `second.foo.com` au `service2`, et à tout trafic à l'adresse IP sans nom d'hôte défini dans la demande (c'est-à-dire sans en-tête de requête présenté) au `service3`. ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: name-virtual-host-ingress @@ -265,20 +284,32 @@ spec: - host: first.bar.com http: paths: - - backend: - serviceName: service1 - servicePort: 80 + - path: / + pathType: Prefix + backend: + service: + name: service1 + port: + number: 80 - host: second.foo.com http: paths: - - backend: - serviceName: service2 - servicePort: 80 + - path: / + pathType: Prefix + backend: + service: + name: service2 + port: + number: 80 - http: paths: - - backend: - serviceName: service3 - servicePort: 80 + - path: / + pathType: Prefix + backend: + service: + name: service3 + port: + number: 80 ``` ### TLS @@ -300,7 +331,7 @@ type: kubernetes.io/tls Référencer ce secret dans un Ingress indiquera au contrôleur d'ingress de sécuriser le canal du client au load-balancer à l'aide de TLS. Vous devez vous assurer que le secret TLS que vous avez créé provenait d'un certificat contenant un CN pour `sslexample.foo.com`. ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: tls-example-ingress @@ -314,9 +345,12 @@ spec: http: paths: - path: / + pathType: Prefix backend: - serviceName: service1 - servicePort: 80 + service: + name: service1 + port: + number: 80 ``` {{< note >}} @@ -373,16 +407,22 @@ spec: http: paths: - backend: - serviceName: s1 - servicePort: 80 + service: + name: s1 + port: + number: 80 path: /foo + pathType: Prefix - host: bar.baz.com http: paths: - backend: - serviceName: s2 - servicePort: 80 + service: + name: s2 + port: + number: 80 path: /foo + pathType: Prefix .. ``` diff --git a/content/fr/docs/concepts/storage/persistent-volumes.md b/content/fr/docs/concepts/storage/persistent-volumes.md index e1a1701fcbead..1f149bb6e5582 100644 --- a/content/fr/docs/concepts/storage/persistent-volumes.md +++ b/content/fr/docs/concepts/storage/persistent-volumes.md @@ -411,7 +411,7 @@ Un PV sans `storageClassName` n'a pas de classe et ne peut être lié qu'à des Dans le passé, l'annotation `volume.beta.kubernetes.io/storage-class` a été utilisé à la place de l'attribut `storageClassName`. Cette annotation fonctionne toujours; cependant, il deviendra complètement obsolète dans une future version de Kubernetes. 
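À titre d'illustration uniquement (le nom de classe `standard` et le nom du PVC ci-dessous sont des hypothèses, pas des valeurs imposées par la documentation), voici une esquisse minimale d'un PersistentVolumeClaim qui utilise le champ `storageClassName` plutôt que l'ancienne annotation :

```yaml
# Esquisse minimale : demande de stockage via le champ storageClassName.
# "standard" est un nom de StorageClass hypothétique, propre à votre cluster.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: exemple-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: standard
```

L'annotation `volume.beta.kubernetes.io/storage-class` aboutirait au même résultat tant qu'elle reste prise en charge, mais le champ `storageClassName` est la forme à privilégier.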
-### Politique de récupration +### Politique de récupération Les politiques de récupération actuelles sont: diff --git a/content/fr/docs/concepts/workloads/controllers/deployment.md b/content/fr/docs/concepts/workloads/controllers/deployment.md index e8034cc9adf69..e12c3ffbab257 100644 --- a/content/fr/docs/concepts/workloads/controllers/deployment.md +++ b/content/fr/docs/concepts/workloads/controllers/deployment.md @@ -116,7 +116,7 @@ Avant de commencer, assurez-vous que votre cluster Kubernetes est opérationnel. ```shell Waiting for rollout to finish: 2 out of 3 new replicas have been updated... - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` 1. Exécutez à nouveau `kubectl get deployments` quelques secondes plus tard. @@ -223,7 +223,7 @@ Suivez les étapes ci-dessous pour mettre à jour votre déploiement: ou ```text - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` Obtenez plus de détails sur votre déploiement mis à jour: @@ -932,7 +932,7 @@ La sortie est similaire à ceci: ```text Waiting for rollout to finish: 2 of 3 updated replicas are available... -deployment.apps/nginx-deployment successfully rolled out +deployment "nginx-deployment" successfully rolled out $ echo $? 0 ``` diff --git a/content/fr/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/fr/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md new file mode 100644 index 0000000000000..dc427699f6a55 --- /dev/null +++ b/content/fr/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -0,0 +1,299 @@ +--- +title: Configurer les Liveness, Readiness et Startup Probes +content_template: templates/task +weight: 110 +--- + +{{% capture overview %}} + +Cette page montre comment configurer les liveness, readiness et startup probes pour les conteneurs. + +Le [Kubelet](/docs/admin/kubelet/) utilise les liveness probes pour détecter quand redémarrer un conteneur. Par exemple, les Liveness probes pourraient attraper un deadlock dans le cas où une application est en cours d'exécution, mais qui est incapable de traiter les requêtes. Le redémarrage d'un conteneur dans un tel état rend l'application plus disponible malgré les bugs. + +Le Kubelet utilise readiness probes pour savoir quand un conteneur est prêt à accepter le trafic. Un Pod est considéré comme prêt lorsque tous ses conteneurs sont prêts. +Ce signal sert notamment à contrôler les pods qui sont utilisés comme backends pour les Services. Lorsqu'un Pod n'est pas prêt, il est retiré des équilibreurs de charge des Services. + +Le Kubelet utilise startup probes pour savoir quand une application d'un conteneur a démarré. +Si une telle probe est configurée, elle désactive les contrôles de liveness et readiness jusqu'à cela réussit, en s'assurant que ces probes n'interfèrent pas avec le démarrage de l'application. +Cela peut être utilisé dans le cas des liveness checks sur les conteneurs à démarrage lent, en les évitant de se faire tuer par le Kubelet avant qu'ils ne soient opérationnels. 
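Pour fixer les idées avant d'entrer dans le détail, voici une esquisse purement illustrative combinant les trois types de probes sur un même conteneur (l'image, le port 8080 et les chemins `/healthz` et `/ready` sont des hypothèses, pas des valeurs prescrites) :

```yaml
# Esquisse : startup, liveness et readiness probes sur un conteneur hypothétique.
apiVersion: v1
kind: Pod
metadata:
  name: exemple-probes
spec:
  containers:
  - name: app
    image: registry.example.com/app:1.0   # image hypothétique
    ports:
    - containerPort: 8080
    startupProbe:            # laisse du temps au démarrage de l'application
      httpGet:
        path: /healthz
        port: 8080
      failureThreshold: 30
      periodSeconds: 10
    livenessProbe:           # redémarre le conteneur s'il ne répond plus
      httpGet:
        path: /healthz
        port: 8080
      periodSeconds: 10
    readinessProbe:          # retire le Pod des Services tant qu'il n'est pas prêt
      httpGet:
        path: /ready
        port: 8080
      periodSeconds: 5
```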
+ +{{% /capture %}} + +{{% capture prerequisites %}} + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +{{% /capture %}} + +{{% capture steps %}} + +## Définir une commande de liveness + +De nombreuses applications fonctionnant pour des longues périodes finissent par passer à des états de rupture et ne peuvent pas se rétablir, sauf en étant redémarrées. Kubernetes fournit des liveness probes pour détecter et remédier à ces situations. + +Dans cet exercice, vous allez créer un Pod qui exécute un conteneur basé sur l'image `k8s.gcr.io/busybox`. Voici le fichier de configuration pour le Pod : + +{{< codenew file="pods/probe/exec-liveness.yaml" >}} + +Dans le fichier de configuration, vous constatez que le Pod a un seul conteneur. +Le champ `periodSeconds` spécifie que le Kubelet doit effectuer un check de liveness toutes les 5 secondes. Le champ `initialDelaySeconds` indique au Kubelet qu'il devrait attendre 5 secondes avant d'effectuer la première probe. Pour effectuer une probe, le Kubelet exécute la commande `cat /tmp/healthy` dans le conteneur. Si la commande réussit, elle renvoie 0, et le Kubelet considère que le conteneur est vivant et en bonne santé. Si la commande renvoie une valeur non nulle, le Kubelet tue le conteneur et le redémarre. + +Au démarrage, le conteneur exécute cette commande : + +```shell +/bin/sh -c "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600" +``` + +Pour les 30 premières secondes de la vie du conteneur, il y a un fichier `/tmp/healthy`. +Donc pendant les 30 premières secondes, la commande `cat /tmp/healthy` renvoie un code de succès. Après 30 secondes, `cat /tmp/healthy` renvoie un code d'échec. + +Créez le Pod : + +```shell +kubectl apply -f https://k8s.io/examples/pods/probe/exec-liveness.yaml +``` + +Dans les 30 secondes, visualisez les événements du Pod : + +```shell +kubectl describe pod liveness-exec +``` + +La sortie indique qu'aucune liveness probe n'a encore échoué : + +```shell +FirstSeen LastSeen Count From SubobjectPath Type Reason Message +--------- -------- ----- ---- ------------- -------- ------ ------- +24s 24s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0 +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "k8s.gcr.io/busybox" +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "k8s.gcr.io/busybox" +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined] +23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e +``` + +Après 35 secondes, visualisez à nouveau les événements du Pod : + +```shell +kubectl describe pod liveness-exec +``` + +Au bas de la sortie, il y a des messages indiquant que les liveness probes ont échoué, et que les conteneurs ont été tués et recréés. 
+ +```shell +FirstSeen LastSeen Count From SubobjectPath Type Reason Message +--------- -------- ----- ---- ------------- -------- ------ ------- +37s 37s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0 +36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "k8s.gcr.io/busybox" +36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "k8s.gcr.io/busybox" +36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined] +36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e +2s 2s 1 {kubelet worker0} spec.containers{liveness} Warning Unhealthy Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory +``` + +Attendez encore 30 secondes et vérifiez que le conteneur a été redémarré : + +```shell +kubectl get pod liveness-exec +``` + +La sortie montre que `RESTARTS` a été incrémenté : + +```shell +NAME READY STATUS RESTARTS AGE +liveness-exec 1/1 Running 1 1m +``` + +## Définir une requête HTTP de liveness + +Un autre type de liveness probe utilise une requête GET HTTP. Voici la configuration +d'un Pod qui fait fonctionner un conteneur basé sur l'image `k8s.gcr.io/liveness`. + +{{< codenew file="pods/probe/http-liveness.yaml" >}} + +Dans le fichier de configuration, vous pouvez voir que le Pod a un seul conteneur. +Le champ `periodSeconds` spécifie que le Kubelet doit effectuer une liveness probe toutes les 3 secondes. Le champ `initialDelaySeconds` indique au Kubelet qu'il devrait attendre 3 secondes avant d'effectuer la première probe. Pour effectuer une probe, le Kubelet envoie une requête HTTP GET au serveur qui s'exécute dans le conteneur et écoute sur le port 8080. Si le handler du chemin `/healthz` du serveur renvoie un code de succès, le Kubelet considère que le conteneur est vivant et en bonne santé. Si le handler renvoie un code d'erreur, le Kubelet tue le conteneur et le redémarre. + +Tout code supérieur ou égal à 200 et inférieur à 400 indique un succès. Tout autre code indique un échec. + +Vous pouvez voir le code source du serveur dans +[server.go](https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/liveness/server.go). + +Pendant les 10 premières secondes où le conteneur est en vie, le handler `/healthz` renvoie un statut de 200. Après cela, le handler renvoie un statut de 500. + +```go +http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + duration := time.Now().Sub(started) + if duration.Seconds() > 10 { + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("erreur: %v", duration.Seconds()))) + } else { + w.WriteHeader(200) + w.Write([]byte("ok")) + } +}) +``` + +Le Kubelet commence à effectuer des contrôles de santé 3 secondes après le démarrage du conteneur. +Ainsi, les premiers contrôles de santé seront réussis. Mais après 10 secondes, les contrôles de santé échoueront, et le Kubelet tuera et redémarrera le conteneur. 
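À titre indicatif, les champs `timeoutSeconds` et `failureThreshold` permettent d'ajuster ce comportement ; la variante ci-dessous est une simple esquisse dont les valeurs sont des hypothèses d'illustration, pas une recommandation :

```yaml
livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 3
  periodSeconds: 3
  timeoutSeconds: 2     # échec si le serveur ne répond pas en 2 secondes
  failureThreshold: 3   # 3 échecs consécutifs avant de redémarrer le conteneur
```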
+ +Pour essayer le HTTP liveness check, créez un Pod : + +```shell +kubectl apply -f https://k8s.io/examples/pods/probe/http-liveness.yaml +``` + +Après 10 secondes, visualisez les événements du Pod pour vérifier que les liveness probes ont échoué et le conteneur a été redémarré : + +```shell +kubectl describe pod liveness-http +``` + +Dans les versions antérieures à la v1.13 (y compris la v1.13), au cas où la variable d'environnement `http_proxy` (ou `HTTP_PROXY`) est définie sur le noeud où tourne un Pod, le HTTP liveness probe utilise ce proxy. +Dans les versions postérieures à la v1.13, les paramètres de la variable d'environnement du HTTP proxy local n'affectent pas le HTTP liveness probe. + +## Définir une TCP liveness probe + +Un troisième type de liveness probe utilise un TCP Socket. Avec cette configuration, le Kubelet tentera d'ouvrir un socket vers votre conteneur sur le port spécifié. +S'il arrive à établir une connexion, le conteneur est considéré comme étant en bonne santé, s'il n'y arrive pas, c'est un échec. + +{{< codenew file="pods/probe/tcp-liveness-readiness.yaml" >}} + +Comme vous le voyez, la configuration pour un check TCP est assez similaire à un check HTTP. +Cet exemple utilise à la fois des readiness et liveness probes. Le Kubelet transmettra la première readiness probe 5 secondes après le démarrage du conteneur. Il tentera de se connecter au conteneur `goproxy` sur le port 8080. Si la probe réussit, le conteneur sera marqué comme prêt. Kubelet continuera à effectuer ce check tous les 10 secondes. + +En plus de la readiness probe, cette configuration comprend une liveness probe. +Le Kubelet effectuera la première liveness probe 15 secondes après que le conteneur démarre. Tout comme la readiness probe, celle-ci tentera de se connecter au conteneur de `goproxy` sur le port 8080. Si la liveness probe échoue, le conteneur sera redémarré. + +Pour essayer la TCP liveness check, créez un Pod : + +```shell +kubectl apply -f https://k8s.io/examples/pods/probe/tcp-liveness-readiness.yaml +``` + +Après 15 secondes, visualisez les événements de Pod pour vérifier les liveness probes : + +```shell +kubectl describe pod goproxy +``` + +## Utilisation d'un port nommé + +Vous pouvez utiliser un [ContainerPort](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#containerport-v1-core) nommé pour les HTTP or TCP liveness probes : + +```yaml +ports: +- name: liveness-port + containerPort: 8080 + hostPort: 8080 + +livenessProbe: + httpGet: + path: /healthz + port: liveness-port +``` + +## Protéger les conteneurs à démarrage lent avec des startup probes {#define-startup-probes} + +Parfois, vous devez faire face à des applications legacy qui peuvent nécessiter un temps de démarrage supplémentaire lors de leur première initialisation. +Dans de telles situations, il peut être compliqué de régler les paramètres de la liveness probe sans compromettant la réponse rapide aux blocages qui ont motivé une telle probe. +L'astuce est de configurer une startup probe avec la même commande, HTTP ou TCP check avec un `failureThreshold * periodSeconds` assez long pour couvrir le pire des scénarios des temps de démarrage. 
+ +Ainsi, l'exemple précédent deviendrait : + +```yaml +ports: +- name: liveness-port + containerPort: 8080 + hostPort: 8080 + +livenessProbe: + httpGet: + path: /healthz + port: liveness-port + failureThreshold: 1 + periodSeconds: 10 + +startupProbe: + httpGet: + path: /healthz + port: liveness-port + failureThreshold: 30 + periodSeconds: 10 +``` + +Grâce à la startup probe, l'application aura un maximum de 5 minutes (30 * 10 = 300s) pour terminer son démarrage. +Une fois que la startup probe a réussi, la liveness probe prend le relais pour fournir une réponse rapide aux blocages de conteneurs. +Si la startup probe ne réussit jamais, le conteneur est tué après 300s puis soumis à la `restartPolicy` (politique de redémarrage) du Pod. + +## Définir les readiness probes + +Parfois, les applications sont temporairement incapables de servir le trafic. +Par exemple, une application peut avoir besoin de charger de grandes quantités de données ou des fichiers de configuration pendant le démarrage, ou elle peut dépendre de services externes après le démarrage. +Dans ces cas, vous ne voulez pas tuer l'application, mais vous ne voulez pas non plus lui envoyer de requêtes. Kubernetes fournit des readiness probes pour détecter et atténuer ces situations. Un Pod dont les conteneurs signalent qu'ils ne sont pas prêts ne reçoit pas de trafic via les Services Kubernetes. + +{{< note >}} +Les readiness probes fonctionnent sur le conteneur pendant tout son cycle de vie. +{{< /note >}} + +Les readiness probes sont configurées de la même façon que les liveness probes. La seule différence est que vous utilisez le champ `readinessProbe` au lieu du champ `livenessProbe`. + +```yaml +readinessProbe: + exec: + command: + - cat + - /tmp/healthy + initialDelaySeconds: 5 + periodSeconds: 5 +``` + +La configuration des readiness probes HTTP et TCP reste également identique à celle des liveness probes. + +Les readiness et liveness probes peuvent être utilisées en parallèle pour le même conteneur. +L'utilisation des deux peut garantir que le trafic n'atteigne pas un conteneur qui n'est pas prêt et que les conteneurs soient redémarrés en cas de défaillance. + +## Configurer les Probes + +{{< comment >}} +Éventuellement, une partie de cette section pourrait être déplacée vers un sujet conceptuel. +{{< /comment >}} + +Les [Probes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) ont un certain nombre de champs que vous pouvez utiliser pour contrôler plus précisément le comportement des liveness et readiness probes : + +* `initialDelaySeconds`: Nombre de secondes après le démarrage du conteneur avant que les liveness et readiness probes ne soient lancées. La valeur par défaut est de 0 seconde. La valeur minimale est 0. +* `periodSeconds`: La fréquence (en secondes) à laquelle la probe doit être effectuée. La valeur par défaut est de 10 secondes. La valeur minimale est de 1. +* `timeoutSeconds`: Nombre de secondes après lequel la probe expire (timeout). La valeur par défaut est de 1 seconde. La valeur minimale est de 1. +* `successThreshold`: Le minimum de succès consécutifs pour que la probe soit considérée comme réussie après avoir échoué. La valeur par défaut est 1. Doit être 1 pour la liveness probe. La valeur minimale est de 1. +* `failureThreshold`: Quand un Pod démarre et que la probe échoue, Kubernetes réessaiera `failureThreshold` fois avant d'abandonner. Abandonner en cas de liveness probe signifie le redémarrage du conteneur. En cas de readiness probe, le Pod sera marqué Unready.
+La valeur par défaut est 3, la valeur minimum est 1. + +[HTTP probes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#httpgetaction-v1-core) +ont des champs supplémentaires qui peuvent être définis sur `httpGet` : + +* `host`: Nom de l'hôte auquel se connecter, par défaut l'IP du pod. Vous voulez peut être mettre "Host" en httpHeaders à la place. +* `scheme`: Schéma à utiliser pour se connecter à l'hôte (HTTP ou HTTPS). La valeur par défaut est HTTP. +* `path`: Chemin d'accès sur le serveur HTTP. +* `httpHeaders`: En-têtes personnalisés à définir dans la requête. HTTP permet des en-têtes répétés. +* `port`: Nom ou numéro du port à accéder sur le conteneur. Le numéro doit être dans un intervalle de 1 à 65535. + +Pour une probe HTTP, le Kubelet envoie une requête HTTP au chemin et au port spécifiés pour effectuer la vérification. Le Kubelet envoie la probe à l'adresse IP du Pod, à moins que l'adresse ne soit surchargée par le champ optionnel `host` dans `httpGet`. Si Le champ `scheme` est mis à `HTTPS`, le Kubelet envoie une requête HTTPS en ignorant la vérification du certificat. Dans la plupart des scénarios, vous ne voulez pas définir le champ `host`. +Voici un scénario où vous le mettriez en place. Supposons que le conteneur écoute sur 127.0.0.1 et que le champ `hostNetwork` du Pod a la valeur true. Alors `host`, sous `httpGet`, devrait être défini à 127.0.0.1. Si votre Pod repose sur des hôtes virtuels, ce qui est probablement plus courant, vous ne devriez pas utiliser `host`, mais plutôt mettre l'en-tête `Host` dans `httpHeaders`. + +Le Kubelet fait la connexion de la probe au noeud, pas dans le Pod, ce qui signifie que vous ne pouvez pas utiliser un nom de service dans le paramètre `host` puisque le Kubelet est incapable pour le résoudre. + +{{% /capture %}} + +{{% capture whatsnext %}} + +* Pour en savoir plus sur +[Probes des Conteneurs](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). + +### Référence + +* [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) +* [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) +* [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) + +{{% /capture %}} + + diff --git a/content/fr/docs/tasks/configure-pod-container/share-process-namespace.md b/content/fr/docs/tasks/configure-pod-container/share-process-namespace.md new file mode 100644 index 0000000000000..f55431571d8fc --- /dev/null +++ b/content/fr/docs/tasks/configure-pod-container/share-process-namespace.md @@ -0,0 +1,102 @@ +--- +title: Partager l'espace de nommage des processus entre les conteneurs d'un Pod +min-kubernetes-server-version: v1.10 +content_template: templates/task +weight: 160 +--- + +{{% capture overview %}} + +{{< feature-state state="stable" for_k8s_version="v1.17" >}} + +Cette page montre comment configurer le partage de l'espace de noms d'un processus pour un pod. Lorsque le partage de l'espace de noms des processus est activé, les processus d'un conteneur sont visibles pour tous les autres conteneurs de ce pod. + +Vous pouvez utiliser cette fonctionnalité pour configurer les conteneurs coopérants, comme un conteneur de sidecar de gestionnaire de journaux, ou pour dépanner les images de conteneurs qui n'incluent pas d'utilitaires de débogage comme un shell. 
+ +{{% /capture %}} + +{{% capture prerequisites %}} + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +{{% /capture %}} + +{{% capture steps %}} + +## Configurer un Pod + +Le partage de l'espace de nommage des processus est activé en utilisant le champ `shareProcessNamespace` de `v1.PodSpec`. Par exemple : + +{{< codenew file="pods/share-process-namespace.yaml" >}} + +1. Créez le pod `nginx` sur votre cluster : + + ```shell + kubectl apply -f https://k8s.io/examples/pods/share-process-namespace.yaml + ``` + +1. Attachez-le au conteneur `shell` et lancez `ps` : + + ```shell + kubectl attach -it nginx -c shell + ``` + + Si vous ne voyez pas d'invite de commande, appuyez sur la touche Entrée. + + ``` + / # ps ax + PID USER TIME COMMAND + 1 root 0:00 /pause + 8 root 0:00 nginx: master process nginx -g daemon off; + 14 101 0:00 nginx: worker process + 15 root 0:00 sh + 21 root 0:00 ps ax + ``` + +Vous pouvez envoyer des signaux aux processus des autres conteneurs. Par exemple, envoyez `SIGHUP` à +nginx pour relancer le processus de worker. Cela nécessite la capacité `SYS_PTRACE`. + +``` +/ # kill -HUP 8 +/ # ps ax +PID USER TIME COMMAND + 1 root 0:00 /pause + 8 root 0:00 nginx: master process nginx -g daemon off; + 15 root 0:00 sh + 22 101 0:00 nginx: worker process + 23 root 0:00 ps ax +``` + +Il est même possible d'accéder aux autres conteneurs en utilisant le lien `/proc/$pid/root`. + +``` +/ # head /proc/8/root/etc/nginx/nginx.conf + +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +``` + +{{% /capture %}} + +{{% capture discussion %}} + +## Comprendre le partage de l'espace de nommage des processus + +Les pods partagent de nombreuses ressources, il est donc logique qu'ils partagent également l'espace de noms des processus. Certaines images de conteneur s'attendent toutefois à être isolées des autres conteneurs. Il est donc important de comprendre ces différences : + +1. **Le processus de conteneur n'a plus de PID 1.** Certaines images de conteneurs refusent de démarrer sans PID 1 (par exemple, les conteneurs utilisant `systemd`) ou exécutent des commandes comme `kill -HUP 1` pour signaler le processus du conteneur. Dans les pods avec un espace de noms de processus partagé, `kill -HUP 1` signalera la sandbox du pod. (`/pause` dans l'exemple ci-dessus.) + +1. **Les processus sont visibles par les autres conteneurs du pod.** Cela inclut toutes les informations visibles dans `/proc`, comme les mots de passe passés en argument ou les variables d'environnement. Celles-ci ne sont protégées que par des permissions Unix ordinaires. + +1. **Les systèmes de fichiers des conteneurs sont visibles par les autres conteneurs du pod à travers le lien `/proc/$pid/root`.** Cela rend le débogage plus facile, mais cela signifie aussi que les secrets du système de fichiers ne sont protégés que par les permissions du système de fichiers.
+ +{{% /capture %}} + + diff --git a/content/fr/examples/pods/probe/exec-liveness.yaml b/content/fr/examples/pods/probe/exec-liveness.yaml new file mode 100644 index 0000000000000..07bf75f85c6f3 --- /dev/null +++ b/content/fr/examples/pods/probe/exec-liveness.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: liveness-exec +spec: + containers: + - name: liveness + image: k8s.gcr.io/busybox + args: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + livenessProbe: + exec: + command: + - cat + - /tmp/healthy + initialDelaySeconds: 5 + periodSeconds: 5 diff --git a/content/fr/examples/pods/probe/http-liveness.yaml b/content/fr/examples/pods/probe/http-liveness.yaml new file mode 100644 index 0000000000000..670af18399e20 --- /dev/null +++ b/content/fr/examples/pods/probe/http-liveness.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: liveness-http +spec: + containers: + - name: liveness + image: k8s.gcr.io/liveness + args: + - /server + livenessProbe: + httpGet: + path: /healthz + port: 8080 + httpHeaders: + - name: Custom-Header + value: Awesome + initialDelaySeconds: 3 + periodSeconds: 3 diff --git a/content/fr/examples/pods/probe/tcp-liveness-readiness.yaml b/content/fr/examples/pods/probe/tcp-liveness-readiness.yaml new file mode 100644 index 0000000000000..08fb77ff0f58c --- /dev/null +++ b/content/fr/examples/pods/probe/tcp-liveness-readiness.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goproxy + labels: + app: goproxy +spec: + containers: + - name: goproxy + image: k8s.gcr.io/goproxy:0.1 + ports: + - containerPort: 8080 + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 diff --git a/content/fr/examples/pods/share-process-namespace.yaml b/content/fr/examples/pods/share-process-namespace.yaml new file mode 100644 index 0000000000000..af812732a247a --- /dev/null +++ b/content/fr/examples/pods/share-process-namespace.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + shareProcessNamespace: true + containers: + - name: nginx + image: nginx + - name: shell + image: busybox + securityContext: + capabilities: + add: + - SYS_PTRACE + stdin: true + tty: true diff --git a/content/id/docs/concepts/workloads/controllers/deployment.md b/content/id/docs/concepts/workloads/controllers/deployment.md index 8eae6c579feec..b4e728cd9283f 100644 --- a/content/id/docs/concepts/workloads/controllers/deployment.md +++ b/content/id/docs/concepts/workloads/controllers/deployment.md @@ -96,7 +96,7 @@ Dalam contoh ini: 3. Untuk melihat status rilis Deployment, jalankan `kubectl rollout status deployment.v1.apps/nginx-deployment`. Keluaran akan tampil seperti berikut: ```shell Waiting for rollout to finish: 2 out of 3 new replicas have been updated... - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` 4. Jalankan `kubectl get deployments` lagi beberapa saat kemudian. 
Keluaran akan tampil seperti berikut: @@ -179,7 +179,7 @@ Ikuti langkah-langkah berikut untuk membarui Deployment: ``` atau ``` - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` Untuk menampilkan detail lain dari Deployment yang terbaru: @@ -826,7 +826,7 @@ kubectl rollout status deployment.v1.apps/nginx-deployment Keluaran akan tampil seperti berikut: ``` Waiting for rollout to finish: 2 of 3 updated replicas are available... -deployment.apps/nginx-deployment successfully rolled out +deployment "nginx-deployment" successfully rolled out $ echo $? 0 ``` diff --git a/content/id/docs/reference/glossary/deployment.md b/content/id/docs/reference/glossary/deployment.md new file mode 100644 index 0000000000000..6d21100fbb64c --- /dev/null +++ b/content/id/docs/reference/glossary/deployment.md @@ -0,0 +1,18 @@ +--- +title: Deployment +id: deployment +date: 2018-04-12 +full_link: /id/docs/concepts/workloads/controllers/deployment/ +short_description: > + Mengelola aplikasi yang direplikasi di dalam klastermu. +aka: +tags: +- fundamental +- core-object +- workload +--- +Sebuah objek API yang mengelola aplikasi yang direplikasi, biasanya dengan menjalankan Pod tanpa keadaan (_state_) lokal. + + + +Setiap replika direpresentasikan oleh sebuah {{< glossary_tooltip term_id="pod" >}}, dan Pod tersebut didistribusikan di antara {{< glossary_tooltip term_id="node" >}} dari suatu klaster. Untuk beban kerja yang membutuhkan keadaan lokal, pertimbangkan untuk menggunakan {{< glossary_tooltip term_id="StatefulSet" >}}. diff --git a/content/id/docs/reference/glossary/device-plugin.md b/content/id/docs/reference/glossary/device-plugin.md new file mode 100644 index 0000000000000..1318f448124e7 --- /dev/null +++ b/content/id/docs/reference/glossary/device-plugin.md @@ -0,0 +1,20 @@ +--- +title: Pugasan Peranti +id: device-plugin +date: 2019-02-02 +full_link: /id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/ +short_description: > + Ekstensi perangkat lunak untuk memungkinkan Pod mengakses peranti yang membutuhkan inisialisasi atau penyiapan khusus. +aka: +- Device Plugin +tags: +- fundamental +- extension +--- +Pugasan peranti berjalan pada {{< glossary_tooltip term_id="node" >}} pekerja dan menyediakan akses ke sumber daya untuk {{< glossary_tooltip term_id="pod" >}}, seperti perangkat keras lokal, yang membutuhkan langkah inisialisasi atau penyiapan khusus. + + + +Pugasan peranti menawarkan sumber daya ke {{< glossary_tooltip term_id="kubelet" text="kubelet" >}}, sehingga beban kerja Pod dapat mengakses fitur perangkat keras yang berhubungan dengan Node di mana Pod tersebut berjalan. Kamu dapat menggelar sebuah pugasan peranti sebagai sebuah {{< glossary_tooltip term_id="daemonset" >}}, atau menginstal perangkat lunak pugasan peranti secara langsung pada setiap Node target. + +Lihat [Pugasan Peranti](/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) untuk informasi lebih lanjut. 
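Sebagai ilustrasi saja (nama image, label, dan isi manifes di bawah ini hanyalah asumsi, bukan bagian dari dokumentasi resmi), sebuah pugasan peranti biasanya digelar sebagai DaemonSet yang memasang direktori socket kubelet:

```yaml
# Sketsa minimal DaemonSet hipotetis untuk sebuah pugasan peranti.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: contoh-pugasan-peranti
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: contoh-pugasan-peranti
  template:
    metadata:
      labels:
        name: contoh-pugasan-peranti
    spec:
      containers:
      - name: plugin
        image: example.com/device-plugin:1.0   # image hipotetis
        volumeMounts:
        - name: device-plugin
          mountPath: /var/lib/kubelet/device-plugins
      volumes:
      - name: device-plugin
        hostPath:
          path: /var/lib/kubelet/device-plugins
```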
diff --git a/content/id/docs/reference/glossary/disruption.md b/content/id/docs/reference/glossary/disruption.md new file mode 100644 index 0000000000000..fb4898f397e76 --- /dev/null +++ b/content/id/docs/reference/glossary/disruption.md @@ -0,0 +1,18 @@ +--- +title: Disrupsi +id: disruption +date: 2019-09-10 +full_link: /id/docs/concepts/workloads/pods/disruptions/ +short_description: > + Peristiwa yang menyebabkan hilangnya Pod +aka: +tags: +- fundamental +--- +Disrupsi merupakan kejadian yang menyebabkan hilangnya satu atau beberapa {{< glossary_tooltip term_id="pod" >}}. Suatu disrupsi memiliki konsekuensi terhadap sumber daya beban kerja, seperti {{< glossary_tooltip term_id="deployment" >}}, yang bergantung pada Pod yang terpengaruh. + + + +Jika kamu, sebagai operator klaster, menghancurkan sebuah Pod milik suatu aplikasi, maka hal ini dalam Kubernetes dikenal sebagai disrupsi disengaja (_voluntary disruption_). Jika sebuah Pod menghilang karena kegagalan Node, atau pemadaman yang mempengaruhi zona kegagalan yang lebih luas, maka dalam Kubernetes dikenal dengan istilah disrupsi tidak disengaja (_involuntary disruption_). + +Lihat [Disrupsi](/id/docs/concepts/workloads/pods/disruptions/) untuk informasi lebih lanjut. diff --git a/content/id/docs/reference/glossary/docker.md b/content/id/docs/reference/glossary/docker.md new file mode 100644 index 0000000000000..12e40468b621f --- /dev/null +++ b/content/id/docs/reference/glossary/docker.md @@ -0,0 +1,16 @@ +--- +title: Docker +id: docker +date: 2018-04-12 +full_link: https://docs.docker.com/engine/ +short_description: > + Docker merupakan suatu teknologi perangkat lunak yang menyediakan virtualisasi pada level sistem operasi yang juga dikenal sebagai Container. +aka: +tags: +- fundamental +--- +Docker (secara spesifik, Docker Engine) merupakan suatu teknologi perangkat lunak yang menyediakan virtualisasi pada level sistem operasi yang juga dikenal sebagai {{< glossary_tooltip term_id="container" >}}. + + + +Docker menggunakan fitur isolasi sumber daya pada kernel Linux seperti cgroup dan _namespace_, dan [UnionFS](https://docs.docker.com/get-started/overview/#union-file-systems) seperti OverlayFS dan lainnya untuk memungkinkan masing-masing Container dijalankan pada satu instans Linux, menghindari beban tambahan (_overhead_) saat memulai dan menjalankan VM. diff --git a/content/id/docs/reference/glossary/ephemeral-container.md b/content/id/docs/reference/glossary/ephemeral-container.md new file mode 100644 index 0000000000000..bed099180769d --- /dev/null +++ b/content/id/docs/reference/glossary/ephemeral-container.md @@ -0,0 +1,17 @@ +--- +title: Container Sementara +id: ephemeral-container +date: 2019-08-26 +full_link: /id/docs/concepts/workloads/pods/ephemeral-containers/ +short_description: > + Jenis tipe Container yang dapat kamu jalankan sementara di dalam sebuah Pod. +aka: +- Ephemeral Container +tags: +- fundamental +--- +Jenis tipe {{< glossary_tooltip term_id="container" >}} yang dapat kamu jalankan sementara di dalam sebuah {{< glossary_tooltip term_id="pod" >}}. + + + +Jika kamu ingin menyelidiki sebuah Pod yang bermasalah, kamu dapat menambahkan Container sementara ke Pod tersebut dan menjalankan diagnosis. Container sementara tidak memiliki jaminan sumber daya atau penjadwalan, dan kamu tidak boleh menggunakannya untuk menjalankan bagian mana pun dari beban kerja. 
diff --git a/content/id/docs/reference/glossary/extensions.md b/content/id/docs/reference/glossary/extensions.md new file mode 100644 index 0000000000000..2e8e73d55325a --- /dev/null +++ b/content/id/docs/reference/glossary/extensions.md @@ -0,0 +1,17 @@ +--- +title: Ekstensi +id: Extensions +date: 2019-02-01 +full_link: /id/docs/concepts/extend-kubernetes/extend-cluster/#perluasan +short_description: > + Ekstensi adalah komponen perangkat lunak yang memperluas dan terintegrasi secara mendalam dengan Kubernetes untuk mendukung perangkat keras baru. +aka: +tags: +- fundamental +- extension +--- +Ekstensi adalah komponen perangkat lunak yang memperluas dan terintegrasi secara mendalam dengan Kubernetes untuk mendukung perangkat keras baru. + + + +Sebagian besar admin klaster akan menggunakan instans Kubernetes yang dihoskan (_hosted_) atau didistribusikan. Akibatnya, hampir semua pengguna Kubernetes perlu menginstal [ekstensi](/id/docs/concepts/extend-kubernetes/extend-cluster/#perluasan) dan sedikit pengguna yang perlu membuat ekstensi baru. diff --git a/content/id/docs/reference/glossary/image.md b/content/id/docs/reference/glossary/image.md index bc26114e60bf4..d0c09fc978dc5 100644 --- a/content/id/docs/reference/glossary/image.md +++ b/content/id/docs/reference/glossary/image.md @@ -4,15 +4,14 @@ id: image date: 2019-04-24 full_link: short_description: > - Instans yang disimpan dari sebuah kontainer yang mengandung seperangkat perangkat lunak yang dibutuhkan untuk menjalankan sebuah aplikasi. + Instans yang disimpan dari sebuah Container yang memuat seperangkat perangkat lunak yang dibutuhkan untuk menjalankan sebuah aplikasi. -aka: +aka: tags: - fundamental --- - Instans yang disimpan dari sebuah kontainer yang mengandung seperangkat perangkat lunak yang dibutuhkan untuk menjalankan sebuah aplikasi. +Instans yang disimpan dari sebuah Container yang memuat seperangkat perangkat lunak yang dibutuhkan untuk menjalankan sebuah aplikasi. - - -Sebuah mekanisme untuk mengemas perangkat lunak yang mengizinkan perangkat lunak tersebut untuk disimpan di dalam registri kontainer, di-_pull_ kedalam filesystem lokal, dan dijalankan sebagai suatu aplikasi. Meta data yang dimasukkan mengindikasikan _executable_ apa sajakah yang perlu dijalanmkan, siapa yang membuat _executable_ tersebut, dan informasi lainnya. + +Sebuah mekanisme untuk mengemas perangkat lunak yang memungkinkan perangkat lunak tersebut untuk disimpan di dalam register Container, ditarik ke dalam sistem lokal, dan dijalankan sebagai suatu aplikasi. Metadata disertakan di dalam _image_ yang mengindikasikan _executable_ apa saja yang perlu dijalankan, siapa yang membuatnya, dan informasi lainnya. diff --git a/content/id/docs/reference/glossary/init-container.md b/content/id/docs/reference/glossary/init-container.md new file mode 100644 index 0000000000000..3300504d9e245 --- /dev/null +++ b/content/id/docs/reference/glossary/init-container.md @@ -0,0 +1,17 @@ +--- +title: Container Inisialisasi +id: init-container +date: 2018-04-12 +full_link: +short_description: > + Satu atau beberapa Container inisialisasi yang harus berjalan hingga selesai sebelum Container aplikasi apapun dijalankan. +aka: +- Init Container +tags: +- fundamental +--- +Satu atau beberapa {{< glossary_tooltip term_id="container" >}} inisialisasi yang harus berjalan hingga selesai sebelum Container aplikasi apapun dijalankan. 
+ + + +Container inisialisasi mirip seperti Container aplikasi biasa, dengan satu perbedaan: Container inisialisasi harus berjalan sampai selesai sebelum Container aplikasi lainnya dijalankan. Container inisialisasi dijalankan secara seri: setiap Container inisialisasi harus berjalan sampai selesai sebelum Container inisialisasi berikutnya dijalankan. diff --git a/content/id/docs/reference/glossary/job.md b/content/id/docs/reference/glossary/job.md new file mode 100644 index 0000000000000..2cc72f407338a --- /dev/null +++ b/content/id/docs/reference/glossary/job.md @@ -0,0 +1,18 @@ +--- +title: Job +id: job +date: 2018-04-12 +full_link: /docs/concepts/workloads/controllers/job/ +short_description: > + Tugas terbatas atau bertumpuk (_batch_) yang berjalan sampai selesai. +aka: +tags: +- fundamental +- core-object +- workload +--- +Tugas terbatas atau bertumpuk (_batch_) yang berjalan sampai selesai. + + + +Membuat satu atau beberapa objek {{< glossary_tooltip term_id="pod" >}} dan memastikan bahwa sejumlah objek tersebut berhasil dihentikan. Saat Pod berhasil diselesaikan (_complete_), maka Job melacak keberhasilan penyelesaian tersebut. diff --git a/content/id/docs/reference/glossary/kube-apiserver.md b/content/id/docs/reference/glossary/kube-apiserver.md index fda5ec39d5841..a1b04754bd746 100644 --- a/content/id/docs/reference/glossary/kube-apiserver.md +++ b/content/id/docs/reference/glossary/kube-apiserver.md @@ -4,16 +4,15 @@ id: kube-apiserver date: 2019-04-21 full_link: /docs/reference/generated/kube-apiserver/ short_description: > - Komponen di master yang mengekspos API Kubernetes. Merupakan front-end dari kontrol plane Kubernetes. + Komponen _control plane_ yang mengekspos API Kubernetes. Merupakan _front-end_ dari _control plane_ Kubernetes. aka: tags: - architecture - fundamental --- - Komponen di master yang mengekspos API Kubernetes. Merupakan front-end dari kontrol plane Kubernetes. +Komponen _control plane_ yang mengekspos API Kubernetes. Merupakan _front-end_ dari _control plane_ Kubernetes. -Komponen ini didesain agar dapat di-scale secara horizontal. Lihat [Membangun Klaster HA](/docs/admin/high-availability/). - +Komponen ini didesain agar dapat diskalakan secara horizontal. Lihat [Membangun Klaster HA](/docs/admin/high-availability/). diff --git a/content/id/docs/tasks/administer-cluster/cluster-management.md b/content/id/docs/tasks/administer-cluster/cluster-management.md new file mode 100644 index 0000000000000..0473dde9f3d8b --- /dev/null +++ b/content/id/docs/tasks/administer-cluster/cluster-management.md @@ -0,0 +1,221 @@ +--- +title: Manajemen Klaster +content_type: concept +--- + + + +Dokumen ini menjelaskan beberapa topik yang terkait dengan siklus hidup sebuah klaster: membuat klaster baru, +memperbarui Node _control plane_ dan Node pekerja dari klaster kamu, +melakukan pemeliharaan Node (misalnya pembaruan kernel), dan meningkatkan versi API Kubernetes dari +klaster yang berjalan. + + + + +## Membuat dan mengonfigurasi klaster + +Untuk menginstal Kubernetes dalam sekumpulan mesin, konsultasikan dengan salah satu [Panduan Memulai](/id/docs/setup) tergantung dengan lingkungan kamu. + +## Memperbarui klaster + +Status saat ini pembaruan klaster bergantung pada penyedia, dan beberapa rilis yang mungkin memerlukan perhatian khusus saat memperbaruinya. Direkomendasikan agar admin membaca [Catatan Rilis](https://git.k8s.io/kubernetes/CHANGELOG/README.md), serta catatan khusus pembaruan versi sebelum memperbarui klaster mereka. 
+ +### Memperbarui klaster Azure Kubernetes Service (AKS) + +Azure Kubernetes Service memungkinkan pembaruan layanan mandiri yang mudah dari _control plane_ dan Node pada klaster kamu. Prosesnya adalah +saat ini dimulai oleh pengguna dan dijelaskan dalam [Azure AKS documentation](https://docs.microsoft.com/en-us/azure/aks/upgrade-cluster). + +### Memperbarui klaster Google Compute Engine + +Google Compute Engine Open Source (GCE-OSS) mendukung pembaruan _control plane_ dengan menghapus dan +membuat ulang _control plane_, sambil mempertahankan _Persistent Disk_ (PD) yang sama untuk memastikan bahwa data disimpan pada berkas +untuk setiap kali pembaruan. + +Pembaruan Node untuk GCE menggunakan [grup _instance_ yang di-_manage_](https://cloud.google.com/compute/docs/instance-groups/), dimana setiap Node +dihancurkan secara berurutan dan kemudian dibuat ulang dengan perangkat lunak baru. Semua Pod yang berjalan di Node tersebut harus +dikontrol oleh pengontrol replikasi (_Replication Controller_), atau dibuat ulang secara manual setelah peluncuran. + +Pembaruan versi pada klaster open source Google Compute Engine (GCE) yang dikontrol oleh skrip `cluster/gce/upgrade.sh`. + +Dapatkan penggunaan dengan menjalankan `cluster/gce/upgrade.sh -h`. + +Misalnya, untuk meningkatkan hanya _control plane_ kamu ke versi tertentu (v1.0.2): + +```shell +cluster/gce/upgrade.sh -M v1.0.2 +``` + +Sebagai alternatif, untuk meningkatkan seluruh klaster kamu ke rilis yang stabil terbaru gunakan: + +```shell +cluster/gce/upgrade.sh release/stable +``` + +### Memperbarui klaster Google Kubernetes Engine + +Google Kubernetes Engine secara otomatis memperbarui komponen _control plane_ (misalnya, `kube-apiserver`, ` kube-scheduler`) ke versi yang terbaru. Ini juga menangani pembaruan sistem operasi dan komponen lain yang dijalankan oleh _control plane_. + +Proses pembaruan Node dimulai oleh pengguna dan dijelaskan dalam [Dokumentasi Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs/clusters/upgrade). + +### Memperbarui klaster Amazon EKS + +Komponen _control plane_ klaster pada Amazon EKS dapat diperbarui dengan menggunakan eksctl, AWS Management Console, atau AWS CLI. Prosesnya dimulai oleh pengguna dan dijelaskan di [Dokumentasi Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html). + +### Memperbarui klaster Oracle Cloud Infrastructure Container Engine untuk Kubernetes (OKE) + +Oracle membuat dan mengelola sekumpulan Node _control plane_ pada _control plane_ Oracle atas nama kamu (dan infrastruktur Kubernetes terkait seperti Node etcd) untuk memastikan kamu memiliki Kubernetes _control plane_ yang terkelola dengan ketersedian tinggi. Kamu juga dapat memperbarui Node _control plane_ ini dengan mulus ke versi Kubernetes baru tanpa berhenti. Tindakan ini dijelaskan dalam [Dokumentasi OKE](https://docs.cloud.oracle.com/iaas/Content/ContEng/Tasks/contengupgradingk8smasternode.htm). + +### Memperbarui klaster pada platform yang lain + +Penyedia dan alat yang berbeda akan mengelola pembaruan secara berbeda. Kamu disarankan untuk membaca dokumentasi utama mereka terkait pembaruan. + +* [kops](https://github.com/kubernetes/kops) +* [kubespray](https://github.com/kubernetes-incubator/kubespray) +* [CoreOS Tectonic](https://coreos.com/tectonic/docs/latest/admin/upgrade.html) +* [Digital Rebar](https://provision.readthedocs.io/en/tip/doc/content-packages/krib.html) +* ... 
+ +Untuk memperbarukan sebuah klaster pada platform yang tidak disebutkan dalam daftar di atas, periksa urutan pembaruan komponen pada +halaman [Versi Skewed](/docs/setup/release/version-skew-policy/#supported-component-upgrade-order). + +## Merubah ukuran klaster + +Jika klaster kamu kekurangan sumber daya, kamu dapat dengan mudah menambahkan lebih banyak mesin ke klaster tersebut jika klaster kamu +menjalankan [Mode Node Registrasi Sendiri](/docs/concepts/architecture/nodes/#self-registration-of-nodes). +Jika kamu menggunakan GCE atau Google Kubernetes Engine, itu dilakukan dengan mengubah ukuran grup _instance_ yang mengelola Node kamu. +Ini dapat dilakukan dengan mengubah jumlah _instance_ pada +`Compute > Compute Engine > Instance groups > your group > Edit group` +[Laman Google Cloud Console](https://console.developers.google.com) atau dengan baris perintah gcloud: + +```shell +gcloud compute instance-groups managed resize kubernetes-node-pool --size=42 --zone=$ZONE +``` + +Grup _instance_ akan menangani penempatan _image_ yang sesuai pada mesin baru dan memulainya, +sedangkan Kubelet akan mendaftarkan Node-nya ke server API agar tersedia untuk penjadwalan. +Jika kamu menurunkan skala grup _instance_, sistem akan secara acak memilih Node untuk dimatikan. + +Di lingkungan lain kamu mungkin perlu mengonfigurasi mesin sendiri dan memberi tahu Kubelet di mana server API mesin itu berjalan. + +### Merubah ukuran klaster Azure Kubernetes Service (AKS) + +Azure Kubernetes Service memungkinkan perubahan ukuran klaster yang dimulai oleh pengguna dari CLI atau +portal Azure dan dijelaskan dalam [Dokumentasi Azure AKS](https://docs.microsoft.com/en-us/azure/aks/scale-cluster). + + +### Penyekalaan otomatis klaster + +Jika kamu menggunakan GCE atau Google Kubernetes Engine, kamu dapat mengonfigurasi klaster kamu sehingga secara otomatis diskalakan berdasarkan +kebutuhan Pod. + +Seperti yang dideskripsikan dalam [Sumber daya komputasi](/id/docs/concepts/configuration/manage-resources-containers/), +pengguna dapat memesan berapa banyak CPU dan memori yang dialokasikan ke Pod. +Informasi ini digunakan oleh penjadwal Kubernetes untuk menemukan tempat menjalankan Pod. Jika +tidak ada Node yang memiliki kapasitas kosong yang cukup (atau tidak sesuai dengan persyaratan Pod yang lainnya) maka Pod +menunggu sampai beberapa Pod dihentikan atau Node baru ditambahkan. + +Penyekala otomatis klaster mencari Pod yang tidak dapat dijadwalkan dan memeriksa apakah perlu menambahkan Node baru, yang serupa +dengan Node yang lain dalam klaster untuk membantu. Jika ya, maka itu mengubah ukuran klaster agar dapat mengakomodasi Pod yang menunggu. + +Penyekala otomatis klaster juga menurunkan skala klaster jika mengetahui bahwa satu atau beberapa Node tidak diperlukan lagi untuk +periode waktu tambahan (selama 10 menit tetapi dapat berubah di masa mendatang). + +Penyekala otomatis klaster dikonfigurasikan untuk per grup _instance_ (GCE) atau kumpulan Node (Google Kubernetes Engine). + +Jika kamu menggunakan GCE, kamu dapat mengaktifkannya sambil membuat klaster dengan skrip kube-up.sh. +Untuk mengonfigurasi penyekala otomatis klaster, kamu harus menyetel tiga variabel lingkungan: + +* `KUBE_ENABLE_CLUSTER_AUTOSCALER` - mengaktifkan penyekala otomatis klaster kalau di setel menjadi _true_. +* `KUBE_AUTOSCALER_MIN_NODES` - minimal jumlah Node dalam klaster. +* `KUBE_AUTOSCALER_MAX_NODES` - maksimal jumlah Node dalam klaster. 
+ +Contoh: + +```shell +KUBE_ENABLE_CLUSTER_AUTOSCALER=true KUBE_AUTOSCALER_MIN_NODES=3 KUBE_AUTOSCALER_MAX_NODES=10 NUM_NODES=5 ./cluster/kube-up.sh +``` + +Pada Google Kubernetes Engine, kamu mengonfigurasi penyekala otomatis klaster baik saat pembuatan atau pembaruan klaster atau saat membuat kumpulan Node tertentu +(yang ingin kamu skalakan secara otomatis) dengan meneruskan _flag_ `--enable-autoscaling`, `--min-nodes` dan `--max-nodes` +yang sesuai dengan perintah `gcloud`. + +Contoh: + +```shell +gcloud container clusters create mytestcluster --zone=us-central1-b --enable-autoscaling --min-nodes=3 --max-nodes=10 --num-nodes=5 +``` + +```shell +gcloud container clusters update mytestcluster --enable-autoscaling --min-nodes=1 --max-nodes=15 +``` + +**Penyekala otomatis klaster mengharapkan bahwa Node belum dimodifikasi secara manual (misalnya dengan menambahkan label melalui kubectl) karena properti tersebut tidak akan disebarkan ke Node baru dalam grup _instance_ yang sama.** + +Untuk detail selengkapnya tentang cara penyekala otomatis klaster memutuskan apakah, kapan dan bagaimana +melakukan penyekalaan sebuah klaster, silahkan lihat dokumentasi [FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) +dari proyek penyekala otomatis klaster. + +## Memelihara dalam Node + +Jika kamu perlu memulai ulang Node (seperti untuk pembaruan kernel, pembaruan libc, pembaruan perangkat keras, dll.) dan waktu kegagalan (_downtime_) yang +singkat, lalu ketika Kubelet memulai ulang, maka ia akan mencoba untuk memulai ulang Pod yang dijadwalkan. Jika mulai ulang membutuhkan waktu yang lebih lama +(waktu bawaan adalah 5 menit, yang dikontrol oleh `--pod-eviction-timeout` pada _controller-manager_), +maka pengontrol Node akan menghentikan Pod yang terikat ke Node yang tidak tersedia. Jika ada yang sesuai dengan +kumpulan replika (atau pengontrol replikasi), maka salinan baru dari Pod akan dimulai pada Node yang berbeda. Jadi, dalam kasus di mana semua +Pod direplikasi, pembaruan dapat dilakukan tanpa koordinasi khusus, dengan asumsi bahwa tidak semua Node akan mati pada saat yang bersamaan. + +Jika kamu ingin lebih mengontrol proses pembaruan, kamu dapat menggunakan alur kerja berikut ini: + +Gunakan `kubectl drain` untuk meghentikan perlahan-lahan semua Pod dalam Node ketika menandai Node sebagai _unschedulable_: + +```shell +kubectl drain $NODENAME +``` + +Ini mencegah Pod baru mendarat pada Node saat kamu mencoba melepaskannya. + +Untuk Pod dengan sebuah kumpulan replika, Pod tersebut akan diganti dengan Pod baru yang akan dijadwalkan ke Node baru. Selain itu, jika Pod adalah bagian dari layanan, maka klien akan secara otomatis dialihkan ke Pod baru. + +Untuk Pod yang tidak memiliki replika, kamu perlu memunculkan salinan baru dari Pod tersebut, dan menganggapnya bukan bagian dari layanan, alihkan klien ke Pod tersebut. + +Lakukan pekerjaan pemeliharaan pada Node. + +Buat Node dapat dijadwal lagi: + + +```shell +kubectl uncordon $NODENAME +``` + +Jika kamu menghapus Node dari _instance_ VM dan membuat yang baru, maka sumber daya Node baru yang dapat dijadwalkan akan +dibuat secara otomatis (jika kamu menggunakan penyedia cloud yang mendukung +pencarian Node; saat ini hanya Google Compute Engine, tidak termasuk CoreOS di Google Compute Engine menggunakan kube-register). +Lihatlah [Node](/docs/concepts/architecture/nodes/) untuk lebih detail. 
+ +## Topik lebih lanjut + +### Mengaktifkan atau menonaktifkan versi API untuk klaster kamu + +Versi API spesifik dapat dinyalakan dan dimatikan dengan meneruskan _flag_ `--runtime-config=api/<version>` ketika menjalankan server API. Sebagai contoh: untuk mematikan API v1, teruskan `--runtime-config=api/v1=false`. +_runtime-config_ juga mendukung 2 kunci khusus: api/all dan api/legacy yang masing-masing untuk mengontrol semua API dan API lama. +Sebagai contoh, untuk mematikan semua versi API kecuali v1, teruskan `--runtime-config=api/all=false,api/v1=true`. +Untuk tujuan _flag_ ini, API lama adalah API yang sudah tidak digunakan lagi secara eksplisit (misalnya, `v1beta3`). + +### Mengalihkan versi API penyimpanan dari klaster kamu + +Objek yang disimpan ke diska untuk representasi internal klaster dari sumber daya Kubernetes yang aktif dalam klaster ditulis menggunakan versi API tertentu. +Saat API yang didukung berubah, objek ini mungkin perlu ditulis ulang dalam API yang lebih baru. Kegagalan melakukan ini pada akhirnya akan menghasilkan sumber daya yang tidak lagi dapat didekodekan atau digunakan +oleh server API Kubernetes. + +### Mengalihkan berkas konfigurasi kamu ke versi API baru + +Kamu dapat menggunakan perintah `kubectl convert` untuk mengubah berkas konfigurasi di antara versi API yang berbeda. + +```shell +kubectl convert -f pod.yaml --output-version v1 +``` + +Untuk opsi yang lainnya, silakan merujuk pada penggunaan dari perintah [kubectl convert](/docs/reference/generated/kubectl/kubectl-commands#convert). + + diff --git a/content/ja/case-studies/appdirect/index.html b/content/ja/case-studies/appdirect/index.html index e2b8948bf6937..0be120c3b501b 100644 --- a/content/ja/case-studies/appdirect/index.html +++ b/content/ja/case-studies/appdirect/index.html @@ -12,7 +12,7 @@ 私たちはたくさんの人からの関心を得るためにさまざまな戦略を試みています。Kubernetesとクラウドネイティブ技術は、いまやデファクトのエコシステムとみなされています。 --- -
+

ケーススタディ:
AppDirect:AppDirectはいかにしてKubernetesを活用し、エンジニアリングスタッフが10倍になるほどの成長を後押ししたのか

@@ -47,14 +47,14 @@

課題

AppDirect はクラウ

AppDirect は2009年以来、クラウドベースの製品やサービス向けのエンドツーエンドのeコマースプラットフォームによって、ComcastやGoDaddyといった組織がデジタルサプライチェーンをシンプルにすることに役立ってきました。


2014年にソフトウェア開発ディレクターのPierre-Alexandre Lacerteが働き始めたとき、AppDirectでは「tomcatベースのインフラにモノリシックなアプリケーションをデプロイしていて、リリースプロセス全体が必要以上に複雑なものとなっていました」と彼は振り返ります。「たくさんのマニュアルステップがありました。1人のエンジニアがある機能を構築し、Pull requestを作成、そしてQAもしくは別のエンジニアの手によってその機能を検証する、といった具合でした。さらにこれがマージされれば、また別の誰かがデプロイ作業の面倒をみることになるでしょう。そのため、提供までのパイプラインにボトルネックがいくつもありました。」

これと同時に、40人のエンジニアリングチームが大きくなっていくにつれ、その成長を後押しし加速する上でも、より良いインフラが必要となってくることに同社は気づいたのです。そのころプラットフォーム チームの一員であったLacerteには、Node.jsやSpring Boot Javaといった、異なるフレームワークや言語を使いたいといった声が複数のチームから聞こえてくるようになってきました。同社の成長とスピードを両立するには、(チームが自律的に動き、自らがデプロイし、稼働中のサービスに責任を持てるような)よりよいインフラやシステムがこの会社には必要だということに彼はすぐに気づいたのです。
-
+
「正しいタイミングで正しい判断ができました。Kubernetesとクラウドネイティブ技術は、いまやデファクトのエコシステムとみなされています。スケールアウトしていく中で直面する新たな難題に取り組むにはどこに注力すべきか、私たちはわかっています。このコミュニティはとても活発で、当社の優秀なチームをすばらしく補完してくれています。」

- AppDirect ソフトウェア開発者 Alexandre Gervais
Lacerteは当初から言っていました。「私のアイデアは、チームがサービスをもっと高速にデプロイできる環境を作ろう、というものです。そうすれば彼らもこう言うでしょう『そうだよ、モノリスを建てるなんてもうしたくないしサービスを構築したいんだ』と」(Lacerteは2019年に同社を退社)。

Lacerteのグループは運用チームと連携することで同社の AWSのインフラにより多くアクセスし、コントロールするようになりました。そして、いくつかのオーケストレーション技術のプロトタイプを作り始めたのです。「当時を振り返ると、Kubernetesはちょっとアンダーグラウンドというか、それほど知られていなかったように思います。」と彼は言います。「しかし、コミュニティやPull requestの数、GitHub上でのスピードなどをよく見てみると勢いが増してきていることがわかりました。他の技術よりも管理がはるかに簡単であることもわかりました。」彼らは、Kubernetes上で ChefとTerraform によるプロビジョニングを用いながら最初のいくつかのサービスを開発しました。その後さらにサービスも、自動化されるところも増えました。「韓国、オーストラリア、ドイツ、そしてアメリカ、私たちのクラスターは世界中にあります。」とLacerteは言います。「自動化は私たちにとって極めて重要です。」今彼らは大部分でKopsを使っていて、いくつかのクラウドプロバイダーから提供されるマネージドKubernetesサービスも視野に入れています。

今もモノリスは存在してはいますが、コミットや機能はどんどん少なくなってきています。あらゆるチームがこの新たなインフラ上でデプロイしていて、それらはサービスとして提供されるのが一般的です。今やAppDirectは本番環境で50以上のマイクロサービス、15のKubernetesクラスターをAWS上や世界中のオンプレミス環境で展開しています。

Kubernetesプラットフォームがデプロイ時間に非常に大きなインパクトを与えたことから、Lacerteの戦略が究極的に機能しました。カスタムメイドで不安定だった、SCPコマンドを用いたシェルスクリプトに対する依存性を弱めることで、新しいバージョンをデプロイする時間は4時間から数分にまで短縮されるようになったのです。こういったことに加え同社は、開発者たちが自らのサービスとして仕立て上げるよう、数多くの努力をしてきました。「新しいサービスを始めるのに、 Jiraのチケットや他のチームとのミーティングはもはや必要ないのです」とLacerteは言います。以前、週あたり1〜30だった同社のデプロイ数は、いまや週1,600デプロイにまでなっています。
-
+
「この新たなインフラがなければ、我々は大幅なスローダウンを強いられていたと思います。」

- AppDirect ソフトウェア開発 ディレクター Pierre-Alexandre Lacerte
diff --git a/content/ja/case-studies/chinaunicom/index.html b/content/ja/case-studies/chinaunicom/index.html index 4c288aa97ab1f..622820ada0df1 100644 --- a/content/ja/case-studies/chinaunicom/index.html +++ b/content/ja/case-studies/chinaunicom/index.html @@ -12,7 +12,7 @@ Kubernetesが私たちのクラウドインフラの経験値を上げてくれました。今のところ、これに代わる技術はありません。 --- -
+

ケーススタディ:
China Unicom社:KubernetesによるITコスト削減と効率性向上をいかにして実現したか

@@ -57,7 +57,7 @@

China Unicomは、3億人を超えるユーザーを抱える、中国国内 そこで新しい技術、研究開発(R&D)、およびプラットフォームの責務を担うZhangのチームは、IT管理におけるソリューションの探索を始めました。以前は完全な国営企業だったChina Unicomは、近年BAT(Baidu、Alibaba、Tencent)およびJD.comからの民間投資を受け、今は商用製品ではなくオープンソース技術を活用した社内開発に注力するようになりました。こういったこともあり、Zhangのチームはクラウドインフラのオープンソースオーケストレーションツールを探し始めたのです。

-
+
「これほどの短期間でここまでのスケーラビリティを達成できるとは思いもよりませんでした。」
- Chengyu Zhang、 China Unicom プラットフォーム技術R&D グループリーダー
@@ -73,7 +73,7 @@

China Unicomは、3億人を超えるユーザーを抱える、中国国内

-
+
「この技術は比較的複雑ですが、開発者が慣れれば、恩恵をすべて享受できるのではないかと思います。」

- Jie Jia、China Unicom プラットフォーム技術 R&D
diff --git a/content/ja/case-studies/nav/index.html b/content/ja/case-studies/nav/index.html index c9ad5ab65b327..c0e9afa890544 100644 --- a/content/ja/case-studies/nav/index.html +++ b/content/ja/case-studies/nav/index.html @@ -11,7 +11,7 @@ コミュニティは非常に活発です。アイデアを出し合い、皆が直面する多くの類似課題について話すことができ、そして支援を得ることができます。私たちはさまざまな理由から同じ問題に取り組み、そこでお互いに助け合うことができる、そういう点が気に入っています。 --- -
+

ケーススタディ:
スタートアップはどのようにしてKubernetesでインフラコストを50%も削減したのか

@@ -54,7 +54,7 @@

2012年に設立された Navは、小規

-
+
「コミュニティは非常に活発です。アイデアを出し合い、皆が直面する多くの類似課題について話すことができ、そして支援を得ることができます。私たちはさまざまな理由から同じ問題に取り組み、そこでお互いに助け合うことができる、そういう点が気に入っています。」

- Travis Jeppson、Nav エンジニアリングディレクター
@@ -65,7 +65,7 @@

2012年に設立された Navは、小規 Jeppsonの4人編成のエンジニアリングサービスチームは、Kubernetesを立ち上げ、稼働させるのに6ヶ月かけました(クラスターを動かすために Kubespray を使いました)。そして、その後6ヶ月かけNavの25のマイクロサービスと一つのモノリシックな主要サービスのフルマイグレーションを完了させました。「すべて書き換えたり、止めることはできませんでした」と彼は言います。「稼働し、利用可能であり続けなければいけなかったですし、ダウンタイムがあってもそれを最小にしなければなりませんでした。そのためパイプライン作成、メトリクスやロギングといったことについてよくわかるようになりました。さらにKubernetes自身についても習熟し、起動、アップグレード、サービス提供の仕方についてもわかるようになりました。そうして移行を少しずつ進めていきました。」

-
+
「Kubernetesは、これまで経験したことのない新たな自由とたくさんの価値をNavにもたらしてくれました。」

- Travis Jeppson、Nav エンジニアリングディレクター
diff --git a/content/ja/case-studies/nordstrom/index.html b/content/ja/case-studies/nordstrom/index.html index e867f49d7c42a..b6f77e9003bff 100644 --- a/content/ja/case-studies/nordstrom/index.html +++ b/content/ja/case-studies/nordstrom/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

ケーススタディ:
厳しい小売環境下で数百万ドルのコスト削減を実現 @@ -57,7 +57,7 @@

影響

-
+
「私たちはKubernetesに人気が出ることに賭けました。コミュニティのサポートとプロジェクトの速度の初期の指標に基づいて、Kubernetesを中心にシステムを再構築しました。」
@@ -73,7 +73,7 @@

影響

参加したチームにとってメリットはすぐに現れました。「Kubernetesクラスターを使っているチームは心配する問題が少ないという事実を気に入っていました。インフラストラクチャーやオペレーティングシステムを管理する必要はありませんでした。」とGrigoriu氏は言います。「初期の導入者は、Kubernetesの宣言的な性質を好んでいます。彼らは対処しなければならなかった領域が減少したことを好んでいます。」
-
+
「Kubernetesクラスターを使っているチームは心配する問題が少ないという事実を気に入っていました。インフラストラクチャーやオペレーティングシステムを管理する必要はありませんでした。」とGrigoriu氏は言います。「初期の導入者は、Kubernetesの宣言的な性質を好んでいます。彼らは対処しなければならなかった領域が減少したことを好んでいます。」
diff --git a/content/ja/case-studies/sos/index.html b/content/ja/case-studies/sos/index.html index 814e9cb1194e0..d893f6a9b3226 100644 --- a/content/ja/case-studies/sos/index.html +++ b/content/ja/case-studies/sos/index.html @@ -8,7 +8,7 @@ --- -
+

ケーススタディ:
SOS International: Kubernetesを使ってコネクテッドな世界での緊急支援を提供

@@ -56,7 +56,7 @@

SOS Internationalは60年にわたり、北欧諸国の顧客に信頼性の

-
+
「私たちは新しいデジタルサービスを提供しなければなりませんが、古いものも移行する必要があります。そして、コアシステムをこのプラットフォーム上に構築された新しいシステムに変換する必要があります。この技術を選んだ理由の1つは古いデジタルサービスを変更しながら新しいサービスを構築できるからです。」 @@ -73,7 +73,7 @@

SOS Internationalは60年にわたり、北欧諸国の顧客に信頼性の プラットフォームは2018年春に公開されました。マイクロサービスアーキテクチャに基づく6つの未開発のプロジェクトが最初に開始されました。さらに、同社のJavaアプリケーションはすべて「リフト&シフト」移行を行っています。最初に稼働しているKubernetesベースのプロジェクトの一つがRemote Medical Treatmentです。これは顧客が音声、チャット、ビデオを介してSOSアラームセンターに連絡できるソリューションです。「完全なCI/CDパイプラインと最新のマイクロサービスアーキテクチャをすべて2つのOpenShiftクラスターセットアップで実行することに焦点を当てて、非常に短時間で開発できました。」とAhrentsen氏は言います。北欧諸国へのレスキュートラックの派遣に使用されるOnsite、および、レッカー車の追跡を可能にするFollow Your Truckも展開されています。

-
+
「ITプロフェッショナルが新しい技術を提供したという理由で我が社を選んでいたことが新人研修の時にわかりました。」

- SOS International エンタープライズアーキテクチャ責任者 Martin Ahrentsen
diff --git a/content/ja/case-studies/spotify/index.html b/content/ja/case-studies/spotify/index.html index 0725723b68351..49d929995daae 100644 --- a/content/ja/case-studies/spotify/index.html +++ b/content/ja/case-studies/spotify/index.html @@ -11,7 +11,7 @@ Kubernetesを中心に成長した素晴らしいコミュニティを見て、その一部になりたかったのです。スピードの向上とコスト削減のメリットを享受し、ベストプラクティスとツールについて業界の他の企業と連携したいとも思いました。 --- -
+

ケーススタディ:Spotify
Spotify:コンテナ技術のアーリーアダプターであるSpotifyは自社製オーケストレーションツールからKubernetesに移行しています

@@ -55,7 +55,7 @@

「私たちのゴールは、クリエイターたちに力を与え、今

-
+
「このコミュニティは、あらゆる技術への取り組みをより速く、より容易にしてくれることを強力に助けてくれました。そして、私たちの取り組みのすべてを検証することも助けてくれました。」

- Spotify ソフトウェアエンジニア、インフラおよびオペレーション担当、Dave Zolotusky
@@ -81,7 +81,7 @@

「私たちのゴールは、クリエイターたちに力を与え、今

-
+
「レガシーのインフラをサポートしたり連携するKubernetes APIやKubernetesの拡張性機能をたくさん使うことができたので、インテグレーションはシンプルで簡単なものでした」

- Spotify、Spotifyエンジニア、James Wen
diff --git a/content/ko/case-studies/newyorktimes/index.html b/content/ko/case-studies/newyorktimes/index.html index c65b5fe88355f..53dbd06a55085 100644 --- a/content/ko/case-studies/newyorktimes/index.html +++ b/content/ko/case-studies/newyorktimes/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
The New York Times: From Print to the Web to Cloud Native

@@ -64,7 +64,7 @@

Impact

-
+
"We had some internal tooling that attempted to do what Kubernetes does for containers, but for VMs. We asked why are we building and maintaining these tools ourselves?"
@@ -79,7 +79,7 @@

Impact

-
+
"Right now, every team is running a small Kubernetes cluster, but it would be nice if we could all live in a larger ecosystem," says Kapadia. "Then we can harness the power of things like service mesh proxies that can actually do a lot of instrumentation between microservices, or service-to-service orchestration. Those are the new things that we want to experiment with as we go forward." diff --git a/content/ko/docs/concepts/workloads/controllers/deployment.md b/content/ko/docs/concepts/workloads/controllers/deployment.md index 0e5c5e94fbf36..745a33b52d031 100644 --- a/content/ko/docs/concepts/workloads/controllers/deployment.md +++ b/content/ko/docs/concepts/workloads/controllers/deployment.md @@ -100,7 +100,7 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml 다음과 유사하게 출력된다. ``` Waiting for rollout to finish: 2 out of 3 new replicas have been updated... - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` 4. 몇 초 후 `kubectl get deployments` 를 다시 실행한다. @@ -203,7 +203,7 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml ``` 또는 ``` - deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` 업데이트된 디플로이먼트에 대해 자세한 정보 보기 @@ -855,7 +855,7 @@ kubectl rollout status deployment.v1.apps/nginx-deployment 이와 유사하게 출력된다. ``` Waiting for rollout to finish: 2 of 3 updated replicas are available... -deployment.apps/nginx-deployment successfully rolled out +deployment "nginx-deployment" successfully rolled out ``` 그리고 `kubectl rollout` 의 종료 상태는 0(success)이다. ```shell diff --git a/content/zh/case-studies/adform/index.html b/content/zh/case-studies/adform/index.html index e9a8acc7a22f2..be35a2d8375ee 100644 --- a/content/zh/case-studies/adform/index.html +++ b/content/zh/case-studies/adform/index.html @@ -12,7 +12,7 @@ Kubernetes enabled the self-healing and immutable infrastructure. We can do faster releases, so our developers are really happy. They can ship our features faster than before, and that makes our clients happier. --- -
+

CASE STUDY:
Improving Performance and Morale with Cloud Native

@@ -66,7 +66,7 @@

Adform made +
"The fact that Cloud Native Computing Foundation incubated Kubernetes was a really big point for us because it was vendor neutral. And we can see that a community really gathers around it. Everyone shares their experiences, their knowledge, and the fact that it’s open source, you can contribute."

— Edgaras Apšega, IT Systems Engineer, Adform
@@ -83,7 +83,7 @@

Adform made +
"Releases are really nice for them, because they just push their code to Git and that’s it. They don’t have to worry about their virtual machines anymore."

— Andrius Cibulskis, IT Systems Engineer, Adform
diff --git a/content/zh/case-studies/ant-financial/index.html b/content/zh/case-studies/ant-financial/index.html index 92b46526dee48..1711ef97b8157 100644 --- a/content/zh/case-studies/ant-financial/index.html +++ b/content/zh/case-studies/ant-financial/index.html @@ -7,7 +7,7 @@ featured: false --- -
+ -
+
"On Double 11 this year, we had plenty of nodes on Kubernetes, but compared to the whole scale of our infrastructure, this is still in progress."

- RANGER YU, GLOBAL TECHNOLOGY PARTNERSHIP & DEVELOPMENT, ANT FINANCIAL
@@ -65,7 +65,7 @@

A spinoff of the multinational conglomerate Alibaba, Ant Financial boasts a All core financial systems were containerized by November 2017, and the migration to Kubernetes is ongoing. Ant’s platform also leverages a number of other CNCF projects, including Prometheus, OpenTracing, etcd and CoreDNS. “On Double 11 this year, we had plenty of nodes on Kubernetes, but compared to the whole scale of our infrastructure, this is still in progress,” says Ranger Yu, Global Technology Partnership & Development.

-
+
"We’re very grateful for CNCF and this amazing technology, which we need as we continue to scale globally. We’re definitely embracing the community and open source more in the future."

- HAOJIE HANG, PRODUCT MANAGEMENT, ANT FINANCIAL
diff --git a/content/zh/case-studies/appdirect/index.html b/content/zh/case-studies/appdirect/index.html index 16d93cce5cb4e..ca6b0b8fe92a4 100644 --- a/content/zh/case-studies/appdirect/index.html +++ b/content/zh/case-studies/appdirect/index.html @@ -12,7 +12,7 @@ We made the right decisions at the right time. Kubernetes and the cloud native technologies are now seen as the de facto ecosystem. --- -
+

CASE STUDY:
AppDirect: How AppDirect Supported the 10x Growth of Its Engineering Staff with Kubernetes

@@ -53,7 +53,7 @@

With its end-to-end commerce platform for cloud-based products and services,

-
+
"We made the right decisions at the right time. Kubernetes and the cloud native technologies are now seen as the de facto ecosystem. We know where to focus our efforts in order to tackle the new wave of challenges we face as we scale out. The community is so active and vibrant, which is a great complement to our awesome internal team."

- Alexandre Gervais, Staff Software Developer, AppDirect
@@ -69,7 +69,7 @@

With its end-to-end commerce platform for cloud-based products and services, Lacerte’s strategy ultimately worked because of the very real impact the Kubernetes platform has had to deployment time. Due to less dependency on custom-made, brittle shell scripts with SCP commands, time to deploy a new version has shrunk from 4 hours to a few minutes. Additionally, the company invested a lot of effort to make things self-service for developers. "Onboarding a new service doesn’t require Jira tickets or meeting with three different teams," says Lacerte. Today, the company sees 1,600 deployments per week, compared to 1-30 before.

-
+
"I think our velocity would have slowed down a lot if we didn’t have this new infrastructure."

- Pierre-Alexandre Lacerte, Director of Software Development, AppDirect
diff --git a/content/zh/case-studies/bose/index.html b/content/zh/case-studies/bose/index.html index d22de2187af9c..c77f416c13715 100644 --- a/content/zh/case-studies/bose/index.html +++ b/content/zh/case-studies/bose/index.html @@ -11,7 +11,7 @@ The CNCF Landscape quickly explains what’s going on in all the different areas from storage to cloud providers to automation and so forth. This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles. --- -
+

CASE STUDY:
Bose: Supporting Rapid Development for Millions of IoT Products With Kubernetes

@@ -56,7 +56,7 @@

A household name in high-quality audio equipment, +
"Everybody on the team thinks in terms of automation, leaning out the processes, getting things done as quickly as possible. When you step back and look at what it means for a 50-plus-year-old speaker company to have that sort of culture, it really is quite incredible, and I think the tools that we use and the foundation that we’ve built with them is a huge piece of that."

- Dylan O’Mahony, Cloud Architecture Manager, Bose
@@ -70,7 +70,7 @@

A household name in high-quality audio equipment, +
"The CNCF Landscape quickly explains what’s going on in all the different areas from storage to cloud providers to automation and so forth. This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles."

- Josh West, Lead Cloud Engineer, Bose
diff --git a/content/zh/case-studies/capital-one/index.html b/content/zh/case-studies/capital-one/index.html index 773db4869e4e3..f95fb2acc703b 100644 --- a/content/zh/case-studies/capital-one/index.html +++ b/content/zh/case-studies/capital-one/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
Supporting Fast Decisioning Applications with Kubernetes

@@ -55,7 +55,7 @@

-
+
"We want to provide the tools in the same ecosystem, in a consistent way, rather than have a large custom snowflake ecosystem where every tool needs its own custom deployment. Kubernetes gives us the ability to bring all of these together, so the richness of the open source and even the license community dealing with big data can be corralled." @@ -69,7 +69,7 @@

-
+
With Kubernetes, "a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer. Kubernetes is a manifold productivity multiplier."
diff --git a/content/zh/case-studies/ccp-games/ccp_logo.png b/content/zh/case-studies/ccp-games/ccp_logo.png deleted file mode 100644 index cbf3d267ba8dd..0000000000000 Binary files a/content/zh/case-studies/ccp-games/ccp_logo.png and /dev/null differ diff --git a/content/zh/case-studies/ccp-games/index.html b/content/zh/case-studies/ccp-games/index.html deleted file mode 100644 index 8867cbb323442..0000000000000 --- a/content/zh/case-studies/ccp-games/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: CCP Games -content_url: https://cloud.google.com/customers/ccp-games/ ---- \ No newline at end of file diff --git a/content/zh/case-studies/cern/index.html b/content/zh/case-studies/cern/index.html index 9bd797024595e..48e965d7fb145 100644 --- a/content/zh/case-studies/cern/index.html +++ b/content/zh/case-studies/cern/index.html @@ -7,7 +7,7 @@ logo: cern_featured_logo.png --- -
+

CASE STUDY: CERN
CERN: Processing Petabytes of Data More Efficiently with Kubernetes

@@ -52,7 +52,7 @@

With a mission of researching fundamental science, and a stable of extremely

-
+
"Before, the tendency was always: ‘I need this, I get a couple of developers, and I implement it.’ Right now it’s ‘I need this, I’m sure other people also need this, so I’ll go and ask around.’ The CNCF is a good source because there’s a very large catalog of applications available. It’s very hard right now to justify developing a new product in-house. There is really no real reason to keep doing that. It’s much easier for us to try it out, and if we see it’s a good solution, we try to reach out to the community and start working with that community."

- Ricardo Rocha, Software Engineer, CERN
@@ -66,7 +66,7 @@

With a mission of researching fundamental science, and a stable of extremely

-
+
"With Kubernetes, there’s a well-established technology and a big community that we can contribute to. It allows us to do our physics analysis without having to focus so much on the lower level software. This is just exciting. We are looking forward to keep contributing to the community and collaborating with everyone."

- Ricardo Rocha, Software Engineer, CERN
diff --git a/content/zh/case-studies/chinaunicom/index.html b/content/zh/case-studies/chinaunicom/index.html index 675273c47fa5d..94a1822a25dfe 100644 --- a/content/zh/case-studies/chinaunicom/index.html +++ b/content/zh/case-studies/chinaunicom/index.html @@ -12,7 +12,7 @@ Kubernetes has improved our experience using cloud infrastructure. There is currently no alternative technology that can replace it. --- -
+

CASE STUDY:
China Unicom: How China Unicom Leveraged Kubernetes to Boost Efficiency
and Lower IT Costs

@@ -55,7 +55,7 @@

With more than 300 million users, China Unicom is one of the country’s top

-
+
"We could never imagine we can achieve this scalability in such a short time."

- Chengyu Zhang, Group Leader of Platform Technology R&D, China Unicom
@@ -69,7 +69,7 @@

With more than 300 million users, China Unicom is one of the country’s top

-
+
"This technology is relatively complicated, but as long as developers get used to it, they can enjoy all the benefits."

- Jie Jia, Member of Platform Technology R&D, China Unicom
diff --git a/content/zh/case-studies/city-of-montreal/index.html b/content/zh/case-studies/city-of-montreal/index.html index 151ce44b21691..55378c649ed01 100644 --- a/content/zh/case-studies/city-of-montreal/index.html +++ b/content/zh/case-studies/city-of-montreal/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:
City of Montréal - How the City of Montréal Is Modernizing Its 30-Year-Old, Siloed Architecture with Kubernetes

@@ -50,7 +50,7 @@

The second biggest municipality in Canada, Montréal has a large number of l The first step to modernize the architecture was containerization. “We based our effort on the new trends; we understood the benefits of immutability and deployments without downtime and such things,” says Solutions Architect Marc Khouzam. The team started with a small Docker farm with four or five servers, with Rancher for providing access to the Docker containers and their logs and Jenkins for deployment.

-
+
"Getting a project running in Kubernetes is entirely dependent on how long you need to program the actual software. It’s no longer dependent on deployment. Deployment is so fast that it’s negligible."

- MARC KHOUZAM, SOLUTIONS ARCHITECT, CITY OF MONTRÉAL
@@ -65,7 +65,7 @@

The second biggest municipality in Canada, Montréal has a large number of l Another important factor in the decision was vendor neutrality. “As a government entity, it is essential for us to be neutral in our selection of products and providers,” says Thibault. “The independence of the Cloud Native Computing Foundation from any company provides this.”

-
+
"Kubernetes has been great. It’s been stable, and it provides us with elasticity, resilience, and robustness. While re-architecting for Kubernetes, we also benefited from the monitoring and logging aspects, with centralized logging, Prometheus logging, and Grafana dashboards. We have enhanced visibility of what’s being deployed."

- MORGAN MARTINET, ENTERPRISE ARCHITECT, CITY OF MONTRÉAL
diff --git a/content/zh/case-studies/comcast/comcast_logo.png b/content/zh/case-studies/comcast/comcast_logo.png deleted file mode 100644 index 3f0ef76645680..0000000000000 Binary files a/content/zh/case-studies/comcast/comcast_logo.png and /dev/null differ diff --git a/content/zh/case-studies/comcast/index.html b/content/zh/case-studies/comcast/index.html deleted file mode 100644 index 054df1469667c..0000000000000 --- a/content/zh/case-studies/comcast/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Comcast -content_url: https://youtu.be/lmeFkH-rHII ---- diff --git a/content/zh/case-studies/concur/concur_featured_logo.png b/content/zh/case-studies/concur/concur_featured_logo.png deleted file mode 100644 index 473427a3baee3..0000000000000 Binary files a/content/zh/case-studies/concur/concur_featured_logo.png and /dev/null differ diff --git a/content/zh/case-studies/concur/index.html b/content/zh/case-studies/concur/index.html deleted file mode 100644 index 977399a5fcc6f..0000000000000 --- a/content/zh/case-studies/concur/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Concur -content_url: http://searchitoperations.techtarget.com/news/450297178/Tech-firms-roll-out-Kubernetes-in-production ---- diff --git a/content/zh/case-studies/ebay/ebay_featured.png b/content/zh/case-studies/ebay/ebay_featured.png deleted file mode 100644 index 4ad17a4af5036..0000000000000 Binary files a/content/zh/case-studies/ebay/ebay_featured.png and /dev/null differ diff --git a/content/zh/case-studies/ebay/ebay_logo.png b/content/zh/case-studies/ebay/ebay_logo.png deleted file mode 100644 index 830913c52b13f..0000000000000 Binary files a/content/zh/case-studies/ebay/ebay_logo.png and /dev/null differ diff --git a/content/zh/case-studies/ebay/index.html b/content/zh/case-studies/ebay/index.html deleted file mode 100644 index 2a0d9cc18ec6e..0000000000000 --- a/content/zh/case-studies/ebay/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Ebay -content_url: http://www.nextplatform.com/2015/11/12/inside-ebays-shift-to-kubernetes-and-containers-atop-openstack/ ---- diff --git a/content/zh/case-studies/goldman-sachs/gs_logo.png b/content/zh/case-studies/goldman-sachs/gs_logo.png deleted file mode 100644 index 5cc8c14566ae5..0000000000000 Binary files a/content/zh/case-studies/goldman-sachs/gs_logo.png and /dev/null differ diff --git a/content/zh/case-studies/goldman-sachs/index.html b/content/zh/case-studies/goldman-sachs/index.html deleted file mode 100644 index 93d3022d12440..0000000000000 --- a/content/zh/case-studies/goldman-sachs/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Goldman Sachs -content_url: http://blogs.wsj.com/cio/2016/02/24/big-changes-in-goldmans-software-emerge-from-small-containers/ ---- \ No newline at end of file diff --git a/content/zh/case-studies/homeoffice/homeoffice_logo.png b/content/zh/case-studies/homeoffice/homeoffice_logo.png deleted file mode 100644 index 35d9722611159..0000000000000 Binary files a/content/zh/case-studies/homeoffice/homeoffice_logo.png and /dev/null differ diff --git a/content/zh/case-studies/homeoffice/index.html b/content/zh/case-studies/homeoffice/index.html deleted file mode 100644 index 589c7507d4839..0000000000000 --- a/content/zh/case-studies/homeoffice/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Home Office UK -content_url: https://www.youtube.com/watch?v=F3iMkz_NSvU ---- diff --git a/content/zh/case-studies/jd/index.html b/content/zh/case-studies/jd/index.html deleted file mode 100644 index 8fbf434a567c2..0000000000000 --- 
a/content/zh/case-studies/jd/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: JD.COM -content_url: https://kubernetes.io/blog/2017/02/inside-jd-com-shift-to-kubernetes-from-openstack ---- diff --git a/content/zh/case-studies/jd/jd_logo.png b/content/zh/case-studies/jd/jd_logo.png deleted file mode 100644 index 58ef32a3221e4..0000000000000 Binary files a/content/zh/case-studies/jd/jd_logo.png and /dev/null differ diff --git a/content/zh/case-studies/liveperson/index.html b/content/zh/case-studies/liveperson/index.html deleted file mode 100644 index 0cadb0f274073..0000000000000 --- a/content/zh/case-studies/liveperson/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: LivePerson -content_url: https://www.openstack.org/videos/video/running-kubernetes-on-openstack-at-liveperson ---- \ No newline at end of file diff --git a/content/zh/case-studies/liveperson/liveperson_logo.png b/content/zh/case-studies/liveperson/liveperson_logo.png deleted file mode 100644 index b7e63d94f72d6..0000000000000 Binary files a/content/zh/case-studies/liveperson/liveperson_logo.png and /dev/null differ diff --git a/content/zh/case-studies/monzo/index.html b/content/zh/case-studies/monzo/index.html deleted file mode 100644 index 99d4f35934145..0000000000000 --- a/content/zh/case-studies/monzo/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Monzo -content_url: https://youtu.be/YkOY7DgXKyw ---- \ No newline at end of file diff --git a/content/zh/case-studies/monzo/monzo_logo.png b/content/zh/case-studies/monzo/monzo_logo.png deleted file mode 100644 index 854409d17eabb..0000000000000 Binary files a/content/zh/case-studies/monzo/monzo_logo.png and /dev/null differ diff --git a/content/zh/case-studies/netease/index.html b/content/zh/case-studies/netease/index.html index 0ce8d133e2353..f5c70dedffc70 100644 --- a/content/zh/case-studies/netease/index.html +++ b/content/zh/case-studies/netease/index.html @@ -6,11 +6,11 @@ --- - -
+

案例研究:
网易如何利用 Kubernetes 支持在全球的互联网业务

@@ -55,7 +55,7 @@

其游戏业务是世界Kubernetes 的基础上。这项技术来自谷歌,这一事实让团队有信心,它能够跟上网易的规模。“经过2到3个月的评估,我们相信它能满足我们的需求,”冯长健说。

-
+
“我们利用 Kubernetes 的可编程性,构建一个平台,以满足内部客户对升级和部署的需求。”

- 冯长健,网易云和容器托管平台架构师
@@ -71,7 +71,7 @@

其游戏业务是世界 +
“只要公司拥有成熟的团队和足够的开发人员,我认为 Kubernetes 是一个很好的有所助力的技术。”

- 李兰青, 网易 Kubernetes 开发人员
diff --git a/content/zh/case-studies/nordstrom/index.html b/content/zh/case-studies/nordstrom/index.html index 40cfc7642d77f..11edf1c652c74 100644 --- a/content/zh/case-studies/nordstrom/index.html +++ b/content/zh/case-studies/nordstrom/index.html @@ -5,14 +5,14 @@ css: /css/style_case_studies.css --- - -
+

案例研究:
在艰难的零售环境下寻找数百万的潜在成本节约 @@ -87,7 +87,7 @@

影响

但是,新环境仍然需要很长时间才能出现,因此下一步是在云中工作。如今,Nordstrom Technology 已经构建了一个企业平台,允许公司的1500 名开发人员在云中部署以 Docker 容器身份运行的应用程序,这些应用程序由 Kubernetes 进行编排。
-
+
“了解到早期的社区支持和项目迭代指标,我们肯定 Kubernetes 一定会成功的,因此我们以 Kubernetes 为核心重建了我们的系统。” @@ -108,7 +108,7 @@

影响

对于加入的团队来说,这些好处是立竿见影的。Grigoriu 说:“在我们的 Kubernetes 集群中运行的团队喜欢这样一个事实,即他们担心的问题更少,他们不需要管理基础设施或操作系统。早期使用者喜欢 Kubernetes 的声明特性,让他们不得不处理的面积减少。
-
+
Grigoriu 说:“在我们的 Kubernetes 集群中运行的团队喜欢这样一个事实,即他们担心的问题更少,他们不需要管理基础设施或操作系统。早期使用者喜欢 Kubernetes 的声明特性,让他们不得不处理的面积减少。” diff --git a/content/zh/case-studies/philips/index.html b/content/zh/case-studies/philips/index.html deleted file mode 100644 index e45d41a776baa..0000000000000 --- a/content/zh/case-studies/philips/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Philips -content_url: https://cloud.google.com/customers/philips/ ---- \ No newline at end of file diff --git a/content/zh/case-studies/philips/philips_logo.png b/content/zh/case-studies/philips/philips_logo.png deleted file mode 100644 index 9ba3421a61b81..0000000000000 Binary files a/content/zh/case-studies/philips/philips_logo.png and /dev/null differ diff --git a/content/zh/case-studies/pokemon-go/index.html b/content/zh/case-studies/pokemon-go/index.html deleted file mode 100644 index ed4e168019236..0000000000000 --- a/content/zh/case-studies/pokemon-go/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Pokemon GO -content_url: https://cloudplatform.googleblog.com/2016/09/bringing-Pokemon-GO-to-life-on-Google-Cloud.html ---- \ No newline at end of file diff --git a/content/zh/case-studies/pokemon-go/pokemon_go_logo.png b/content/zh/case-studies/pokemon-go/pokemon_go_logo.png deleted file mode 100644 index 3cf2b5c7ef63d..0000000000000 Binary files a/content/zh/case-studies/pokemon-go/pokemon_go_logo.png and /dev/null differ diff --git a/content/zh/case-studies/samsung-sds/index.html b/content/zh/case-studies/samsung-sds/index.html deleted file mode 100644 index db4aa479abc1e..0000000000000 --- a/content/zh/case-studies/samsung-sds/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Samsung SDS -content_url: http://www.nextplatform.com/2016/05/24/samsung-experts-put-kubernetes-paces/ ---- \ No newline at end of file diff --git a/content/zh/case-studies/samsung-sds/sds_logo.png b/content/zh/case-studies/samsung-sds/sds_logo.png deleted file mode 100644 index 0a172df65d7e1..0000000000000 Binary files a/content/zh/case-studies/samsung-sds/sds_logo.png and /dev/null differ diff --git a/content/zh/case-studies/sap/index.html b/content/zh/case-studies/sap/index.html deleted file mode 100644 index d94c034023dc8..0000000000000 --- a/content/zh/case-studies/sap/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: SAP -content_url: https://youtu.be/4gyeixJLabo ---- diff --git a/content/zh/case-studies/sap/sap_logo.png b/content/zh/case-studies/sap/sap_logo.png deleted file mode 100644 index 681a3fe5cff8c..0000000000000 Binary files a/content/zh/case-studies/sap/sap_logo.png and /dev/null differ diff --git a/content/zh/case-studies/sap/sap_small.png b/content/zh/case-studies/sap/sap_small.png deleted file mode 100644 index ada89de7595e5..0000000000000 Binary files a/content/zh/case-studies/sap/sap_small.png and /dev/null differ diff --git a/content/zh/case-studies/soundcloud/index.html b/content/zh/case-studies/soundcloud/index.html deleted file mode 100644 index c3c99ba715c4c..0000000000000 --- a/content/zh/case-studies/soundcloud/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: SoundCloud -content_url: https://www.youtube.com/watch?v=5378N5iLb2Q ---- diff --git a/content/zh/case-studies/soundcloud/soundcloud_logo.png b/content/zh/case-studies/soundcloud/soundcloud_logo.png deleted file mode 100644 index f8c12f05b5edf..0000000000000 Binary files a/content/zh/case-studies/soundcloud/soundcloud_logo.png and /dev/null differ diff --git a/content/zh/case-studies/squarespace/index.html 
b/content/zh/case-studies/squarespace/index.html index b78180f52c295..c2ac30a7bad6a 100644 --- a/content/zh/case-studies/squarespace/index.html +++ b/content/zh/case-studies/squarespace/index.html @@ -14,13 +14,13 @@ --- --> - -
+

案例分析:
Squarespace: 借力 Kubernetes 提升效率和可靠性

@@ -113,7 +113,7 @@

从 2003 年宿舍起步, Squarespace 已经为数百万人提供了网站

-
+
diff --git a/content/zh/case-studies/wepay/index.html b/content/zh/case-studies/wepay/index.html deleted file mode 100644 index b8ce8201d5f5b..0000000000000 --- a/content/zh/case-studies/wepay/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: WePay -content_url: http://thenewstack.io/wepay-kubernetes-changed-business/ ---- \ No newline at end of file diff --git a/content/zh/case-studies/wepay/wepay_logo.png b/content/zh/case-studies/wepay/wepay_logo.png deleted file mode 100644 index 4e35dd8fd694a..0000000000000 Binary files a/content/zh/case-studies/wepay/wepay_logo.png and /dev/null differ diff --git a/content/zh/case-studies/zulily/index.html b/content/zh/case-studies/zulily/index.html deleted file mode 100644 index d5caf422aacd7..0000000000000 --- a/content/zh/case-studies/zulily/index.html +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Zulily -content_url: https://www.youtube.com/embed/of45hYbkIZs ---- diff --git a/content/zh/case-studies/zulily/zulily_featured.png b/content/zh/case-studies/zulily/zulily_featured.png deleted file mode 100644 index 81179f36d293a..0000000000000 Binary files a/content/zh/case-studies/zulily/zulily_featured.png and /dev/null differ diff --git a/content/zh/case-studies/zulily/zulily_logo.png b/content/zh/case-studies/zulily/zulily_logo.png deleted file mode 100644 index e144c7897b3bf..0000000000000 Binary files a/content/zh/case-studies/zulily/zulily_logo.png and /dev/null differ diff --git a/content/zh/docs/concepts/overview/working-with-objects/names.md b/content/zh/docs/concepts/overview/working-with-objects/names.md index 303a7e541d2c9..678adea96df82 100644 --- a/content/zh/docs/concepts/overview/working-with-objects/names.md +++ b/content/zh/docs/concepts/overview/working-with-objects/names.md @@ -13,7 +13,7 @@ Every Kubernetes object also has a [_UID_](#uids) that is unique across your who For example, you can only have one Pod named `myapp-1234` within the same [namespace](/docs/concepts/overview/working-with-objects/namespaces/), but you can have one Pod and one Deployment that are each named `myapp-1234`. 
--> -集群中的每一个对象都一个[_名称_](#names) 来标识在同类资源中的唯一性。 +集群中的每一个对象都有一个[_名称_](#names) 来标识在同类资源中的唯一性。 每个 Kubernetes 对象也有一个[_UID_](#uids) 来标识在整个集群中的唯一性。 diff --git a/content/zh/docs/concepts/overview/working-with-objects/object-management.md b/content/zh/docs/concepts/overview/working-with-objects/object-management.md index fc48eb7489830..e7bcf9af09a1b 100644 --- a/content/zh/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/zh/docs/concepts/overview/working-with-objects/object-management.md @@ -324,10 +324,10 @@ Disadvantages compared to imperative object configuration: - [Kubectl Book](https://kubectl.docs.kubernetes.io) - [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) --> -- [使用命令式命令管理 Kubernetes 对象](/docs/tasks/manage-kubernetes-objects/imperative-command/) +- [使用命令式命令管理 Kubernetes 对象](/zh/docs/tasks/manage-kubernetes-objects/imperative-command/) - [使用对象配置管理 Kubernetes 对象(命令式)](/zh/docs/tasks/manage-kubernetes-objects/imperative-config/) -- [使用对象配置管理 Kubernetes 对象(声明式)](/docs/tasks/manage-kubernetes-objects/declarative-config/) -- [使用 Kustomize(声明式)管理 Kubernetes 对象](/docs/tasks/manage-kubernetes-objects/kustomization/) +- [使用对象配置管理 Kubernetes 对象(声明式)](/zh/docs/tasks/manage-kubernetes-objects/declarative-config/) +- [使用 Kustomize(声明式)管理 Kubernetes 对象](/zh/docs/tasks/manage-kubernetes-objects/kustomization/) - [Kubectl 命令参考](/docs/reference/generated/kubectl/kubectl-commands/) - [Kubectl Book](https://kubectl.docs.kubernetes.io) - [Kubernetes API 参考](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) diff --git a/content/zh/docs/concepts/services-networking/ingress.md b/content/zh/docs/concepts/services-networking/ingress.md index 254f04cba2be4..6a5cb35739536 100644 --- a/content/zh/docs/concepts/services-networking/ingress.md +++ b/content/zh/docs/concepts/services-networking/ingress.md @@ -10,20 +10,18 @@ weight: 40 --> -{{< feature-state for_k8s_version="v1.1" state="beta" >}} +{{< feature-state for_k8s_version="v1.19" state="stable" >}} {{< glossary_definition term_id="ingress" length="all" >}} - -## 术语 - +## 术语 + 为了表达更加清晰,本指南定义了以下术语: -## Ingress 是什么? - +## Ingress 是什么? [Ingress](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io) 公开了从集群外部到集群内[服务](/zh/docs/concepts/services-networking/service/)的 HTTP 和 HTTPS 路由。 流量路由由 Ingress 资源上定义的规则控制。 -```none - internet - | - [ Ingress ] - --|-----|-- - [ Services ] -``` + +下面是一个将所有流量都发送到同一 Service 的简单 Ingress 示例: + +{{< mermaid >}} +graph LR; + client([客户端])-. Ingress-管理的
负载均衡器 .->ingress[Ingress]; + ingress-->|路由规则|service[Service]; + subgraph cluster + ingress; + service-->pod1[Pod]; + service-->pod2[Pod]; + end + classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000; + classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff; + classDef cluster fill:#fff,stroke:#bbb,stroke-width:2px,color:#326ce5; + class ingress,service,pod1,pod2 k8s; + class client plain; + class cluster cluster; +{{}} {{< note >}} -确保您查看了 Ingress 控制器的文档,以了解选择它的注意事项。 +确保你查看了 Ingress 控制器的文档,以了解选择它的注意事项。 {{< /note >}} -## Ingress 资源 +## Ingress 资源 {#the-ingress-resource} 一个最小的 Ingress 资源示例: -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: test-ingress - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - rules: - - http: - paths: - - path: /testpath - pathType: Prefix - backend: - serviceName: test - servicePort: 80 -``` + +{{< codenew file="service/networking/minimal-ingress.yaml" >}} -### Ingress 规则 {#ingress-rules} - +### Ingress 规则 {#ingress-rules} + 每个 HTTP 规则都包含以下信息: -* 可选主机。在此示例中,未指定主机,因此该规则适用于通过指定 IP 地址的所有入站 HTTP 通信。 - 如果提供了主机(例如 foo.bar.com),则规则适用于该主机。 -* 路径列表(例如,`/testpath`),每个路径都有一个由 `serviceName` 和 `servicePort` 定义的关联后端。 +* 可选的 `host`。在此示例中,未指定 `host`,因此该规则适用于通过指定 IP 地址的所有入站 HTTP 通信。 + 如果提供了 `host`(例如 foo.bar.com),则 `rules` 适用于该 `host`。 +* 路径列表 paths(例如,`/testpath`),每个路径都有一个由 `serviceName` 和 `servicePort` 定义的关联后端。 在负载均衡器将流量定向到引用的服务之前,主机和路径都必须匹配传入请求的内容。 -* 后端是 [Service 文档](/zh/docs/concepts/services-networking/service/)中所述的服务和端口名称的组合。 - 与规则的主机和路径匹配的对 Ingress 的 HTTP(和 HTTPS )请求将发送到列出的后端。 +* `backend`(后端)是 [Service 文档](/zh/docs/concepts/services-networking/service/)中所述的服务和端口名称的组合。 + 与规则的 `host` 和 `path` 匹配的对 Ingress 的 HTTP(和 HTTPS )请求将发送到列出的 `backend`。 -通常在 Ingress 控制器中会配置默认后端,以服务任何不符合规范中路径的请求。 +通常在 Ingress 控制器中会配置 `defaultBackend`(默认后端),以服务于任何不符合规约中 `path` 的请求。 -### 默认后端 +### DefaultBackend {#default-backend} -没有规则的 Ingress 将所有流量发送到同一个默认后端。 -默认后端通常是 [Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers) -的配置选项,并且未在 Ingress 资源中指定。 +没有 `rules` 的 Ingress 将所有流量发送到同一个默认后端。 +`defaultBackend` 通常是 [Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers) +的配置选项,而非在 Ingress 资源中指定。 -如果主机或路径都没有与 Ingress 对象中的 HTTP 请求匹配,则流量将路由到默认后端。 +如果 `hosts` 或 `paths` 都没有与 Ingress 对象中的 HTTP 请求匹配,则流量将路由到默认后端。 + + +### 资源后端 {#resource-backend} + +`Resource` 后端是一个 `ObjectRef`,指向同一名字空间中的另一个 +Kubernetes,将其作为 Ingress 对象。`Resource` 与 `Service` 配置是互斥的,在 +二者均被设置时会无法通过合法性检查。 +`Resource` 后端的一种常见用法是将所有入站数据导向带有静态资产的对象存储后端。 + +{{< codenew file="service/networking/ingress-resource-backend.yaml" >}} + + +创建了如上的 Ingress 之后,你可以使用下面的命令查看它: + +```bash +kubectl describe ingress ingress-resource-backend +``` + +``` +Name: ingress-resource-backend +Namespace: default +Address: +Default backend: APIGroup: k8s.example.com, Kind: StorageBucket, Name: static-assets +Rules: + Host Path Backends + ---- ---- -------- + * + /icons APIGroup: k8s.example.com, Kind: StorageBucket, Name: icon-assets +Annotations: +Events: +``` ### 路径类型 {#path-types} -Ingress 中的每个路径都有对应的路径类型。当前支持的路径类型有三种: +Ingress 中的每个路径都需要有对应的路径类型(Path Type)。未明确设置 `pathType` +的路径无法通过合法性检查。当前支持的路径类型有三种: -* _`ImplementationSpecific`_ (默认):对于这种类型,匹配取决于 IngressClass。 +* `ImplementationSpecific`:对于这种路径类型,匹配方法取决于 IngressClass。 具体实现可以将其作为单独的 `pathType` 处理或者与 `Prefix` 或 `Exact` 类型作相同处理。 -* _`Exact`_:精确匹配 URL 路径,且对大小写敏感。 +* `Exact`:精确匹配 URL 路径,且区分大小写。 -* _`Prefix`_:基于以 `/` 分隔的 URL 路径前缀匹配。匹配对大小写敏感,并且对路径中的元素逐个完成。 +* `Prefix`:基于以 `/` 分隔的 URL 路径前缀匹配。匹配区分大小写,并且对路径中的元素逐个完成。 路径元素指的是由 
`/` 分隔符分隔的路径中的标签列表。 如果每个 _p_ 都是请求路径 _p_ 的元素前缀,则请求与路径 _p_ 匹配。 @@ -264,8 +302,56 @@ Ingress 中的每个路径都有对应的路径类型。当前支持的路径类 (例如:`/foo/bar` 匹配 `/foo/bar/baz`, 但不匹配 `/foo/barbaz`)。 {{< /note >}} + +### 示例 + +| 类型 | 路径 | 请求路径 | 匹配与否? | +|--------|---------------------------------|-----------------|--------------------------| +| Prefix | `/` | (所有路径) | 是 | +| Exact | `/foo` | `/foo` | 是 | +| Exact | `/foo` | `/bar` | 否 | +| Exact | `/foo` | `/foo/` | 否 | +| Exact | `/foo/` | `/foo` | 否 | +| Prefix | `/foo` | `/foo`, `/foo/` | 是 | +| Prefix | `/foo/` | `/foo`, `/foo/` | 是 | +| Prefix | `/aaa/bb` | `/aaa/bbb` | 否 | +| Prefix | `/aaa/bbb` | `/aaa/bbb` | 是 | +| Prefix | `/aaa/bbb/` | `/aaa/bbb` | 是,忽略尾部斜线 | +| Prefix | `/aaa/bbb` | `/aaa/bbb/` | 是,匹配尾部斜线 | +| Prefix | `/aaa/bbb` | `/aaa/bbb/ccc` | 是,匹配子路径 | +| Prefix | `/aaa/bbb` | `/aaa/bbbxyz` | 否,字符串前缀不匹配 | +| Prefix | `/`, `/aaa` | `/aaa/ccc` | 是,匹配 `/aaa` 前缀 | +| Prefix | `/`, `/aaa`, `/aaa/bbb` | `/aaa/bbb` | 是,匹配 `/aaa/bbb` 前缀 | +| Prefix | `/`, `/aaa`, `/aaa/bbb` | `/ccc` | 是,匹配 `/` 前缀 | +| Prefix | `/aaa` | `/ccc` | 否,使用默认后端 | +| 混合 | `/foo` (Prefix), `/foo` (Exact) | `/foo` | 是,优选 Exact 类型 | + +## 主机名通配符 {#hostname-wildcards} + +主机名可以是精确匹配(例如“`foo.bar.com`”)或者使用通配符来匹配 +(例如“`*.foo.com`”)。 +精确匹配要求 HTTP `host` 头部字段与 `host` 字段值完全匹配。 +通配符匹配则要求 HTTP `host` 头部字段与通配符规则中的后缀部分相同。 + + +| 主机 | host 头部 | 匹配与否? | +| ------------ |--------------------| ------------------------------------| +| `*.foo.com` | `bar.foo.com` | 基于相同的后缀匹配 | +| `*.foo.com` | `baz.bar.foo.com` | 不匹配,通配符仅覆盖了一个 DNS 标签 | +| `*.foo.com` | `foo.com` | 不匹配,通配符仅覆盖了一个 DNS 标签 | + +{{< codenew file="service/networking/ingress-wildcard-host.yaml" >}} + -IngressClass 资源包含一个可选的参数字段,可用于为该类引用额外配置。 +IngressClass 资源包含一个可选的 `parameters` 字段,可用于为该类引用额外配置。 -### 废弃的注解 +### 废弃的注解 {#deprecated-annotation} 在 Kubernetes 1.18 版本引入 IngressClass 资源和 `ingressClassName` 字段之前, Ingress 类是通过 Ingress 中的一个 `kubernetes.io/ingress.class` 注解来指定的。 @@ -337,7 +442,7 @@ Ingress 中新的 `ingressClassName` 字段是该注解的替代品,但并非 包括 Ingress 控制器的名称。 ### 默认 Ingress 类 {#default-ingress-class} -您可以将一个特定的 IngressClass 标记为集群默认选项。 +你可以将一个特定的 IngressClass 标记为集群默认 Ingress 类。 将一个 IngressClass 资源的 `ingressclass.kubernetes.io/is-default-class` 注解设置为 `true` 将确保新的未指定 `ingressClassName` 字段的 Ingress 能够分配为这个默认的 IngressClass. @@ -358,15 +463,15 @@ an `ingressClassName` specified. You can resolve this by ensuring that at most 1 IngressClasess are marked as default in your cluster. 
--> {{< caution >}} -如果集群中有多个 IngressClass 被标记为默认,准入控制器将阻止创建新的未指定 `ingressClassName` -的 Ingress 对象。 +如果集群中有多个 IngressClass 被标记为默认,准入控制器将阻止创建新的未指定 +`ingressClassName` 的 Ingress 对象。 解决这个问题只需确保集群中最多只能有一个 IngressClass 被标记为默认。 {{< /caution >}} ## Ingress 类型 {#types-of-ingress} -### 单服务 Ingress {#single-service-ingress} +### 由单个 Service 来完成的 Ingress {#single-service-ingress} -现有的 Kubernetes 概念允许您暴露单个 Service (查看[替代方案](#alternatives))。 +现有的 Kubernetes 概念允许你暴露单个 Service (参见[替代方案](#alternatives))。 你也可以通过指定无规则的 *默认后端* 来对 Ingress 进行此操作。 -{{< codenew file="service/networking/ingress.yaml" >}} +{{< codenew file="service/networking/test-ingress.yaml" >}} -如果使用 `kubectl apply -f` 创建它,则应该能够查看刚刚添加的 Ingress 的状态: +如果使用 `kubectl apply -f` 创建此 Ingress,则应该能够查看刚刚添加的 Ingress 的状态: ```shell kubectl get ingress test-ingress ``` ``` -NAME HOSTS ADDRESS PORTS AGE -test-ingress * 203.0.113.123 80 59s +NAME CLASS HOSTS ADDRESS PORTS AGE +test-ingress external-lb * 203.0.113.123 80 59s ``` {{< note >}} -入口控制器和负载平衡器可能需要一两分钟才能分配 IP 地址。在此之前,您通常会看到地址字段的值被设定为 -``。 +入口控制器和负载平衡器可能需要一两分钟才能分配 IP 地址。 +在此之前,你通常会看到地址字段的值被设定为 ``。 {{< /note >}} -### 简单分列 - -一个分列配置根据请求的 HTTP URI 将流量从单个 IP 地址路由到多个服务。 -Ingress 允许您将负载均衡器的数量降至最低。例如,这样的设置: - -```none -foo.bar.com -> 178.91.123.132 -> / foo service1:4200 - / bar service2:8080 -``` +### 简单扇出 {#simple-fanout} + +一个扇出(fanout)配置根据请求的 HTTP URI 将来自同一 IP 地址的流量路由到多个 Service。 +Ingress 允许你将负载均衡器的数量降至最低。例如,这样的设置: + +{{< mermaid >}} +graph LR; + client([客户端])-. Ingress-管理的
负载均衡器 .->ingress[Ingress, 178.91.123.132]; + ingress-->|/foo|service1[Service service1:4200]; + ingress-->|/bar|service2[Service service2:8080]; + subgraph cluster + ingress; + service1-->pod1[Pod]; + service1-->pod2[Pod]; + service2-->pod3[Pod]; + service2-->pod4[Pod]; + end + classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000; + classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff; + classDef cluster fill:#fff,stroke:#bbb,stroke-width:2px,color:#326ce5; + class ingress,service1,service2,pod1,pod2,pod3,pod4 k8s; + class client plain; + class cluster cluster; +{{}} 将需要一个如下所示的 Ingress: -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: simple-fanout-example - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / -spec: - rules: - - host: foo.bar.com - http: - paths: - - path: /foo - backend: - serviceName: service1 - servicePort: 4200 - - path: /bar - backend: - serviceName: service2 - servicePort: 8080 -``` +{{< codenew file="service/networking/simple-fanout-example.yaml" >}} -Ingress 控制器将提供实现特定的负载均衡器来满足 Ingress,只要 Service (`service1`,`service2`) 存在。 -当它这样做了,你会在地址字段看到负载均衡器的地址。 +Ingress 控制器将提供实现特定的负载均衡器来满足 Ingress, +只要 Service (`service1`,`service2`) 存在。 +当它这样做时,你会在 Address 字段看到负载均衡器的地址。 {{< note >}} -取决于你使用的 [Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers), +取决于你所使用的 [Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers), 你可能需要创建默认 HTTP 后端[服务](/zh/docs/concepts/services-networking/service/)。 {{< /note >}} @@ -507,15 +608,29 @@ you are using, you may need to create a default-http-backend Name-based virtual hosts support routing HTTP traffic to multiple host names at the same IP address. --> -### 基于名称的虚拟托管 +### 基于名称的虚拟托管 {#name-based-virtual-hosting} 基于名称的虚拟主机支持将针对多个主机名的 HTTP 流量路由到同一 IP 地址上。 -```none -foo.bar.com --| |-> foo.bar.com service1:80 - | 178.91.123.132 | -bar.foo.com --| |-> bar.foo.com service2:80 -``` +{{< mermaid >}} +graph LR; + client([客户端])-. Ingress-管理的
负载均衡器 .->ingress[Ingress, 178.91.123.132]; + ingress-->|Host: foo.bar.com|service1[Service service1:80]; + ingress-->|Host: bar.foo.com|service2[Service service2:80]; + subgraph cluster + ingress; + service1-->pod1[Pod]; + service1-->pod2[Pod]; + service2-->pod3[Pod]; + service2-->pod4[Pod]; + end + classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000; + classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff; + classDef cluster fill:#fff,stroke:#bbb,stroke-width:2px,color:#326ce5; + class ingress,service1,service2,pod1,pod2,pod3,pod4 k8s; + class client plain; + class cluster cluster; +{{}} -如果您创建的 Ingress 资源没有规则中定义的任何主机,则可以匹配指向 Ingress 控制器 IP 地址 -的任何网络流量,而无需基于名称的虚拟主机。 +如果你创建的 Ingress 资源没有在 `rules` 中定义的任何 `hosts`,则可以匹配指向 +Ingress 控制器 IP 地址的任何网络流量,而无需基于名称的虚拟主机。 -例如,以下 Ingress 资源会将 `first.bar.com` 请求的流量路由到 `service1`, -将 `second.foo.com` 请求的流量路由到 `service2`, -而没有在请求中定义主机名的 IP 地址的流量路由(即,不提供请求标头)到 `service3`。 +例如,以下 Ingress 会将针对 `first.bar.com` 的请求流量路由到 `service1`, +将针对 `second.foo.com` 的请求流量路由到 `service2`, +而针对该 IP 地址的、没有在请求中定义主机名的请求流量会被路由(即,不提供请求标头) +到 `service3`。 -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: name-virtual-host-ingress -spec: - rules: - - host: first.bar.com - http: - paths: - - backend: - serviceName: service1 - servicePort: 80 - - host: second.foo.com - http: - paths: - - backend: - serviceName: service2 - servicePort: 80 - - http: - paths: - - backend: - serviceName: service3 - servicePort: 80 -``` +{{< codenew file="service/networking/name-virtual-host-ingress-no-third-host.yaml" >}} 在 Ingress 中引用此 Secret 将会告诉 Ingress 控制器使用 TLS 加密从客户端到负载均衡器的通道。 -你需要确保创建的 TLS Secret 来自包含 `sslexample.foo.com` 的公用名称(CN)的证书。 +你需要确保创建的 TLS Secret 创建自包含 `sslexample.foo.com` 的公用名称(CN)的证书。 这里的公共名称也被称为全限定域名(FQDN)。 -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: tls-example-ingress -spec: - tls: - - hosts: - - sslexample.foo.com - secretName: testsecret-tls - rules: - - host: sslexample.foo.com - http: - paths: - - path: / - backend: - serviceName: service1 - servicePort: 80 -``` +{{< codenew file="service/networking/tls-example-ingress.yaml" >}} -### 负载均衡 +### 负载均衡 {#load-balancing} Ingress 控制器启动引导时使用一些适用于所有 Ingress 的负载均衡策略设置, 例如负载均衡算法、后端权重方案和其他等。 @@ -694,8 +750,9 @@ specific documentation to see how they handle health checks ( [nginx](https://git.k8s.io/ingress-nginx/README.md), [GCE](https://git.k8s.io/ingress-gce/README.md#health-checks)). --> -值得注意的是,即使健康检查不是通过 Ingress 直接暴露的,在 Kubernetes -中存在并行概念,比如[就绪检查](/zh/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) +值得注意的是,尽管健康检查不是通过 Ingress 直接暴露的,在 Kubernetes +中存在并行的概念,比如 +[就绪检查](/zh/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/), 允许你实现相同的目的。 请检查特定控制器的说明文档,以了解它们是怎样处理健康检查的 ( [nginx](https://git.k8s.io/ingress-nginx/README.md), @@ -706,7 +763,7 @@ specific documentation to see how they handle health checks ( To update an existing Ingress to add a new Host, you can update it by editing the resource: --> -## 更新 Ingress +## 更新 Ingress {#updating-an-ingress} 要更新现有的 Ingress 以添加新的 Host,可以通过编辑资源来对其进行更新: @@ -753,6 +810,7 @@ spec: serviceName: service1 servicePort: 80 path: /foo + pathType: Prefix - host: bar.baz.com http: paths: @@ -760,6 +818,7 @@ spec: serviceName: service2 servicePort: 80 path: /foo + pathType: Prefix .. 
``` @@ -808,28 +867,13 @@ You can achieve the same outcome by invoking `kubectl replace -f` on a modified ## Failing across availability zones Techniques for spreading traffic across failure domains differs between cloud providers. -Please check the documentation of the relevant [Ingress controller](/docs/concepts/services-networking/ingress-controllers) for details. You can also refer to the [federation documentation](https://github.com/kubernetes-sigs/federation-v2) -for details on deploying Ingress in a federated cluster. +Please check the documentation of the relevant [Ingress controller](/docs/concepts/services-networking/ingress-controllers) for details. --> ## 跨可用区失败 {#failing-across-availability-zones} 不同的云厂商使用不同的技术来实现跨故障域的流量分布。详情请查阅相关 Ingress 控制器的文档。 -请查看相关[ Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers) 的文档以了解详细信息。 -你还可以参考[联邦文档](https://github.com/kubernetes-sigs/federation-v2),以获取有关在联合集群中部署 Ingress 的详细信息。 - - -## 未来工作 -跟踪 [SIG Network](https://github.com/kubernetes/community/tree/master/sig-network) -的活动以获得有关 Ingress 和相关资源演变的更多细节。 -你还可以跟踪 [Ingress 仓库](https://github.com/kubernetes/ingress/tree/master) -以获取有关各种 Ingress 控制器的更多细节。 +请查看相关 [Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers) +的文档以了解详细信息。 -* 使用 [Service.Type=LoadBalancer](/docs/concepts/services-networking/service/#loadbalancer) -* 使用 [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) +* 使用 [Service.Type=LoadBalancer](/zh/docs/concepts/services-networking/service/#loadbalancer) +* 使用 [Service.Type=NodePort](/zh/docs/concepts/services-networking/service/#nodeport) ## {{% heading "whatsnext" %}} * 进一步了解 [Ingress API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io) * 进一步了解 [Ingress 控制器](/zh/docs/concepts/services-networking/ingress-controllers/) -* [使用 NGINX 控制器在 Minikube 上安装 Ingress](/zh/docs/tasks/access-application-cluster/ingress-minikube) +* [使用 NGINX 控制器在 Minikube 上安装 Ingress](/zh/docs/tasks/access-application-cluster/ingress-minikube/) diff --git a/content/zh/docs/concepts/workloads/controllers/deployment.md b/content/zh/docs/concepts/workloads/controllers/deployment.md index c5fd7b38a42ef..af26b817cb74b 100644 --- a/content/zh/docs/concepts/workloads/controllers/deployment.md +++ b/content/zh/docs/concepts/workloads/controllers/deployment.md @@ -203,7 +203,7 @@ Follow the steps given below to create the above Deployment: ``` Waiting for rollout to finish: 2 out of 3 new replicas have been updated... 
- deployment.apps/nginx-deployment successfully rolled out + deployment "nginx-deployment" successfully rolled out ``` ## API 参考 -* [Kubernetes API 概述](/docs/reference/using-api/api-overview/) - Kubernetes API 概述。 -* Kubernetes API 版本 - * [1.17](/docs/reference/generated/kubernetes-api/v1.17/) - * [1.16](/docs/reference/generated/kubernetes-api/v1.16/) - * [1.15](/docs/reference/generated/kubernetes-api/v1.15/) - * [1.14](/docs/reference/generated/kubernetes-api/v1.14/) - * [1.13](/docs/reference/generated/kubernetes-api/v1.13/) +* [Kubernetes API 概述](/zh/docs/reference/using-api/api-overview/) - Kubernetes API 概述。 +* [Kubernetes API 参考 {{< latest-version >}}](/docs/reference/generated/kubernetes-api/{{< latest-version >}}/) ## CLI 参考 -* [kubectl](/docs/user-guide/kubectl-overview) - 主要的 CLI 工具,用于运行命令和管理 Kubernetes 集群。 - * [JSONPath](/docs/user-guide/jsonpath/) - 通过 kubectl 使用 [JSONPath 表达式](http://goessner.net/articles/JsonPath/) 的语法指南。 -* [kubeadm](/docs/admin/kubeadm/) - 此 CLI 工具可轻松配置安全的 Kubernetes 集群。 -* [kubefed](/docs/admin/kubefed/) - 此 CLI 工具可帮助您管理集群联邦。 +* [kubectl](/zh/docs/reference/kubectl/overview/) - 主要的 CLI 工具,用于运行命令和管理 Kubernetes 集群。 + * [JSONPath](/zh/docs/reference/kubectl/jsonpath/) - 通过 kubectl 使用 [JSONPath 表达式](http://goessner.net/articles/JsonPath/) 的语法指南。 +* [kubeadm](/zh/docs/reference/setup-tools/kubeadm/) - 此 CLI 工具可轻松配置安全的 Kubernetes 集群。 -## 配置参考 - -* [kubelet](/docs/admin/kubelet/) - 在每个节点上运行的主 *节点代理* 。kubelet 采用一组 PodSpecs 并确保所描述的容器健康地运行。 -* [kube-apiserver](/docs/admin/kube-apiserver/) - REST API,用于验证和配置 API 对象(如 pod,服务,副本控制器)的数据。 -* [kube-controller-manager](/docs/admin/kube-controller-manager/) - 一个守护进程,它嵌入到了 Kubernetes 的附带的核心控制循环。 -* [kube-proxy](/docs/admin/kube-proxy/) - 可以跨一组后端进行简单的 TCP/UDP 流转发或循环 TCP/UDP 转发。 -* [kube-scheduler](/docs/admin/kube-scheduler/) - 一个调度程序,用于管理可用性、性能和容量。 - +## 组件参考 + +* [kubelet](/zh/docs/reference/command-line-tools-reference/kubelet/) - 在每个节点上运行的主 *节点代理* 。kubelet 采用一组 PodSpecs 并确保所描述的容器健康地运行。 +* [kube-apiserver](/zh/docs/reference/command-line-tools-reference/kube-apiserver/) - REST API,用于验证和配置 API 对象(如 Pod、服务或副本控制器等)的数据。 +* [kube-controller-manager](/zh/docs/reference/command-line-tools-reference/kube-controller-manager/) - 一个守护进程,它嵌入到了 Kubernetes 的附带的核心控制循环。 +* [kube-proxy](/zh/docs/reference/command-line-tools-reference/kube-proxy/) - 可进行简单的 TCP/UDP 流转发或针对一组后端执行轮流 TCP/UDP 转发。 +* [kube-scheduler](/zh/docs/reference/command-line-tools-reference/kube-scheduler/) - 一个调度程序,用于管理可用性、性能和容量。 + * [kube-scheduler 策略](/zh/docs/reference/scheduling/policies) + * [kube-scheduler 配置](/zh/docs/reference/scheduling/config#profiles) + + + + +云供应商,有时也称作云服务供应商(CSPs)提供云计算平台或服务。 + +很多云供应商提供托管的基础设施(也称作基础设施即服务或 IaaS)。 +针对托管的基础设施,云供应商负责服务器、存储和网络,而用户(你) +负责管理其上运行的各层软件,例如运行一个 Kubernetes 集群。 + +你也会看到 Kubernetes 被作为托管服务提供;有时也称作平台即服务或 PaaS。 +针对托管的 Kubernetes,你的云供应商负责 Kubernetes 的控制面以及 +{{< glossary_tooltip term_id="node" text="节点" >}}及他们所依赖的基础设施: +网络、存储以及其他一些诸如负载均衡器之类的元素。 + diff --git a/content/zh/docs/reference/glossary/container-runtime.md b/content/zh/docs/reference/glossary/container-runtime.md index d5568e7fb8213..7a68b6fd5c043 100644 --- a/content/zh/docs/reference/glossary/container-runtime.md +++ b/content/zh/docs/reference/glossary/container-runtime.md @@ -2,7 +2,7 @@ title: 容器运行环境(Container Runtime) id: container-runtime date: 2019-06-05 -full_link: /docs/reference/generated/container-runtime +full_link: /docs/setup/production-environment/container-runtimes short_description: > 容器运行环境是负责运行容器的软件。 @@ -16,7 +16,7 @@ tags: title: Container Runtime 
id: container-runtime date: 2019-06-05 -full_link: /docs/reference/generated/container-runtime +full_link: /docs/setup/production-environment/container-runtimes short_description: > The container runtime is the software that is responsible for running containers. @@ -35,11 +35,11 @@ The container runtime is the software that is responsible for running containers -Kubernetes 支持多个容器运行环境: [Docker](http://www.docker.com)、 -[containerd](https://containerd.io)、[cri-o](https://cri-o.io/)、 -[rktlet](https://github.com/kubernetes-incubator/rktlet) 以及任何实现 [Kubernetes CRI (容器运行环境接口)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md)。 +Kubernetes 支持多个容器运行环境: {{< glossary_tooltip term_id="docker">}}、 +{{< glossary_tooltip term_id="containerd" >}}、{{< glossary_tooltip term_id="cri-o" >}} +以及任何实现 [Kubernetes CRI (容器运行环境接口)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md)。 diff --git a/content/zh/docs/reference/glossary/endpoint.md b/content/zh/docs/reference/glossary/endpoint.md new file mode 100644 index 0000000000000..ee9342cd6afd2 --- /dev/null +++ b/content/zh/docs/reference/glossary/endpoint.md @@ -0,0 +1,36 @@ +--- +title: 端点(Endpoints) +id: endpoints +date: 2020-04-23 +full_link: +short_description: > + 端点负责记录与服务(Service)的选择器相匹配的 Pods 的 IP 地址。 + +aka: +tags: +- networking +--- + 端点负责记录与服务的{{< glossary_tooltip text="选择器" term_id="selector" >}}相匹配的 Pods 的 IP 地址。 + + + + + +端点可以手动配置到{{< glossary_tooltip text="服务(Service)" term_id="service" >}}上,而不必设置选择算符。 +{{< glossary_tooltip text="EndpointSlice" term_id="endpoint-slice" >}} 资源为 Endpoints +提供了一种可伸缩、可扩展的替代方案。 diff --git a/content/zh/docs/reference/glossary/kube-apiserver.md b/content/zh/docs/reference/glossary/kube-apiserver.md index c2d0e8a9eb56c..05f9605b2adcf 100644 --- a/content/zh/docs/reference/glossary/kube-apiserver.md +++ b/content/zh/docs/reference/glossary/kube-apiserver.md @@ -2,9 +2,9 @@ title: kube-apiserver id: kube-apiserver date: 2018-04-12 -full_link: /docs/reference/generated/kube-apiserver/ +full_link: /zh/docs/reference/command-line-tools-reference/kube-apiserver/ short_description: > - 主节点上负责提供 Kubernetes API 服务的组件;它是 Kubernetes 控制面的前端。 + 提供 Kubernetes API 服务的控制面组件。 aka: tags: @@ -16,9 +16,9 @@ tags: title: kube-apiserver id: kube-apiserver date: 2018-04-12 -full_link: /docs/reference/generated/kube-apiserver/ +full_link: /zh/docs/reference/command-line-tools-reference/kube-apiserver/ short_description: > - Component on the master that exposes the Kubernetes API. It is the front-end for the Kubernetes control plane. + Control plane component that serves the Kubernetes API. 
aka: tags: @@ -27,18 +27,21 @@ tags: --> - -主节点上负责提供 Kubernetes API 服务的组件;它是 Kubernetes 控制面的前端。 +API 服务器是 Kubernetes {{< glossary_tooltip text="控制面" term_id="control-plane" >}}的组件, +该组件公开了 Kubernetes API。 +API 服务器是 Kubernetes 控制面的前端。 - -kube-apiserver 在设计上考虑了水平扩缩的需要。 -换言之,通过部署多个实例可以实现扩缩。 -参见[构造高可用集群](/docs/admin/high-availability/)。 - +Kubernetes API 服务器的主要实现是 [kube-apiserver](/zh/docs/reference/command-line-tools-reference/kube-apiserver/)。 +kube-apiserver 设计上考虑了水平伸缩,也就是说,它可通过部署多个实例进行伸缩。 +你可以运行 kube-apiserver 的多个实例,并在这些实例之间平衡流量。 diff --git a/content/zh/docs/reference/glossary/mainfest.md b/content/zh/docs/reference/glossary/manifest.md similarity index 91% rename from content/zh/docs/reference/glossary/mainfest.md rename to content/zh/docs/reference/glossary/manifest.md index 3987ff67d69c0..2f489f87e9b7a 100644 --- a/content/zh/docs/reference/glossary/mainfest.md +++ b/content/zh/docs/reference/glossary/manifest.md @@ -12,7 +12,6 @@ tags: JSON 或 YAML 格式的 Kubernetes API 对象规范。 @@ -31,4 +29,4 @@ tags: -清单指定了在应用该清单时 Kubrenetes 将维护的对象的期望状态。每个配置文件可包含多个清单。 \ No newline at end of file +清单指定了在应用该清单时 Kubrenetes 将维护的对象的期望状态。每个配置文件可包含多个清单。 diff --git a/content/zh/docs/reference/glossary/shuffle-sharding.md b/content/zh/docs/reference/glossary/shuffle-sharding.md new file mode 100644 index 0000000000000..1f8d8a0990077 --- /dev/null +++ b/content/zh/docs/reference/glossary/shuffle-sharding.md @@ -0,0 +1,84 @@ +--- +title: 混排切片(Shuffle Sharding) +id: shuffle-sharding +date: 2020-03-04 +full_link: +short_description: > + 一种将请求指派给队列的技术,其隔离性好过对队列个数哈希取模的方式。 + +aka: +tags: +- fundamental +--- +一种将请求指派给队列的技术,其隔离性好过对队列个数哈希取模的方式。 + + + + + + +我们通常会关心不同的请求序列间的相互隔离问题,目的是为了确保密度较高的 +请求序列不会湮没密度较低的序列。 +将请求放入不同队列的一种简单方法是对请求的某些特征值执行哈希函数, +将结果对队列的个数取模,从而得到要使用的队列的索引。 +这一哈希函数使用请求的与其序列相对应的特征作为其输入。例如,在因特网上, +这一特征通常指的是由源地址、目标地址、协议、源端口和目标端口所组成的 +五元组。 + + +这种简单的基于哈希的模式有一种特性,高密度的请求序列(流)会湮没那些被 +哈希到同一队列的其他低密度请求序列(流)。 +为大量的序列提供较好的隔离性需要提供大量的队列,因此是有问题的。 +混排切片是一种更为灵活的机制,能够更好地将低密度序列与高密度序列隔离。 +混排切片的术语采用了对一叠扑克牌进行洗牌的类比,每个队列可类比成一张牌。 +混排切片技术首先对请求的特定于所在序列的特征执行哈希计算,生成一个长度 +为十几个二进制位或更长的哈希值。 +接下来,用该哈希值作为信息熵的来源,对一叠牌来混排,并对整个一手牌(队列)来洗牌。 +最后,对所有处理过的队列进行检查,选择长度最短的已检查队列作为请求的目标队列。 +在队列数量适中的时候,检查所有已处理的牌的计算量并不大,对于任一给定的 +低密度的请求序列而言,有相当的概率能够消除给定高密度序列的湮没效应。 +当队列数量较大时,检查所有已处理队列的操作会比较耗时,低密度请求序列 +消除一组高密度请求序列的湮没效应的机会也随之降低。因此,选择队列数目 +时要颇为谨慎。 + diff --git a/content/zh/docs/reference/issues-security/_index.md b/content/zh/docs/reference/issues-security/_index.md index fb724fc2a2dac..7d3da26e9798a 100644 --- a/content/zh/docs/reference/issues-security/_index.md +++ b/content/zh/docs/reference/issues-security/_index.md @@ -1,5 +1,4 @@ --- title: Kubernetes 问题和安全 weight: 10 -toc-hide: true --- diff --git a/content/zh/docs/reference/kubernetes-api/api-index.md b/content/zh/docs/reference/kubernetes-api/api-index.md index 674e5ece7edc2..140233e659397 100644 --- a/content/zh/docs/reference/kubernetes-api/api-index.md +++ b/content/zh/docs/reference/kubernetes-api/api-index.md @@ -1,6 +1,6 @@ --- -title: v1.17 +title: v1.19 weight: 50 --- -[Kubernetes API v1.17](/docs/reference/generated/kubernetes-api/v1.17/) \ No newline at end of file +[Kubernetes API v1.19](/docs/reference/generated/kubernetes-api/v1.19/) \ No newline at end of file diff --git a/content/zh/docs/reference/kubernetes-api/labels-annotations-taints.md b/content/zh/docs/reference/kubernetes-api/labels-annotations-taints.md index e271e1885e7aa..5a32824da0bf3 100644 --- a/content/zh/docs/reference/kubernetes-api/labels-annotations-taints.md +++ 
b/content/zh/docs/reference/kubernetes-api/labels-annotations-taints.md @@ -1,5 +1,5 @@ --- -title: 知名标签(Label)、注解(Annotation)和 Taints +title: 知名标签(Label)、注解(Annotation)和 污点(Taint) content_type: concept weight: 60 --- @@ -227,9 +227,9 @@ on the nodes if it makes sense in your topology. --> 示例: -`failure-domain.beta.kubernetes.io/region=us-east-1` +`topology.kubernetes.io/region=us-east-1` -`failure-domain.beta.kubernetes.io/zone=us-east-1c` +`topology.kubernetes.io/zone=us-east-1c` 用于:Node、PersistentVolume @@ -271,5 +271,3 @@ adding the labels manually (or adding support for `PersistentVolumeLabel`). With --> 如果 `PersistentVolumeLabel` 准入控制器不支持自动为 PersistentVolume 打标签,且用户希望防止 pod 跨区域进行卷的挂载, 应考虑手动打标签 (或对 `PersistentVolumeLabel` 增加支持)。如果用户的基础设施没有这种约束,则不需要为卷添加区域标签。 - - diff --git a/content/zh/docs/reference/setup-tools/_index.md b/content/zh/docs/reference/setup-tools/_index.md index 9bb6809cde29f..9cd9d94f0deba 100644 --- a/content/zh/docs/reference/setup-tools/_index.md +++ b/content/zh/docs/reference/setup-tools/_index.md @@ -1,5 +1,4 @@ --- title: 安装工具 weight: 50 -toc-hide: true --- diff --git a/content/zh/docs/reference/setup-tools/kubeadm/_index.md b/content/zh/docs/reference/setup-tools/kubeadm/_index.md index 6863791207ba9..02e5561dba468 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/_index.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/_index.md @@ -1,5 +1,58 @@ --- title: "Kubeadm" weight: 10 -toc-hide: true +no_list: true +content_type: concept +card: + name: reference + weight: 40 --- + + + +Kubeadm 是一个提供了 `kubeadm init` 和 `kubeadm join` 的工具,作为创建 Kubernetes 集群的 “快捷途径” 的最佳实践。 + + +kubeadm 通过执行必要的操作来启动和运行最小可用集群。按照设计,它只关注启动引导,而非配置机器。同样的,安装各种 “锦上添花” 的扩展,例如 Kubernetes Dashboard, +监控方案,以及特定云平台的扩展,都不在讨论范围内。 + + +相反,我们希望在 Kubeadm 之上构建更高级别以及更加合规的工具,理想情况下,使用 kubeadm 作为所有部署工作的基准将会更加易于创建一致性集群。 + + +## 如何安装 + + +要安装 kubeadm, 请查阅[安装指南](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). + +## {{% heading "whatsnext" %}} + + +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init) 用于搭建控制平面节点 +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join) 用于搭建工作节点并将其加入到集群中 +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade) 用于升级 Kubernetes 集群到新版本 +* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config) 如果你使用了 v1.7.x 或更低版本的 kubeadm 版本初始化你的集群,则使用 `kubeadm upgrade` 来配置你的集群 +* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token) 用于管理 `kubeadm join` 使用的令牌 +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset) 用于恢复通过 `kubeadm init` 或者 `kubeadm join` 命令对节点进行的任何变更 +* [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version) 用于打印 kubeadm 的版本信息 +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha) 用于预览一组可用于收集社区反馈的特性 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md new file mode 100644 index 0000000000000..dd1e3f5c78604 --- /dev/null +++ b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs.md @@ -0,0 +1,55 @@ + + +### 概要 + + + + +与处理 kubernetes 证书相关的命令 + + +### 选项 + + ++++ + + + + + + + + + + + +
-h, --help
certs 命令的帮助
+### 继承于父命令的选项
--rootfs string
[实验] 指向 '真实' 主机根文件系统的路径。
+ + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_generate-csr.md b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_generate-csr.md new file mode 100644 index 0000000000000..cbf2a6368e514 --- /dev/null +++ b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_generate-csr.md @@ -0,0 +1,111 @@ + + +### 概要 + + +为运行控制平面所需的所有证书生成密钥和证书签名请求(CSR)。该命令会生成部分 kubeconfig 文件, +其中 "users > user > client-key-data" 字段包含私钥数据,并为每个 kubeconfig 文件创建一个随附的".csr"文件。 + + +该命令设计用于 [Kubeadm 外部 CA 模式](https://kubernetes.io/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#external-ca-mode)。 +它生成 CSR,然后你可以将其提交给外部证书颁发机构进行签名。 + + +然后,应使用 ".crt" 作为文件扩展名将 PEM 编码的签名证书与密钥文件一起保存,或者,对于 kubeconfig 文件, +PEM 编码的签名证书应使用 base64 编码,并添加到 "users > user > client-certificate-data" 字段。 + +``` +kubeadm alpha certs generate-csr [flags] +``` + + +### 示例 + + +``` + # 以下命令将为所有控制平面证书和 kubeconfig 文件生成密钥和 CSR : + kubeadm alpha certs generate-csr --kubeconfig-dir /tmp/etc-k8s --cert-dir /tmp/etc-k8s/pki +``` + + +### 选项 + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--cert-dir string
保存证书的路径
--config string
kubeadm 配置文件的路径。
-h, --help
generate-csr 命令的帮助
--kubeconfig-dir string     默认值:"/etc/kubernetes"
保存 kubeconfig 文件的路径。
+### 继承于父命令的选项
--rootfs string
[实验] 指向 '真实' 主机根文件系统的路径。
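生成 CSR 之后,下一步由外部 CA 完成签名。下面是一个使用 openssl 进行签名的简单示意,其中 ca.crt、ca.key、admin.conf.csr、admin.crt 等文件名均为假设的示例,实际路径取决于你的环境以及上面 `--kubeconfig-dir` 和 `--cert-dir` 的设置:

```shell
# 使用外部 CA(假设其证书与私钥为 ca.crt / ca.key)为某个 CSR 签名
openssl x509 -req -in /tmp/etc-k8s/admin.conf.csr \
  -CA ca.crt -CAkey ca.key -CAcreateserial \
  -days 365 -out admin.crt

# 对于 kubeconfig 文件,将签名后的证书做 base64 编码,
# 再填入 "users > user > client-certificate-data" 字段
base64 admin.crt | tr -d '\n'
```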
+ + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md new file mode 100644 index 0000000000000..644a8c604eea4 --- /dev/null +++ b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md @@ -0,0 +1,71 @@ + + +### 概要 + + +TLS 引导后更新与 kubelet 相关的设置 + +``` +kubeadm init phase kubelet-finalize [flags] +``` + + +### 示例 + + +``` + # 在 TLS 引导后更新与 kubelet 相关的设置 + kubeadm init phase kubelet-finalize all --config +``` + + +### 选项 + + ++++ + + + + + + + + + + + +
-h, --help
kubelet-finalize 操作的帮助
+### 继承于父命令的选项
--rootfs string
[实验] 指向 '真实' 主机根文件系统的路径。
+ + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md new file mode 100644 index 0000000000000..d99d6036a1325 --- /dev/null +++ b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md @@ -0,0 +1,89 @@ + + +### 概要 + + + +运行所有 kubelet-finalize 阶段 + +``` +kubeadm init phase kubelet-finalize all [flags] +``` + + +### 示例 + + +``` + # 在 TLS 引导后更新与 kubelet 相关的设置 + kubeadm init phase kubelet-finalize all --config +``` + + +### 选项 + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--cert-dir string     默认值: "/etc/kubernetes/pki"
保存和存储证书的路径。
--config string
kubeadm 配置文件的路径。
-h, --help
all 操作的帮助
+### 继承于父命令的选项
--rootfs string
[实验] 指向 '真实' 主机根文件系统的路径。
+ + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md new file mode 100644 index 0000000000000..77517935cab55 --- /dev/null +++ b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md @@ -0,0 +1,75 @@ + + +### 概要 + + + + +启用 kubelet 客户端证书轮换 + +``` +kubeadm init phase kubelet-finalize experimental-cert-rotation [flags] +``` + + +### 选项 + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--cert-dir string     默认值: "/etc/kubernetes/pki"
保存和存储证书的路径。
--config string
kubeadm 配置文件的路径。
-h, --help
experimental-cert-rotation 操作的帮助
+### 继承于父命令的选项
--rootfs string
[实验] 指向 '真实' 主机根文件系统的路径。
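下面是一个调用示意(假设使用上面列出的默认证书目录,仅作演示):

```shell
# 在 TLS 引导完成后启用 kubelet 客户端证书轮换
kubeadm init phase kubelet-finalize experimental-cert-rotation --cert-dir /etc/kubernetes/pki
```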
+ + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md new file mode 100644 index 0000000000000..2fd33ffc6312c --- /dev/null +++ b/content/zh/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md @@ -0,0 +1,69 @@ + + +### 概要 + + + + +执行 kubeadm 升级节点的预检。 + +``` +kubeadm upgrade node phase preflight [flags] +``` + + +### 选项 + + ++++ + + + + + + + + + + + + + + + + + + + +
-h, --help
preflight 操作的帮助
--ignore-preflight-errors stringSlice
错误将显示为警告的检查清单。示例:'IsPrivilegedUser,Swap'。值为'all'表示忽略所有检查的错误。
+### 继承于父命令的选项
--rootfs string
[实验] 指向 '真实' 主机根文件系统的路径。
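例如,可以在升级节点之前单独运行预检,并按上文 `--ignore-preflight-errors` 的说明忽略个别检查项(下面的取值仅作演示):

```shell
# 运行升级前预检,并忽略指定的检查项
kubeadm upgrade node phase preflight --ignore-preflight-errors=IsPrivilegedUser,Swap
```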
+ + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index 93782893aeed5..313f47edb9a49 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -1,5 +1,6 @@ --- title: kubeadm alpha +content_type: concept weight: 90 --- @@ -111,11 +113,8 @@ The subcommand `pivot` can be used to convert a static Pod-hosted control plane {{< tab name="pivot" include="generated/kubeadm_alpha_selfhosting_pivot.md" />}} {{< /tabs >}} +## {{% heading "whatsnext" %}} - -## 接下来 @@ -260,14 +262,14 @@ For more details on each field in the `v1beta2` configuration you can navigate t --> 有关 `v1beta2` 配置中每个字段的更多详细信息,可以访问 [API](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)。 +## {{% heading "whatsnext" %}} + -## 接下来 * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) 引导 Kubernetes 控制平面节点 * [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) 将节点连接到集群 * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) 恢复通过 `kubeadm init` 或 `kubeadm join` 操作对主机所做的任何更改 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md index 8283ac7d22c87..17e604629a922 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md @@ -81,10 +81,8 @@ Using this phase you can join a node as a control-plane instance. {{< tab name="mark-control-plane" include="generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md" />}} {{< /tabs >}} - -## 下一步 +## {{% heading "whatsnext" %}} + @@ -81,14 +83,14 @@ Using this phase you can perform cleanup on this node. 
{{< tab name="cleanup-node" include="generated/kubeadm_reset_phase_cleanup-node.md" />}} {{< /tabs >}} +## {{% heading "whatsnext" %}} + -## 下一步 * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) 引导 Kubernetes 控制平面节点 * [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) 将节点连接到集群 * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) 恢复通过 `kubeadm init` 或 `kubeadm join` 操作对主机所做的任何更改 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md index 33b80a54867dd..22633af056977 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade-phase.md @@ -1,6 +1,7 @@ --- title: kubeadm upgrade phase weight: 90 +content_type: concept --- - -## 接下来 * [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) 引导一个 Kubernetes 控制平面节点 * [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) 将节点加入到群集 * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) 还原 `kubeadm init` 或 `kubeadm join` 命令对主机所做的任何更改 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm.md deleted file mode 100644 index ad5e7a26658d1..0000000000000 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: kubeadm 概述 -weight: 10 -card: - name: reference - weight: 40 ---- - - - - -Kubeadm 是一个工具,它提供了 `kubeadm init` 以及 `kubeadm join` 这两个命令作为快速创建 kubernetes 集群的最佳实践。 - - -kubeadm 通过执行必要的操作来启动和运行一个最小可用的集群。它被故意设计为只关心启动集群,而不是准备节点环境的工作。同样的,诸如安装各种各样的可有可无的插件,例如 Kubernetes 控制面板、监控解决方案以及特定云提供商的插件,这些都不在它负责的范围。 - - -相反,我们期望由一个基于 kubeadm 从更高层设计的更加合适的工具来做这些事情;并且,理想情况下,使用 kubeadm 作为所有部署的基础将会使得创建一个符合期望的集群变得容易。 - - -## 如何安装 -要安装 kubeadm,请参考[安装指南](/docs/setup/production-environment/tools/kubeadm/install-kubeadm)。 - -## 接下可以做什么 - - -* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init) 启动引导一个 Kubernetes 主节点 -* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join) 启动引导一个 Kubernetes 工作节点并且将其加入到集群 -* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade) 更新 Kubernetes 集群到新版本 -* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config) 如果你使用 kubeadm v1.7.x 或者更低版本,你需要对你的集群做一些配置以便使用 `kubeadm upgrade` 命令 -* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token) 使用 `kubeadm join` 来管理令牌 -* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset) 还原之前使用 `kubeadm init` 或者 `kubeadm join` 对节点所作改变 -* [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version) 打印出 kubeadm 版本 -* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha) 预览一组可用的新功能以便从社区搜集反馈 - diff --git a/content/zh/docs/reference/using-api/_index.md b/content/zh/docs/reference/using-api/_index.md index 2f2a114cec93e..8d2c4b502ca34 100644 --- a/content/zh/docs/reference/using-api/_index.md +++ b/content/zh/docs/reference/using-api/_index.md @@ -1,5 +1,4 @@ --- title: 使用 Kubernetes API weight: 10 -toc-hide: true --- diff --git a/content/zh/docs/reference/using-api/health-checks.md b/content/zh/docs/reference/using-api/health-checks.md new file mode 100644 index 0000000000000..7422ea00635b1 --- /dev/null +++ b/content/zh/docs/reference/using-api/health-checks.md @@ -0,0 +1,145 @@ +--- +title: Kubernetes API 健康端点 +content_type: concept +weight: 50 +--- + + + + +Kubernetes {{< glossary_tooltip term_id="kube-apiserver" text="API 服务器" >}} 提供 
API 端点以指示 API 服务器的当前状态。 +本文描述了这些 API 端点,并说明如何使用。 + + + + +## API 健康端点 {#api-endpoints-for-health} + + +Kubernetes API 服务器提供 3 个 API 端点(`healthz`、`livez` 和 `readyz`)来表明 API 服务器的当前状态。 +`healthz` 端点已被弃用(自 Kubernetes v1.16 起),你应该使用更为明确的 `livez` 和 `readyz` 端点。 +`livez` 端点可与 `--livez-grace-period` [标志](/zh/docs/reference/command-line-tools-reference/kube-apiserver)一起使用,来指定启动持续时间。 +为了正常关机,你可以使用 `/readyz` 端点并指定 `--shutdown-delay-duration` [标志](/zh/docs/reference/command-line-tools-reference/kube-apiserver)。 +检查 API 服务器的 `health`/`livez`/`readyz` 端点的机器应依赖于 HTTP 状态代码。 +状态码 `200` 表示 API 服务器是 `healthy`、`live` 还是 `ready`,具体取决于所调用的端点。 +以下更详细的选项供操作人员使用,用来调试其集群或专门调试 API 服务器的状态。 + + +以下示例将显示如何与运行状况 API 端点进行交互。 + + +对于所有端点,都可以使用 `verbose` 参数来打印检查项以及检查状态。 +这对于操作人员调试 API 服务器的当前状态很有用,这些不打算给机器使用: + +```shell +curl -k https://localhost:6443/livez?verbose +``` + + +或从具有身份验证的远程主机: + +```shell +kubectl get --raw='/readyz?verbose' +``` + + +输出将如下所示: + +```shell +[+]ping ok +[+]log ok +[+]etcd ok +[+]poststarthook/start-kube-apiserver-admission-initializer ok +[+]poststarthook/generic-apiserver-start-informers ok +[+]poststarthook/start-apiextensions-informers ok +[+]poststarthook/start-apiextensions-controllers ok +[+]poststarthook/crd-informer-synced ok +[+]poststarthook/bootstrap-controller ok +[+]poststarthook/rbac/bootstrap-roles ok +[+]poststarthook/scheduling/bootstrap-system-priority-classes ok +[+]poststarthook/start-cluster-authentication-info-controller ok +[+]poststarthook/start-kube-aggregator-informers ok +[+]poststarthook/apiservice-registration-controller ok +[+]poststarthook/apiservice-status-available-controller ok +[+]poststarthook/kube-apiserver-autoregistration ok +[+]autoregister-completion ok +[+]poststarthook/apiservice-openapi-controller ok +healthz check passed +``` + + +Kubernetes API 服务器也支持排除特定的检查项。 +查询参数也可以像以下示例一样进行组合: + +```shell +curl -k 'https://localhost:6443/readyz?verbose&exclude=etcd' +``` + + +输出显示排除了 `etcd` 检查: + +```shell +[+]ping ok +[+]log ok +[+]etcd excluded: ok +[+]poststarthook/start-kube-apiserver-admission-initializer ok +[+]poststarthook/generic-apiserver-start-informers ok +[+]poststarthook/start-apiextensions-informers ok +[+]poststarthook/start-apiextensions-controllers ok +[+]poststarthook/crd-informer-synced ok +[+]poststarthook/bootstrap-controller ok +[+]poststarthook/rbac/bootstrap-roles ok +[+]poststarthook/scheduling/bootstrap-system-priority-classes ok +[+]poststarthook/start-cluster-authentication-info-controller ok +[+]poststarthook/start-kube-aggregator-informers ok +[+]poststarthook/apiservice-registration-controller ok +[+]poststarthook/apiservice-status-available-controller ok +[+]poststarthook/kube-apiserver-autoregistration ok +[+]autoregister-completion ok +[+]poststarthook/apiservice-openapi-controller ok +[+]shutdown ok +healthz check passed +``` + + +## 独立健康检查 {#individual-health-check} + +{{< feature-state state="alpha" >}} + + +每个单独的健康检查都会公开一个 http 端点,并且可以单独检查。 +单个运行状况检查的模式为 `/livez/`,其中 `livez` 和 `readyz` 表明你要检查的是 API 服务器是否存活或就绪。 +`` 的路径可以通过上面的 `verbose` 参数发现 ,并采用 `[+]` 和 `ok` 之间的路径。 +这些单独的健康检查不应由机器使用,但对于操作人员调试系统而言,是有帮助的: + +```shell +curl -k https://localhost:6443/livez/etcd +``` diff --git a/content/zh/docs/setup/release/version-skew-policy.md b/content/zh/docs/setup/release/version-skew-policy.md index 518e2490645b8..5ef5a448849ae 100644 --- a/content/zh/docs/setup/release/version-skew-policy.md +++ b/content/zh/docs/setup/release/version-skew-policy.md @@ -30,27 +30,24 @@ Kubernetes 版本号格式为 **x.y.z**,其中 **x** 为大版本号,**y** 更多信息,请参阅 [Kubernetes 
发布版本](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)。 -Kubernetes 项目会维护最近的三个小版本分支。 +Kubernetes 项目会维护最近的三个小版本分支({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}})。 +Kubernetes 1.19 及更高的版本将获得大约1年的补丁支持。 +Kubernetes 1.18 及更早的版本获得大约9个月的补丁支持。 -一些 bug 修复,包括安全修复,根据其安全性和可用性,有可能会回合到这些分支。 -补丁版本会定期或根据需要从这些分支中发布。 最终是否发布是由 -[补丁发布团队](https://github.com/kubernetes/sig-release/blob/master/release-engineering/role-handbooks/patch-release-manager.md#release-timing) -来决定的。补丁发布团队同时也是 -[发布管理者](https://github.com/kubernetes/sig-release/blob/master/release-managers.md)。 -如需了解更多信息,请查看 [Kubernetes 补丁发布](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md)。 +Patch releases are cut from those branches at a [regular cadence](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence), plus additional urgent releases, when required. - -小版本大约每3个月发布一个,所以每个小版本分支会维护9个月。 +一些 bug 修复,包括安全修复,取决于其严重性和可行性,有可能会反向合并到这三个发布分支。 +补丁版本会[定期](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence)或根据需要从这些分支中发布。 +最终是否发布是由[发布管理者](https://github.com/kubernetes/sig-release/blob/master/release-managers.md)来决定的。 +如需了解更多信息,请查看 Kubernetes [补丁发布](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md)。 -* 最新的 `kube-apiserver` 版本号如果是 **1.13** -* 其他 `kube-apiserver` 版本号只能是 **1.13** 或 **1.12** +* 最新的 `kube-apiserver` 版本号如果是 **{{< skew latestVersion >}}** +* 则受支持的 `kube-apiserver` 版本号包括 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** ### kubelet @@ -87,13 +84,13 @@ Example: 例如: -* `kube-apiserver` 版本号如果是 **1.13** -* `kubelet` 只能是 **1.13** 、 **1.12** 和 **1.11** +* `kube-apiserver` 版本号如果是 **{{< skew latestVersion >}}** +* 受支持的的 `kubelet` 版本将包括 **{{< skew latestVersion >}}**、**{{< skew prevMinorVersion >}}** 和 **{{< skew oldestMinorVersion >}}** 例如: -* 如果 `kube-apiserver` 的多个实例同时存在 **1.13** 和 **1.12** -* `kubelet` 只能是 **1.12** 或 **1.11**(**1.13** 不再支持,因为它比**1.12**版本的 `kube-apiserver` 更新) +* 如果 `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** +* `kubelet` 的受支持版本将是 **{{< skew prevMinorVersion >}}** 和 **{{< skew oldestMinorVersion >}}** +(**{{< skew latestVersion >}}** 不再支持,因为它比 **{{< skew prevMinorVersion >}}** 版本的 `kube-apiserver` 更新) 例如: -* 如果 `kube-apiserver` 版本号为 **1.13** -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本支持 **1.13** 和 **1.12** +* 如果 `kube-apiserver` 版本号为 **{{< skew latestVersion >}}** +* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本支持 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** 例如: -* `kube-apiserver` 实例同时存在 **1.13** 和 **1.12** 版本 +* `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** 版本 * `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 可以通过 load balancer 与所有的 `kube-apiserver` 通信 -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 可选版本为 **1.12**(**1.13** 不再支持,因为它比 **1.12** 版本的 `kube-apiserver` 更新) +* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 可选版本为 **{{< skew prevMinorVersion >}}** +(**{{< skew latestVersion >}}** 不再支持,因为它比 **{{< skew prevMinorVersion >}}** 版本的 `kube-apiserver` 更新) ### kubectl @@ -166,13 +165,13 @@ Example: 例如: -* 如果 `kube-apiserver` 当前是 **1.13** 版本 -* `kubectl` 则支持 **1.14** 、**1.13** 和 **1.12** +* 如果 `kube-apiserver` 当前是 **{{< skew latestVersion >}}** 版本 +* `kubectl` 
则支持 **{{< skew nextMinorVersion >}}**、**{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** 例如: -* `kube-apiserver` 多个实例同时存在 **1.13** 和 **1.12** -* `kubectl` 可选的版本为 **1.13** 和 **1.12**(其他版本不再支持,因为它会比其中某个 `kube-apiserver` 实例高或低一个小版本) +* `kube-apiserver` 多个实例同时存在 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** +* `kubectl` 可选的版本为 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}**(其他版本不再支持,因为它会比其中某个 `kube-apiserver` 实例高或低一个小版本) 组件之间支持的版本偏差会影响组件升级的顺序。 -本节描述组件从版本 **1.n** 到 **1.(n+1)** 的升级次序。 +本节描述组件从版本 **{{< skew prevMinorVersion >}}** 到 **{{< skew latestVersion >}}** 的升级次序。 ### kube-apiserver @@ -212,28 +211,28 @@ Pre-requisites: 前提条件: -* 单实例集群时,`kube-apiserver` 实例版本号须是 **1.n** -* HA 集群时,所有的 `kube-apiserver` 实例版本号必须是 **1.n** 或 **1.(n+1)**(确保满足最新和最旧的实例小版本号相差不大于1) -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本号必须为 **1.n**(确保不高于 API server 的版本,且版本号相差不大于1) -* `kubelet` 实例版本号必须是 **1.n** 或 **1.(n-1)**(确保版本号不高于 API server,且版本号相差不大于2) +* 单实例集群中,`kube-apiserver` 实例版本号须是 **{{< skew prevMinorVersion >}}** +* 高可用(HA)集群中,所有的 `kube-apiserver` 实例版本号必须是 **{{< skew prevMinorVersion >}}** 或 **{{< skew latestVersion >}}**(确保满足最新和最旧的实例小版本号相差不大于1) +* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本号必须为 **{{< skew prevMinorVersion >}}**(确保不高于 API server 的版本,且版本号相差不大于1) +* `kubelet` 实例版本号必须是 **{{< skew prevMinorVersion >}}** 或 **{{< skew oldestMinorVersion >}}**(确保版本号不高于 API server,且版本号相差不大于2) * 注册的 admission 插件必须能够处理新的 `kube-apiserver` 实例发送过来的数据: * `ValidatingWebhookConfiguration` 和 `MutatingWebhookConfiguration` 对象必须升级到可以处理 - **1.(n+1)** 版本新加的 REST 资源(或使用 1.15 版本提供的 + **{{< skew latestVersion >}}** 版本新加的 REST 资源(或使用 1.15 版本提供的 [`matchPolicy: Equivalent` 选项](/zh/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy)) - * 插件可以处理任何 **1.(n+1)** 版本新的 REST 资源数据和新加的字段 + * 插件可以处理任何 **{{< skew latestVersion >}}** 版本新的 REST 资源数据和新加的字段 -升级 `kube-apiserver` 到 **1.(n+1)** +升级 `kube-apiserver` 到 **{{< skew latestVersion >}}** {{< note >}} -跟据 [API 弃用策略](/zh/docs/reference/using-api/deprecation-policy/) 和 +根据 [API 弃用策略](/zh/docs/reference/using-api/deprecation-policy/) 和 [API 变更指南](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api_changes.md), `kube-apiserver` 不能跨小版本号升级,即使是单实例集群也不可以。 @@ -255,31 +254,31 @@ require `kube-apiserver` to not skip minor versions when upgrading, even in sing 前提条件: -* `kube-apiserver` 实例必须为 **1.(n+1)** (HA 集群中,所有的`kube-apiserver` 实例必须在组件升级前完成升级) +* `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** (HA 集群中,所有的`kube-apiserver` 实例必须在组件升级前完成升级) -升级 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 到 **1.(n+1)** +升级 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 到 **{{< skew latestVersion >}}** ### kubelet 前提条件: -* `kube-apiserver` 实例必须为 **1.(n+1)** 版本 +* `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** 版本 -`kubelet` 可以升级到 **1.(n+1)**(或者停留在 **1.n** 或 **1.(n-1)**) +`kubelet` 可以升级到 **{{< skew latestVersion >}}**(或者停留在 **{{< skew prevMinorVersion >}}** 或 **{{< skew oldestMinorVersion >}}**) +### kube-proxy + + +* `kube-proxy` 必须与节点上的 `kubelet` 的小版本相同 +* `kube-proxy` 一定不能比 `kube-apiserver` 小版本更新 +* `kube-proxy` 最多只能比 `kube-apiserver` 早两个小版本 + + +例如: + +如果 `kube-proxy` 的版本是 **{{< skew oldestMinorVersion >}}**: + +* `kubelet` 版本必须相同,也是 **{{< skew oldestMinorVersion >}}** +* `kube-apiserver` 版本必须在 **{{< skew oldestMinorVersion >}}** 到 **{{< skew latestVersion >}}** 之间(闭区间) 
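在实际升级之前,可以用下面的命令粗略核对集群中各组件的当前版本,以便对照上述版本偏差规则。其中 kube-proxy 的 DaemonSet 名称以 kubeadm 部署的集群为例(假设名为 kube-proxy),输出仅作示意:

```shell
# 查看 kubectl 客户端与 kube-apiserver 的版本
kubectl version --short

# 查看各节点上 kubelet 的版本(VERSION 列)
kubectl get nodes

# 查看 kube-proxy 使用的镜像版本(假设 DaemonSet 名为 kube-proxy)
kubectl -n kube-system get daemonset kube-proxy \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```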
\ No newline at end of file diff --git a/content/zh/docs/sitemap.md b/content/zh/docs/sitemap.md deleted file mode 100644 index 433569cab834b..0000000000000 --- a/content/zh/docs/sitemap.md +++ /dev/null @@ -1,115 +0,0 @@ ---- ---- - - - - -单击"标记"或使用下拉列表进行筛选。单击"表标题"进行排序或反向排序。 - -

diff --git a/content/zh/docs/tasks/access-application-cluster/ingress-minikube.md b/content/zh/docs/tasks/access-application-cluster/ingress-minikube.md index 16f7b00ea79a9..5add4e7920b75 100644 --- a/content/zh/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/zh/docs/tasks/access-application-cluster/ingress-minikube.md @@ -197,26 +197,7 @@ The following file is an Ingress resource that sends traffic to your Service via 1. 根据下面的 YAML 创建文件 `example-ingress.yaml`: - ```yaml - apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - name: example-ingress - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$1 - spec: - rules: - - host: hello-world.info - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: web - port: - number: 8080 - ``` + {{< codenew file="service/networking/example-ingress.yaml" >}} diff --git a/content/zh/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md b/content/zh/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md index 251f28f7bf62f..dc0600615cd91 100644 --- a/content/zh/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md +++ b/content/zh/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy.md @@ -26,17 +26,17 @@ You need to have a Kubernetes cluster. Follow the [kubeadm getting started guide ## 安装 Weave Net 插件 -按照[通过插件集成 Kubernetes](https://www.weave.works/docs/net/latest/kube-addon/) +按照[通过插件集成 Kubernetes](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/) 指南执行安装。 Kubernetes 的 Weave Net 插件带有 -[网络策略控制器](https://www.weave.works/docs/net/latest/kube-addon/#npc), +[网络策略控制器](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/#npc), 可自动监控 Kubernetes 所有名字空间的 NetworkPolicy 注释, 配置 `iptables` 规则以允许或阻止策略指示的流量。 diff --git a/content/zh/docs/tasks/administer-cluster/out-of-resource.md b/content/zh/docs/tasks/administer-cluster/out-of-resource.md index ac46f5a69f9cc..f86beff12f428 100644 --- a/content/zh/docs/tasks/administer-cluster/out-of-resource.md +++ b/content/zh/docs/tasks/administer-cluster/out-of-resource.md @@ -73,7 +73,7 @@ container, and if users use the [node allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) feature, out of resource decisions are made local to the end user Pod part of the cgroup hierarchy as well as the root node. This -[script](/docs/tasks/administer-cluster/out-of-resource/memory-available.sh) +[script](/examples/admin/resource/memory-available.sh) reproduces the same set of steps that the `kubelet` performs to calculate `memory.available`. The `kubelet` excludes inactive_file (i.e. # of bytes of file-backed memory on inactive LRU list) from its calculation as it assumes that @@ -83,7 +83,7 @@ memory is reclaimable under pressure. 
这很重要,因为 `free -m` 不能在容器中工作,并且如果用户使用了 [节点可分配资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) 特性,资源不足的判定将同时在本地 cgroup 层次结构的终端用户 Pod 部分和根节点做出。 -这个[脚本](/zh/docs/tasks/administer-cluster/out-of-resource/memory-available.sh) +这个[脚本](/zh/examples/admin/resource/memory-available.sh) 复现了与 `kubelet` 计算 `memory.available` 相同的步骤。 `kubelet` 将 `inactive_file`(意即活动 LRU 列表上基于文件后端的内存字节数)从计算中排除, 因为它假设内存在出现压力时将被回收。 diff --git a/content/zh/docs/tasks/debug-application-cluster/debug-running-pod.md b/content/zh/docs/tasks/debug-application-cluster/debug-running-pod.md index 6863fcb723e0f..358ee111c367e 100644 --- a/content/zh/docs/tasks/debug-application-cluster/debug-running-pod.md +++ b/content/zh/docs/tasks/debug-application-cluster/debug-running-pod.md @@ -2,27 +2,16 @@ title: 调试运行中的 Pod content_type: task --- - - - -本页解释了如何调试节点上正在运行(或者正在崩溃)的 Pod。 - - +本页解释如何在节点上调试运行中(或崩溃)的 Pod。 ## {{% heading "prerequisites" %}} - - -* 你的 {{< glossary_tooltip text="Pod" term_id="pod" >}} - 应该已经调度并正在运行。 - 如果 Pod 尚未运行,则从 - [应用故障排查](/zh/docs/tasks/debug-application-cluster/debug-application/) 开始。 -* 对于一些高级调试步骤, - 你需要知道 Pod 在哪个节点上运行,并拥有在该节点上执行 shell 命令的权限。 - 在使用 `kubectl` 运行标准调试步骤时,则不需要这种权限。 - +* 你的 {{< glossary_tooltip text="Pod" term_id="pod" >}} 应该已经被调度并正在运行中, +如果你的 Pod 还没有运行,请参阅[应用问题排查](/docs/tasks/debug-application-cluster/debug-application/)。 +* 对于一些高级调试步骤,你应该知道 Pod 具体运行在哪个节点上,在该节点上有权限去运行一些命令。 + 你不需要任何访问权限就可以使用 `kubectl` 去运行一些标准调试步骤。 @@ -46,85 +31,86 @@ This page explains how to debug Pods running (or crashing) on a Node. ## Examining pod logs {#examine-pod-logs} First, look at the logs of the affected container: ---> -## 检查 Pod 日志 {#examine-pod-logs} - -首先,查看受影响的容器的日志: ```shell kubectl logs ${POD_NAME} ${CONTAINER_NAME} ``` - -如果你的容器以前崩溃过,你可以访问前一个容器的崩溃日志: ```shell kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} ``` +--> +## 检查 Pod 的日志 {#examine-pod-logs} - -## 使用容器 exec 调试 {#container-exec} +```shell +kubectl logs ${POD_NAME} ${CONTAINER_NAME} +``` -如果 {{< glossary_tooltip text="容器镜像" term_id="image" >}} -包含调试工具,就像基于 Linux 和 Windows 基础镜像构建的镜像一样, -你可以使用 `kubectl exec` 在特定的容器中执行命令: +如果你的容器之前崩溃过,你可以通过下面命令访问之前容器的崩溃日志: ```shell -kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} +kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} ``` -{{< note >}} -`-c ${CONTAINER_NAME}` 是可选项。 -对于单容器 Pod,可以省略此参数。 -{{< /note >}} +## Debugging with container exec {#container-exec} + +```shell +kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} +``` - -例如,要查看正在运行的 Cassandra Pod 的日志,可以执行: ```shell kubectl exec cassandra -- cat /var/log/cassandra/system.log ``` - -你可以使用 `kubectl exec` 的 `-i` 和 `-t` 参数启动一个连接到终端的 shell,例如: ```shell kubectl exec -it cassandra -- sh ``` - -更多细节,参见 -[获取运行容器的 Shell]( -/zh/docs/tasks/debug-application-cluster/get-shell-running-container/)。 +## 使用容器 exec 进行调试 {#container-exec} + +如果 {{< glossary_tooltip text="容器镜像" term_id="image" >}} 包含调试程序, +比如从 Linux 和 Windows 操作系统基础镜像构建的镜像,你可以使用 `kubectl exec` 命令 +在特定的容器中运行一些命令: + +```shell +kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... 
${ARGN} +``` +{{< note >}} +`-c ${CONTAINER_NAME}` 是可选择的。如果Pod中仅包含一个容器,就可以忽略它。 +{{< /note >}} + +例如,要查看正在运行的 Cassandra pod中的日志,可以运行: + +```shell +kubectl exec cassandra -- cat /var/log/cassandra/system.log +``` + +你可以在 `kubectl exec` 命令后面加上 `-i` 和 `-t` 来运行一个连接到你的终端的 Shell,比如: + +```shell +kubectl exec -it cassandra -- sh +``` + +若要了解更多内容,可查看[获取正在运行容器的 Shell](/zh/docs/tasks/debug-application-cluster/get-shell-running-container/)。 -## 使用临时调试容器进行调试 {#ephemeral-container} {{< feature-state state="alpha" for_k8s_version="v1.18" >}} - -因为容器已经崩溃,或因为容器镜像没有内含调试工具,比如 -[distroless images](https://github.com/GoogleContainerTools/distroless), -导致 `kubectl exec` 不足以解决问题时, -{{< glossary_tooltip text="Ephemeral containers" term_id="ephemeral-container" >}} -对交互式故障诊断非常有用。 -从 `v1.18` 开始,`kubectl` 提供 alpha 命令,它可以为调试创建临时容器。 +## 使用临时调试容器来进行调试 {#ephemeral-container} + +{{< feature-state state="alpha" for_k8s_version="v1.18" >}} + +当由于容器崩溃或容器镜像不包含调试程序(例如[无发行版镜像](https://github.com/GoogleContainerTools/distroless)等) +而导致 `kubectl exec` 无法运行时,{{< glossary_tooltip text="临时容器" term_id="ephemeral-container" >}}对于排除交互式故障很有用。 -## 示例:使用临时容器调试 {#ephemeral-container-example} - -{{< note >}} -本节中的示例要求在集群启用 `EphemeralContainers` [特性门控]( -/zh/docs/reference/command-line-tools-reference/feature-gates/ -)。 -并且要求 `kubectl` v1.18 或更高版本。 -{{< /note >}} - - -可以使用 `kubectl alpha debug` 命令将临时容器添加到正在运行的 Pod 中。 -首先,为本例创建一个 Pod: ```shell kubectl run ephemeral-demo --image=k8s.gcr.io/pause:3.1 --restart=Never ``` - +## 使用临时容器来调试的例子 {#ephemeral-container-example} + {{< note >}} -本节在示例中使用 `pause` 容器镜像, -是因为它不包含用户态的调试工具。 -但此方法适用于所有容器镜像。 +本示例需要你的集群已经开启 `EphemeralContainers` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/), +`kubectl` 版本为 v1.18 或者更高。 {{< /note >}} -如果你试图使用 `kubectl exec` 去建立一个 shell, -你会看到一个报错, -这是因为在容器镜像中并没有包含 shell。 +你可以使用 `kubectl alpha debug` 命令来给正在运行中的 Pod 增加一个临时容器。 +首先,像示例一样创建一个 pod: + +```shell +kubectl run ephemeral-demo --image=k8s.gcr.io/pause:3.1 --restart=Never +``` + +{{< note >}} +本节示例中使用 `pause` 容器镜像,因为它不包含任何用户级调试程序,但是这个方法适用于所有容器镜像。 +{{< /note >}} + + -你可以使用 `kubectl alpha debug` 添加一个调试容器。 -如果指定了 `-i`/`--interactive` 参数, -`kubectl` 将自动连接到临时容器的控制台。 +如果你尝试使用 `kubectl exec` 来创建一个 shell,你将会看到一个错误,因为这个容器镜像中没有 shell。 + +```shell +kubectl exec -it ephemeral-demo -- sh +``` + +``` +OCI runtime exec failed: exec failed: container_linux.go:346: starting container process caused "exec: \"sh\": executable file not found in $PATH": unknown +``` + +你可以改为使用 `kubectl alpha debug` 添加调试容器。 +如果你指定 `-i` 或者 `--interactive` 参数,`kubectl` 将自动挂接到临时容器的控制台。 ```shell kubectl alpha debug -it ephemeral-demo --image=busybox --target=ephemeral-demo @@ -227,19 +225,16 @@ isolated process namespace. 
You can view the state of the newly created ephemeral container using `kubectl describe`: --> -此命令添加一个新的 busybox 容器并连接。 -`--target` 参数指定了另一个容器的进程命名空间。 -这里必须这样做,因为 `kubectl run` 没有在它创建的 Pod 中启用 -[进程命名空间共享](/zh/docs/tasks/configure-pod-container/share-process-namespace/) 。 +此命令添加一个新的 busybox 容器并将其挂接到该容器。`--target` 参数指定另一个容器的进程命名空间。 +这是必需的,因为 `kubectl run` 不能在它创建的pod中启用 +[共享进程命名空间](/zh/docs/tasks/configure-pod-container/share-process-namespace/)。 {{< note >}} -{{< glossary_tooltip text="Container Runtime" term_id="container-runtime" >}} -必须支持 `--target` 参数。 -如果不支持,临时容器可能无法启动, -或者可能使用隔离的进程名称空间启动。 +{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}必须支持`--target`参数。 +如果不支持,则临时容器可能不会启动,或者可能使用隔离的进程命名空间启动。 {{< /note >}} -可以使用 `kubectl describe` 查看新创建的临时容器的状态: +你可以使用 `kubectl describe` 查看新创建的临时容器的状态: ```shell kubectl describe pod ephemeral-demo @@ -266,7 +261,7 @@ Ephemeral Containers: -完成后,使用 `kubectl delete` 删除 Pod: +使用 `kubectl delete` 来移除已经结束掉的 Pod: ```shell kubectl delete pod ephemeral-demo @@ -289,10 +284,9 @@ given tools in the Kubernetes API. Therefore, if you find yourself needing to ssh into a machine, please file a feature request on GitHub describing your use case and why these tools are insufficient. --> -## 通过节点上的 shell 进行调试 {#node-shell-session} - -如果这些方法都不起作用, -你可以找到运行 Pod 的主机并通过 SSH 连接到该主机, -但是 Kubernetes API 中的工具通常不需要这样做。 -因此,如果你发现自己需要 ssh 到一台机器上,请在 GitHub 上提交一个功能请求,描述你的用例以及为什么这些工具不够用。 +## 在节点上通过 shell 来调试 {#node-shell-session} +如果这些方法都不起作用,你可以找到运行 Pod 的主机并通过 SSH 进入该主机, +但是如果使用 Kubernetes API 中的工具,则通常不需要这样做。 +因此,如果你发现自己需要使用 ssh 进入主机,请在GitHub 上提交功能请求, +以描述你的用例以及这些工具不足的原因。 diff --git a/content/zh/docs/tasks/extend-kubernetes/setup-konnectivity.md b/content/zh/docs/tasks/extend-kubernetes/setup-konnectivity.md new file mode 100644 index 0000000000000..f34b7a4f4a562 --- /dev/null +++ b/content/zh/docs/tasks/extend-kubernetes/setup-konnectivity.md @@ -0,0 +1,74 @@ +--- +title: 设置 Konnectivity 服务 +content_type: task +weight: 70 +--- + + + +Konnectivity 服务为控制平面提供集群通信的 TCP 级别代理。 + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## 配置 Konnectivity 服务 + +接下来的步骤需要出口配置,比如: + +{{< codenew file="admin/konnectivity/egress-selector-configuration.yaml" >}} + +您需要配置 API 服务器来使用 Konnectivity 服务,并将网络流量定向到集群节点: + +1. 创建一个出口配置文件比如 `admin/konnectivity/egress-selector-configuration.yaml`。 +1. 将 API 服务器的 `--egress-selector-config-file` 参数设置为你的 API 服务器的出口配置文件路径。 + + +接下来,你需要部署 Konnectivity 服务器和代理。[kubernetes-sigs/apiserver-network-proxy](https://github.com/kubernetes-sigs/apiserver-network-proxy) 是参考实现。 + +在控制平面节点上部署 Konnectivity 服务,下面提供的 `konnectivity-server.yaml` 配置清单假定您在集群中 +将 Kubernetes 组件都是部署为{{< glossary_tooltip text="静态 Pod" term_id="static-pod" >}}。如果不是,你可以将 Konnectivity 服务部署为 DaemonSet。 + +{{< codenew file="admin/konnectivity/konnectivity-server.yaml" >}} + + +在您的集群中部署 Konnectivity 代理: + +{{< codenew file="admin/konnectivity/konnectivity-agent.yaml" >}} + + +最后,如果您的集群开启了 RBAC,请创建相关的 RBAC 规则: + +{{< codenew file="admin/konnectivity/konnectivity-rbac.yaml" >}} diff --git a/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md index 02ce76b4abbb7..d6af5bf34b808 100644 --- a/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/zh/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -250,7 +250,7 @@ The generated ConfigMaps and Secrets have a content hash suffix appended. 
This e 所生成的 ConfigMap 和 Secret 都会包含内容哈希值后缀。 这是为了确保内容发生变化时,所生成的是新的 ConfigMap 或 Secret。 要禁止自动添加后缀的行为,用户可以使用 `generatorOptions`。 -除此以外,为生成的 ConfigMap 和 Secret 指定贯穿性选项也是可能的。 +除此以外,为生成的 ConfigMap 和 Secret 指定贯穿性选项也是可以的。 ```shell cat <./kustomization.yaml @@ -300,7 +300,7 @@ Here is an example: --> ### 设置贯穿性字段 {#setting-cross-cutting-fields} -在项目中未所有 Kubernetes 对象设置贯穿性字段是一种常见操作。 +在项目中为所有 Kubernetes 对象设置贯穿性字段是一种常见操作。 贯穿性字段的一些使用场景如下: * 为所有资源设置相同的名字空间 @@ -888,7 +888,7 @@ EOF This base can be used in multiple overlays. You can add different `namePrefix` or other cross-cutting fields in different overlays. Here are two overlays using the same base. --> -此基准可在多个覆盖中使用。你可以在不同的覆盖中添加不同送的 `namePrefix` 或 +此基准可在多个覆盖中使用。你可以在不同的覆盖中添加不同的 `namePrefix` 或 其他贯穿性字段。下面是两个使用同一基准的覆盖: ```shell @@ -1036,7 +1036,7 @@ deployment.apps "dev-my-nginx" deleted | 字段 | 类型 | 解释 | |-----------------------|--------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------| -| namespace | string | 为所有资源添加名字看 | +| namespace | string | 为所有资源添加名字空间 | | namePrefix | string | 此字段的值将被添加到所有资源名称前面 | | nameSuffix | string | 此字段的值将被添加到所有资源名称后面 | | commonLabels | map[string]string | 要添加到所有资源和选择算符的标签 | @@ -1046,8 +1046,8 @@ deployment.apps "dev-my-nginx" deleted | secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L106) | 列表中的每个条目都会生成一个 Secret | | generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L109) | 更改所有 ConfigMap 和 Secret 生成器的行为 | | bases | []string | 列表中每个条目都应能解析为一个包含 kustomization.yaml 文件的目录 | -| patchesStrategicMerge | []string | 列表中每个条目都赢能解析为某 Kubernetes 对象的策略性合并补丁 | -| patchesJson6902 | [][Json6902](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/patchjson6902.go#L8) | 列表中每个条目都赢能解析为一个 Kubernetes 对象和一个 JSON 补丁 | +| patchesStrategicMerge | []string | 列表中每个条目都能解析为某 Kubernetes 对象的策略性合并补丁 | +| patchesJson6902 | [][Json6902](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/patchjson6902.go#L8) | 列表中每个条目都能解析为一个 Kubernetes 对象和一个 JSON 补丁 | | vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/var.go#L31) | 每个条目用来从某资源的字段来析取文字 | | images | [][Image](https://github.com/kubernetes-sigs/kustomize/tree/master/api/types/image.go#L23) | 每个条目都用来更改镜像的名称、标记与/或摘要,不必生成补丁 | | configurations | []string | 列表中每个条目都应能解析为一个包含 [Kustomize 转换器配置](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs) 的文件 | diff --git a/content/zh/docs/tasks/tools/_index.md b/content/zh/docs/tasks/tools/_index.md index 922e13f0d3e4d..56e271dbe9212 100644 --- a/content/zh/docs/tasks/tools/_index.md +++ b/content/zh/docs/tasks/tools/_index.md @@ -2,4 +2,107 @@ title: "安装工具" weight: 10 description: 在你的计算机上设置 Kubernetes 工具。 +no_list: true --- + + + + +## kubectl + +Kubernetes 命令行工具,`kubectl`,使得你可以对 Kubernetes 集群运行命令。 +你可以使用 `kubectl` 来部署应用、监测和管理集群资源以及查看日志。 + +关于如何下载和安装 `kubectl` 并配置其访问你的集群,可参阅 +[安装和配置 `kubectl`](/zh/docs/tasks/tools/install-kubectl/)。 + + + +查看 kubectl 安装和配置指南 + + +你也可以阅读 [`kubectl` 参考文档](/zh/docs/reference/kubectl/). 
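安装完成后,可以用下面的命令快速验证 kubectl 是否可用以及能否访问集群(输出因环境而异,仅作示意):

```shell
# 查看本地 kubectl 客户端版本
kubectl version --client

# 确认 kubectl 能够连接到集群
kubectl cluster-info
```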
+ + +## minikube + +[`minikube`](https://minikube.sigs.k8s.io/) 是一个工具,能让你在本地运行 Kubernetes。 +`minikube` 在你本地的个人计算机(包括 Windows、macOS 和 Linux PC)运行一个单节点的 +Kubernetes 集群,以便你来尝试 Kubernetes 或者开展每天的开发工作。 + +如果你关注如何安装此工具,可以按官方的 +[Get Started!](https://minikube.sigs.k8s.io/docs/start/)指南操作。 + + + +查看 minikube 快速入门指南 + + +当你拥有了可工作的 `minikube` 时,就可以用它来 +[运行示例应用](/zh/docs/tutorials/hello-minikube/)了。 + + +## kind + +与 `minikube` 类似,[`kind`](https://kind.sigs.k8s.io/docs/) 让你能够在本地计算机上 +运行 Kubernetes。与`minikube` 不同的是,`kind` 只能使用一种容器运行时: +它要求你安装并配置好 [Docker](https://docs.docker.com/get-docker/)。 + +[快速入门](https://kind.sigs.k8s.io/docs/user/quick-start/)页面提供了开始使用 +`kind` 所需要完成的操作。 + + +查看 kind 的快速入门指南 + + diff --git a/content/zh/docs/tasks/tools/install-minikube.md b/content/zh/docs/tasks/tools/install-minikube.md deleted file mode 100644 index 270524f109dbe..0000000000000 --- a/content/zh/docs/tasks/tools/install-minikube.md +++ /dev/null @@ -1,446 +0,0 @@ ---- -title: 安装 Minikube -content_type: task -weight: 20 -card: - name: tasks - weight: 10 ---- - - - - - - -本页面讲述如何安装 [Minikube](/zh/docs/tutorials/hello-minikube),该工具用于在你电脑中的虚拟机上运行一个单节点的 Kubernetes 集群。 - -## {{% heading "prerequisites" %}} - -{{< tabs name="minikube_before_you_begin" >}} -{{% tab name="Linux" %}} - - - -若要检查你的 Linux 是否支持虚拟化技术,请运行下面的命令并验证输出结果是否不为空: - -``` -grep -E --color 'vmx|svm' /proc/cpuinfo -``` - -{{% /tab %}} - -{{% tab name="macOS" %}} - - -若要检查你的 macOS 是否支持虚拟化技术,请运行下面的命令: - -``` -sysctl -a | grep -E --color 'machdep.cpu.features|VMX' -``` - - -如果你在输出结果中看到了 `VMX` (应该会高亮显示)的字眼,说明你的电脑已启用 VT-x 特性。 - -{{% /tab %}} - -{{% tab name="Windows" %}} - -若要检查你的 Windows8 及以上的系统是否支持虚拟化技术,请终端或者 cmd 中运行以下命令: - -``` -systeminfo -``` - -如果你看到下面的输出,则表示该 Windows 支持虚拟化技术。 - -``` -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` - - -如果你看到下面的输出,则表示你的操作系统已经安装了 Hypervisor,你可以跳过安装 Hypervisor 的步骤。 -``` -Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed. 
-``` - -{{% /tab %}} -{{< /tabs >}} - - - - -## 安装 minikube - -{{< tabs name="tab_with_md" >}} -{{% tab name="Linux" %}} - - -### 安装 kubectl - -请确保你已正确安装 kubectl。你可以根据[安装并设置 kubectl](/zh/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux) -的说明来安装 kubectl。 - - - -### 安装 Hypervisor - -如果还没有装过 hypervisor,请选择以下方式之一进行安装: - - - -- [KVM](https://www.linux-kvm.org/),KVM 也使用了 QEMU -- [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - - -Minikube 还支持使用一个 `--vm-driver=none` 选项,让 Kubernetes 组件运行在主机上,而不是在 VM 中。 -使用这种驱动方式需要 [Docker](https://www.docker.com/products/docker-desktop) 和 Linux 环境,但不需要 hypervisor。 - -如果你在 Debian 系的 OS 中使用了 `none` 这种驱动方式,请使用 `.deb` 包安装 Docker,不要使用 snap 包的方式,Minikube 不支持这种方式。 -你可以从 [Docker](https://www.docker.com/products/docker-desktop) 下载 `.deb` 包。 - - -{{< caution >}} -`none` VM 驱动方式存在导致安全和数据丢失的问题。 -使用 `--vm-driver=none` 之前,请参考[这个文档](https://minikube.sigs.k8s.io/docs/reference/drivers/none/)获取详细信息。 -{{< /caution >}} - - -Minikube 还支持另外一个类似于 Docker 驱动的方式 `vm-driver=podman`。 -使用超级用户权限(root 用户)运行 Podman 可以最好的确保容器具有足够的权限使用 -你的操作系统上的所有特性。 - - -{{< caution >}} -`Podman` 驱动需要以 root 用户身份运行容器,因为普通用户帐户没有足够的权限 -使用容器运行可能需要的操作系统上的所有特性。 -{{< /caution >}} - - -### 使用包安装 Minikube - -Minikube 有 *实验性* 的安装包。你可以在 Minikube 在 GitHub 上的[发行版本](https://github.com/kubernetes/minikube/releases) 找到 Linux (AMD64) 的包。 - -根据你的 Linux 发行版选择安装合适的包。 - - -### 直接下载并安装 Minikube - -如果你不想通过包安装,你也可以下载并使用一个单节点二进制文件。 - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ - && chmod +x minikube -``` - - -将 Minikube 可执行文件添加至 PATH: - -```shell -sudo mkdir -p /usr/local/bin/ -sudo install minikube /usr/local/bin/ -``` - - -### 使用 Homebrew 安装 Minikube - -你还可以使用 Linux [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) 安装 Minikube: - -```shell -brew install minikube -``` - -{{% /tab %}} - -{{% tab name="macOS" %}} - - -### 安装 kubectl - -请确保你已正确安装 kubectl。你可以根据[安装并设置 kubectl](/zh/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux) -的说明来安装 kubectl。 - - -### 安装 Hypervisor - -如果你还没有安装 hypervisor,请选择以下方式之一进行安装: - -• [HyperKit](https://github.com/moby/hyperkit) -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) -• [VMware Fusion](https://www.vmware.com/products/fusion) - - -### 安装 Minikube - -macOS 安装 Minikube 最简单的方法是使用 [Homebrew](https://brew.sh): - -```shell -brew install minikube -``` - - -你也可以通过下载独立的可执行文件进行安装: - -```shell -curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 \ - && chmod +x minikube -``` - - -下面是一个简单的将 Minikube 可执行文件添加至 PATH 的方法: - -```shell -sudo mv minikube /usr/local/bin -``` - -{{% /tab %}} -{{% tab name="Windows" %}} - - -### 安装 kubectl - -请确保你已正确安装 kubectl。你可以根据[安装并设置 kubectl](/zh/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows) -的说明来安装 kubectl。 - - -### 安装 Hypervisor - -如果你还没有安装 hypervisor,请选择以下方式之一进行安装: - -• [Hyper-V](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) -• [VirtualBox](https://www.virtualbox.org/wiki/Downloads) - - -{{< note >}} -Hyper-V 可以运行在三个版本的 Windows 10 上:企业版、专业版和教育版(Enterprise, Professional, Education)。 -{{< /note >}} - - -### 使用 Chocolatey 安装 Minikube - -Windows 安装 Minikube 最简单的方法是使用 [Chocolatey](https://chocolatey.org/) (以管理员身份运行): - -```shell -choco install minikube -``` - - -完成 Minikube 的安装后,关闭当前 CLI 界面再重新打开。 -Minikube 应该已经自动添加至 path 中。 - - - -### 使用安装程序安装 Minikube - -在 Windows 上使用 [Windows Installer](https://docs.microsoft.com/en-us/windows/desktop/msi/windows-installer-portal) 手动安装 
Minikube,下载并运行 [`minikube-installer.exe`](https://github.com/kubernetes/minikube/releases/latest/download/minikube-installer.exe) 即可。 - - -### 直接下载并安装 Minikube - -想在 Windows 上手动安装 Minikube,下载 [`minikube-windows-amd64`](https://github.com/kubernetes/minikube/releases/latest) 并将其重命名为 `minikube.exe`,然后将其添加至 path 即可。 - -{{% /tab %}} -{{< /tabs >}} - - -## 安装确认 - -要确认 hypervisor 和 Minikube 均已成功安装,可以运行以下命令来启动本地 Kubernetes 集群: - - -{{< note >}} -若要为 `minikube start` 设置 `--vm-driver`,在下面提到 `<驱动名称>` 的地方, -用小写字母输入你安装的 hypervisor 的名称。 -[指定 VM 驱动程序](/zh/docs/setup/learning-environment/minikube/#specifying-the-vm-driver) -列举了 `--vm-driver` 值的完整列表。 -{{< /note >}} - -{{< note >}} -由于国内无法直接连接 k8s.gcr.io,推荐使用阿里云镜像仓库,在 `minikube start` 中添加 `--image-repository` 参数。 -{{< /note >}} - -```shell -minikube start --vm-driver=<驱动名称> -# 或者在需要时 -minikube start --vm-driver=<驱动名称> --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers -``` - - -一旦 `minikube start` 完成,你可以运行下面的命令来检查集群的状态: - -```shell -minikube status -``` - - -如果你的集群正在运行,`minikube status` 的输出结果应该类似于这样: - -``` -host: Running -kubelet: Running -apiserver: Running -kubeconfig: Configured -``` - - -在确认 Minikube 与 hypervisor 均正常工作后,你可以继续使用 Minikube 或停止集群。要停止集群,请运行: - -```shell -minikube stop -``` - - -## 清理本地状态 {#cleanup-local-state} - -如果你之前安装过 Minikube,并运行了: - -```shell -minikube start -``` - - -并且 `minikube start` 返回了一个错误: - -``` -machine does not exist -``` - - -那么,你需要清理 minikube 的本地状态: - -```shell -minikube delete -``` - -## {{% heading "whatsnext" %}} - - - -* [使用 Minikube 在本地运行 Kubernetes](/zh/docs/setup/learning-environment/minikube/) - diff --git a/content/zh/docs/tutorials/_index.md b/content/zh/docs/tutorials/_index.md index b4151e7e2204e..f283d60ea60df 100644 --- a/content/zh/docs/tutorials/_index.md +++ b/content/zh/docs/tutorials/_index.md @@ -1,25 +1,20 @@ --- title: 教程 main_menu: true +no_list: true weight: 60 content_type: concept --- - -Kubernetes 文档的这一部分包含教程。一个教程展示了如何完成一个比单个[任务](/zh/docs/tasks/)更大的目标。 -通常一个教程有几个部分,每个部分都有一系列步骤。在浏览每个教程之前, -您可能希望将[标准化术语表](/zh/docs/reference/glossary/)页面添加到书签,供以后参考。 - - +Kubernetes 文档的这一部分包含教程。每个教程展示了如何完成一个比单个 +[任务](/zh/docs/tasks/)更大的目标。 +通常一个教程有几个部分,每个部分都有一系列步骤。在浏览每个教程之前, +您可能希望将[标准化术语表](/zh/docs/reference/glossary/)页面添加到书签,供以后参考。 - -## 基础知识 - - -* [Kubernetes 基础知识](/zh/docs/tutorials/Kubernetes-Basics/)是一个深入的交互式教程,帮助您理解 Kubernetes 系统,并尝试一些基本的 Kubernetes 特性。 - -* [介绍 Kubernetes (edx)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) - - +## 基础知识 -* [你好 Minikube](/zh/docs/tutorials/hello-minikube/) +* [Kubernetes 基础知识](/zh/docs/tutorials/Kubernetes-Basics/)是一个深入的 + 交互式教程,帮助您理解 Kubernetes 系统,并尝试一些基本的 Kubernetes 特性。 - +* [介绍 Kubernetes (edx)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) -## 配置 +* [你好 Minikube](/zh/docs/tutorials/hello-minikube/) - -* [使用一个 ConfigMap 配置 Redis](/zh/docs/tutorials/configuration/configure-redis-using-configmap/) - +## 配置 -## 无状态应用程序 +* [使用一个 ConfigMap 配置 Redis](/zh/docs/tutorials/configuration/configure-redis-using-configmap/) - -* [公开外部 IP 地址访问集群中的应用程序](/zh/docs/tutorials/stateless-application/expose-external-ip-address/) - -* [示例:使用 Redis 部署 PHP 留言板应用程序](/zh/docs/tutorials/stateless-application/guestbook/) - - +## 无状态应用程序 -## 有状态应用程序 +* [公开外部 IP 地址访问集群中的应用程序](/zh/docs/tutorials/stateless-application/expose-external-ip-address/) - +* [示例:使用 Redis 部署 PHP 留言板应用程序](/zh/docs/tutorials/stateless-application/guestbook/) -* [StatefulSet 基础](/zh/docs/tutorials/stateful-application/basic-stateful-set/) +## 
Stateful Applications -* [示例:WordPress 和 MySQL 使用持久卷](/zh/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) +* [StatefulSet Basics](/docs/tutorials/stateful-application/basic-stateful-set/) - - -* [示例:使用有状态集部署 Cassandra](/zh/docs/tutorials/stateful-application/cassandra/) - - -* [运行 ZooKeeper,CP 分布式系统](/zh/docs/tutorials/stateful-application/zookeeper/) - +## 有状态应用程序 -## 集群 +* [StatefulSet 基础](/zh/docs/tutorials/stateful-application/basic-stateful-set/) -* [AppArmor](/zh/docs/tutorials/clusters/apparmor/) +* [示例:WordPress 和 MySQL 使用持久卷](/zh/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) + +* [示例:使用有状态集部署 Cassandra](/zh/docs/tutorials/stateful-application/cassandra/) + +* [运行 ZooKeeper,CP 分布式系统](/zh/docs/tutorials/stateful-application/zookeeper/) +## 集群 -## 服务 - -* [使用源 IP](/zh/docs/tutorials/services/source-ip/) +* [AppArmor](/zh/docs/tutorials/clusters/apparmor/) +## 服务 - +* [使用源 IP](/zh/docs/tutorials/services/source-ip/) ## {{% heading "whatsnext" %}} - -如果您想编写教程,请参阅[使用页面模板](/zh/docs/home/contribute/page-templates/) -以获取有关教程页面类型和教程模板的信息。 - - +如果您想编写教程,请参阅[内容页面类型](/zh/docs/contribute/style/page-content-types/) +以获取有关教程页面类型的信息。 diff --git a/content/zh/docs/tutorials/hello-minikube.md b/content/zh/docs/tutorials/hello-minikube.md index 9b9fa099997a7..7a0853348d432 100644 --- a/content/zh/docs/tutorials/hello-minikube.md +++ b/content/zh/docs/tutorials/hello-minikube.md @@ -1,5 +1,5 @@ --- -title: 你好 Minikube +title: 你好,Minikube content_type: tutorial weight: 5 menu: @@ -7,13 +7,12 @@ menu: title: "Get Started" weight: 10 post: > -

Ready to get your hands dirty? Build a simple Kubernetes cluster that runs "Hello World" for Node.js.
+ 准备好动手操作了么?构建一个简单的 Kubernetes 集群来运行示例应用。

card: name: tutorials weight: 10 --- -本教程向您展示如何使用 [Minikube](/zh/docs/setup/learning-environment/minikube) 和 Katacoda 在 Kubernetes 上运行一个简单的 “Hello World” Node.js 应用程序。Katacoda 提供免费的浏览器内 Kubernetes 环境。 +本教程向你展示如何使用 [Minikube](/zh/docs/setup/learning-environment/minikube) 和 Katacoda +在 Kubernetes 上运行一个应用示例。Katacoda 提供免费的浏览器内 Kubernetes 环境。 -{{< note >}} -如果您已在本地安装 [Minikube](/zh/docs/tasks/tools/install-minikube/),也可以按照本教程操作。 - +{{< note >}} +如果你已在本地安装 [Minikube](/zh/docs/tasks/tools/install-minikube/),也可以按照本教程操作。 {{< /note >}} - ## {{% heading "objectives" %}} - -* 将 "Hello World" 应用程序部署到 Minikube。 +* 将一个示例应用部署到 Minikube。 * 运行应用程序。 * 查看应用日志 - - ## {{% heading "prerequisites" %}} -本教程提供了从以下文件构建的容器镜像: - -{{< codenew language="js" file="minikube/server.js" >}} - -{{< codenew language="conf" file="minikube/Dockerfile" >}} - - -有关 `docker build` 命令的更多信息,请参阅 [Docker 文档](https://docs.docker.com/engine/reference/commandline/build/)。 - - +本教程提供了容器镜像,使用 NGINX 来对所有请求做出回应: @@ -92,25 +75,27 @@ For more information on the `docker build` command, read the [Docker documentati 1. 点击 **启动终端** - {{< kat-button >}} + {{< kat-button >}} - {{< note >}}If you installed Minikube locally, run `minikube start`.{{< /note >}} + + {{< note >}} + 如果你在本地安装了 Minikube,运行 `minikube start`。 + {{< /note >}} -2. 在浏览器中打开 Kubernetes dashboard: +2. 在浏览器中打开 Kubernetes 仪表板(Dashboard): - ```shell - minikube dashboard - ``` + ```shell + minikube dashboard + ``` - 3. 仅限 Katacoda 环境:在终端窗口的顶部,单击加号,然后单击 **选择要在主机 1 上查看的端口**。 4. 仅限 Katacoda 环境:输入“30000”,然后单击 **显示端口**。 @@ -129,18 +114,22 @@ recommended way to manage the creation and scaling of Pods. ## 创建 Deployment -Kubernetes [*Pod*](/zh/docs/concepts/workloads/pods/pod/) 是由一个或多个为了管理和联网而绑定在一起的容器构成的组。本教程中的 Pod 只有一个容器。Kubernetes [*Deployment*](/zh/docs/concepts/workloads/controllers/deployment/) 检查 Pod 的健康状况,并在 Pod 中的容器终止的情况下重新启动新的容器。Deployment 是管理 Pod 创建和扩展的推荐方法。 +Kubernetes [*Pod*](/zh/docs/concepts/workloads/pods/pod/) 是由一个或多个 +为了管理和联网而绑定在一起的容器构成的组。 本教程中的 Pod 只有一个容器。 +Kubernetes [*Deployment*](/zh/docs/concepts/workloads/controllers/deployment/) +检查 Pod 的健康状况,并在 Pod 中的容器终止的情况下重新启动新的容器。 +Deployment 是管理 Pod 创建和扩展的推荐方法。 +1. 使用 `kubectl create` 命令创建管理 Pod 的 Deployment。该 Pod 根据提供的 Docker + 镜像运行 Container。 -1. 使用 `kubectl create` 命令创建管理 Pod 的 Deployment。该 Pod 根据提供的 Docker 镜像运行 Container。 - - ```shell - kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4 - ``` + ```shell + kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4 + ``` + - 输出结果类似于这样: + 输出结果类似于这样: - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - hello-node 1/1 1 1 1m - ``` + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + hello-node 1/1 1 1 1m + ``` - 3. 查看 Pod: - ```shell - kubectl get pods - ``` + ```shell + kubectl get pods + ``` - + - 输出结果类似于这样: + 输出结果类似于这样: - ``` - NAME READY STATUS RESTARTS AGE - hello-node-5f76cf6ccf-br9b5 1/1 Running 0 1m - ``` + ``` + NAME READY STATUS RESTARTS AGE + hello-node-5f76cf6ccf-br9b5 1/1 Running 0 1m + ``` - 4. 查看集群事件: - ```shell - kubectl get events - ``` + ```shell + kubectl get events + ``` - 5. 
查看 `kubectl` 配置: - ```shell - kubectl config view - ``` + ```shell + kubectl config view + ``` - {{< note >}}有关 kubectl 命令的更多信息,请参阅 [kubectl 概述](/zh/docs/reference/kubectl/overview/)。{{< /note >}} +{{< note >}} +有关 kubectl 命令的更多信息,请参阅 [kubectl 概述](/zh/docs/user-guide/kubectl-overview/)。 +{{< /note >}} - ## 创建 Service -默认情况下,Pod 只能通过 Kubernetes 集群中的内部 IP 地址访问。要使得 `hello-node` 容器可以从 Kubernetes 虚拟网络的外部访问,您必须将 Pod 暴露为 Kubernetes [*Service*](/zh/docs/concepts/services-networking/service/)。 +默认情况下,Pod 只能通过 Kubernetes 集群中的内部 IP 地址访问。 +要使得 `hello-node` 容器可以从 Kubernetes 虚拟网络的外部访问,你必须将 Pod +暴露为 Kubernetes [*Service*](/zh/docs/concepts/services-networking/service/)。 - 1. 使用 `kubectl expose` 命令将 Pod 暴露给公网: - ```shell - kubectl expose deployment hello-node --type=LoadBalancer --port=8080 - ``` + ```shell + kubectl expose deployment hello-node --type=LoadBalancer --port=8080 + ``` - The `--type=LoadBalancer` flag indicates that you want to expose your Service - outside of the cluster. + + 这里的 `--type=LoadBalancer` 标志表明你希望将你的 Service 暴露到集群外部。 +2. 查看你刚刚创建的 Service: -2. 查看您刚刚创建的服务: + ```shell + kubectl get services + ``` - ```shell - kubectl get services - ``` + - + 输出结果类似于这样: - 输出结果类似于这样: + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + hello-node LoadBalancer 10.108.144.78 8080:30369/TCP 21s + kubernetes ClusterIP 10.96.0.1 443/TCP 23m + ``` - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - hello-node LoadBalancer 10.108.144.78 8080:30369/TCP 21s - kubernetes ClusterIP 10.96.0.1 443/TCP 23m - ``` - - - 在支持负载均衡器的云服务提供商上,将提供一个外部 IP 来访问该服务。在 Minikube 上,`LoadBalancer` 使得服务可以通过命令 `minikube service` 访问。 + + 对于支持负载均衡器的云服务平台而言,平台将提供一个外部 IP 来访问该服务。 + 在 Minikube 上,`LoadBalancer` 使得服务可以通过命令 `minikube service` 访问。 3. 运行下面的命令: - ```shell - minikube service hello-node - ``` + ```shell + minikube service hello-node + ``` -5. 仅限 Katacoda 环境:请注意在 service 输出中与 `8080` 对应的长度为 5 位的端口号。此端口号是随机生成的,可能与您不同。在端口号文本框中输入您自己的端口号,然后单击显示端口。如果是上面那个例子,就需要输入 `30369`。 +5. 仅限 Katacoda 环境:请注意在 service 输出中与 `8080` 对应的长度为 5 位的端口号。 + 此端口号是随机生成的,可能与你的不同。 + 在端口号文本框中输入你自己的端口号,然后单击显示端口。 + 对应于上面的例子,需要输入 `30369`。 - 这将打开一个浏览器窗口,为您的应用程序提供服务并显示 “Hello World” 消息。 + 这将打开一个浏览器窗口,为你的应用程序提供服务并显示应用的响应。 ## 启用插件 -Minikube 有一组内置的 {{< glossary_tooltip text="插件" term_id="addons" >}},可以在本地 Kubernetes 环境中启用、禁用和打开。 +Minikube 有一组内置的 {{< glossary_tooltip text="插件" term_id="addons" >}}, +可以在本地 Kubernetes 环境中启用、禁用和打开。 1. 列出当前支持的插件: - ```shell - minikube addons list - ``` - - - - 输出结果类似于这样: - - ``` - addon-manager: enabled - dashboard: enabled - default-storageclass: enabled - efk: disabled - freshpod: disabled - gvisor: disabled - helm-tiller: disabled - ingress: disabled - ingress-dns: disabled - logviewer: disabled - metrics-server: disabled - nvidia-driver-installer: disabled - nvidia-gpu-device-plugin: disabled - registry: disabled - registry-creds: disabled - storage-provisioner: enabled - storage-provisioner-gluster: disabled - ``` + ```shell + minikube addons list + ``` + + + 输出结果类似于这样: + + ``` + addon-manager: enabled + dashboard: enabled + default-storageclass: enabled + efk: disabled + freshpod: disabled + gvisor: disabled + helm-tiller: disabled + ingress: disabled + ingress-dns: disabled + logviewer: disabled + metrics-server: disabled + nvidia-driver-installer: disabled + nvidia-gpu-device-plugin: disabled + registry: disabled + registry-creds: disabled + storage-provisioner: enabled + storage-provisioner-gluster: disabled + ``` - 2. 
启用插件,例如 `metrics-server`: - ```shell - minikube addons enable metrics-server - ``` + ```shell + minikube addons enable metrics-server + ``` - + - 输出结果类似于这样: + 输出结果类似于这样: - ``` - metrics-server was successfully enabled - ``` + ``` + metrics-server was successfully enabled + ``` - 3. 查看刚才创建的 Pod 和 Service: - ```shell - kubectl get pod,svc -n kube-system - ``` - - - - 输出结果类似于这样: - - ``` - NAME READY STATUS RESTARTS AGE - pod/coredns-5644d7b6d9-mh9ll 1/1 Running 0 34m - pod/coredns-5644d7b6d9-pqd2t 1/1 Running 0 34m - pod/metrics-server-67fb648c5 1/1 Running 0 26s - pod/etcd-minikube 1/1 Running 0 34m - pod/influxdb-grafana-b29w8 2/2 Running 0 26s - pod/kube-addon-manager-minikube 1/1 Running 0 34m - pod/kube-apiserver-minikube 1/1 Running 0 34m - pod/kube-controller-manager-minikube 1/1 Running 0 34m - pod/kube-proxy-rnlps 1/1 Running 0 34m - pod/kube-scheduler-minikube 1/1 Running 0 34m - pod/storage-provisioner 1/1 Running 0 34m - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/metrics-server ClusterIP 10.96.241.45 80/TCP 26s - service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 34m - service/monitoring-grafana NodePort 10.99.24.54 80:30002/TCP 26s - service/monitoring-influxdb ClusterIP 10.111.169.94 8083/TCP,8086/TCP 26s - ``` + ```shell + kubectl get pod,svc -n kube-system + ``` + + + + 输出结果类似于这样: + + ``` + NAME READY STATUS RESTARTS AGE + pod/coredns-5644d7b6d9-mh9ll 1/1 Running 0 34m + pod/coredns-5644d7b6d9-pqd2t 1/1 Running 0 34m + pod/metrics-server-67fb648c5 1/1 Running 0 26s + pod/etcd-minikube 1/1 Running 0 34m + pod/influxdb-grafana-b29w8 2/2 Running 0 26s + pod/kube-addon-manager-minikube 1/1 Running 0 34m + pod/kube-apiserver-minikube 1/1 Running 0 34m + pod/kube-controller-manager-minikube 1/1 Running 0 34m + pod/kube-proxy-rnlps 1/1 Running 0 34m + pod/kube-scheduler-minikube 1/1 Running 0 34m + pod/storage-provisioner 1/1 Running 0 34m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/metrics-server ClusterIP 10.96.241.45 80/TCP 26s + service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 34m + service/monitoring-grafana NodePort 10.99.24.54 80:30002/TCP 26s + service/monitoring-influxdb ClusterIP 10.111.169.94 8083/TCP,8086/TCP 26s + ``` - 4. 
禁用 `metrics-server`: + ```shell + minikube addons disable metrics-server + ``` - ```shell - minikube addons disable metrics-server - ``` - - + - 输出结果类似于这样: + 输出结果类似于这样: - ``` - metrics-server was successfully disabled - ``` + ``` + metrics-server was successfully disabled + ``` - ## 清理 -现在可以清理您在集群中创建的资源: +现在可以清理你在集群中创建的资源: ```shell kubectl delete service hello-node @@ -427,8 +417,7 @@ kubectl delete deployment hello-node - -可选的,停止 Minikube 虚拟机(VM): +可选地,停止 Minikube 虚拟机(VM): ```shell minikube stop @@ -437,25 +426,20 @@ minikube stop - -可选的,删除 Minikube 虚拟机(VM): +可选地,删除 Minikube 虚拟机(VM): ```shell minikube delete ``` - - ## {{% heading "whatsnext" %}} - * 进一步了解 [Deployment 对象](/zh/docs/concepts/workloads/controllers/deployment/)。 -* 学习更多关于 [部署应用](/zh/docs/tasks/run-application/run-stateless-application-deployment/)。 -* 学习更多关于 [Service 对象](/zh/docs/concepts/services-networking/service/)。 - +* 进一步了解[部署应用](/zh/docs/tasks/run-application/run-stateless-application-deployment/)。 +* 进一步了解 [Service 对象](/zh/docs/concepts/services-networking/service/)。 diff --git a/content/zh/examples/admin/konnectivity/egress-selector-configuration.yaml b/content/zh/examples/admin/konnectivity/egress-selector-configuration.yaml new file mode 100644 index 0000000000000..6659ff3fbb4fb --- /dev/null +++ b/content/zh/examples/admin/konnectivity/egress-selector-configuration.yaml @@ -0,0 +1,21 @@ +apiVersion: apiserver.k8s.io/v1beta1 +kind: EgressSelectorConfiguration +egressSelections: +# Since we want to control the egress traffic to the cluster, we use the +# "cluster" as the name. Other supported values are "etcd", and "master". +- name: cluster + connection: + # This controls the protocol between the API Server and the Konnectivity + # server. Supported values are "GRPC" and "HTTPConnect". There is no + # end user visible difference between the two modes. You need to set the + # Konnectivity server to work in the same mode. + proxyProtocol: GRPC + transport: + # This controls what transport the API Server uses to communicate with the + # Konnectivity server. UDS is recommended if the Konnectivity server + # locates on the same machine as the API Server. You need to configure the + # Konnectivity server to listen on the same UDS socket. + # The other supported transport is "tcp". You will need to set up TLS + # config to secure the TCP transport. + uds: + udsName: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket diff --git a/content/zh/examples/admin/konnectivity/konnectivity-agent.yaml b/content/zh/examples/admin/konnectivity/konnectivity-agent.yaml new file mode 100644 index 0000000000000..c3dc71040b9c8 --- /dev/null +++ b/content/zh/examples/admin/konnectivity/konnectivity-agent.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +# Alternatively, you can deploy the agents as Deployments. It is not necessary +# to have an agent on each node. 
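+# This example uses a DaemonSet, so one konnectivity-agent Pod runs on each
+# node; a Deployment with a smaller replica count would also work, as noted above.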
+kind: DaemonSet +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: konnectivity-agent + namespace: kube-system + name: konnectivity-agent +spec: + selector: + matchLabels: + k8s-app: konnectivity-agent + template: + metadata: + labels: + k8s-app: konnectivity-agent + spec: + priorityClassName: system-cluster-critical + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.8 + name: konnectivity-agent + command: ["/proxy-agent"] + args: [ + "--logtostderr=true", + "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + # Since the konnectivity server runs with hostNetwork=true, + # this is the IP address of the master machine. + "--proxy-server-host=35.225.206.7", + "--proxy-server-port=8132", + "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token" + ] + volumeMounts: + - mountPath: /var/run/secrets/tokens + name: konnectivity-agent-token + livenessProbe: + httpGet: + port: 8093 + path: /healthz + initialDelaySeconds: 15 + timeoutSeconds: 15 + serviceAccountName: konnectivity-agent + volumes: + - name: konnectivity-agent-token + projected: + sources: + - serviceAccountToken: + path: konnectivity-agent-token + audience: system:konnectivity-server diff --git a/content/zh/examples/admin/konnectivity/konnectivity-rbac.yaml b/content/zh/examples/admin/konnectivity/konnectivity-rbac.yaml new file mode 100644 index 0000000000000..7687f49b77e82 --- /dev/null +++ b/content/zh/examples/admin/konnectivity/konnectivity-rbac.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:konnectivity-server + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:konnectivity-server +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: konnectivity-agent + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile diff --git a/content/zh/examples/admin/konnectivity/konnectivity-server.yaml b/content/zh/examples/admin/konnectivity/konnectivity-server.yaml new file mode 100644 index 0000000000000..730c26c66a801 --- /dev/null +++ b/content/zh/examples/admin/konnectivity/konnectivity-server.yaml @@ -0,0 +1,70 @@ +apiVersion: v1 +kind: Pod +metadata: + name: konnectivity-server + namespace: kube-system +spec: + priorityClassName: system-cluster-critical + hostNetwork: true + containers: + - name: konnectivity-server-container + image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-server:v0.0.8 + command: ["/proxy-server"] + args: [ + "--log-file=/var/log/konnectivity-server.log", + "--logtostderr=false", + "--log-file-max-size=0", + # This needs to be consistent with the value set in egressSelectorConfiguration. + "--uds-name=/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket", + # The following two lines assume the Konnectivity server is + # deployed on the same machine as the apiserver, and the certs and + # key of the API Server are at the specified location. + "--cluster-cert=/etc/srv/kubernetes/pki/apiserver.crt", + "--cluster-key=/etc/srv/kubernetes/pki/apiserver.key", + # This needs to be consistent with the value set in egressSelectorConfiguration. 
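+        # ("grpc" here corresponds to proxyProtocol: GRPC in
+        # egress-selector-configuration.yaml; if the egress selector is set to
+        # HTTPConnect, switch the Konnectivity server to the matching mode.)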
+ "--mode=grpc", + "--server-port=0", + "--agent-port=8132", + "--admin-port=8133", + "--agent-namespace=kube-system", + "--agent-service-account=konnectivity-agent", + "--kubeconfig=/etc/srv/kubernetes/konnectivity-server/kubeconfig", + "--authentication-audience=system:konnectivity-server" + ] + livenessProbe: + httpGet: + scheme: HTTP + host: 127.0.0.1 + port: 8133 + path: /healthz + initialDelaySeconds: 30 + timeoutSeconds: 60 + ports: + - name: agentport + containerPort: 8132 + hostPort: 8132 + - name: adminport + containerPort: 8133 + hostPort: 8133 + volumeMounts: + - name: varlogkonnectivityserver + mountPath: /var/log/konnectivity-server.log + readOnly: false + - name: pki + mountPath: /etc/srv/kubernetes/pki + readOnly: true + - name: konnectivity-uds + mountPath: /etc/srv/kubernetes/konnectivity-server + readOnly: false + volumes: + - name: varlogkonnectivityserver + hostPath: + path: /var/log/konnectivity-server.log + type: FileOrCreate + - name: pki + hostPath: + path: /etc/srv/kubernetes/pki + - name: konnectivity-uds + hostPath: + path: /etc/srv/kubernetes/konnectivity-server + type: DirectoryOrCreate diff --git a/content/zh/docs/tasks/administer-cluster/memory-available.sh b/content/zh/examples/admin/resource/memory-available.sh similarity index 100% rename from content/zh/docs/tasks/administer-cluster/memory-available.sh rename to content/zh/examples/admin/resource/memory-available.sh diff --git a/content/zh/examples/federation/policy-engine-deployment.yaml b/content/zh/examples/federation/policy-engine-deployment.yaml deleted file mode 100644 index 168af7ba4cf0f..0000000000000 --- a/content/zh/examples/federation/policy-engine-deployment.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: opa - name: opa - namespace: federation-system -spec: - replicas: 1 - selector: - matchLabels: - app: opa - template: - metadata: - labels: - app: opa - name: opa - spec: - containers: - - name: opa - image: openpolicyagent/opa:0.4.10 - args: - - "run" - - "--server" - - name: kube-mgmt - image: openpolicyagent/kube-mgmt:0.2 - args: - - "-kubeconfig=/srv/kubernetes/kubeconfig" - - "-cluster=federation/v1beta1/clusters" - volumeMounts: - - name: federation-kubeconfig - mountPath: /srv/kubernetes - readOnly: true - volumes: - - name: federation-kubeconfig - secret: - secretName: federation-controller-manager-kubeconfig diff --git a/content/zh/examples/federation/policy-engine-service.yaml b/content/zh/examples/federation/policy-engine-service.yaml deleted file mode 100644 index 287a972d64ee8..0000000000000 --- a/content/zh/examples/federation/policy-engine-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: opa - namespace: federation-system -spec: - selector: - app: opa - ports: - - name: http - protocol: TCP - port: 8181 - targetPort: 8181 \ No newline at end of file diff --git a/content/zh/examples/federation/replicaset-example-policy.yaml b/content/zh/examples/federation/replicaset-example-policy.yaml deleted file mode 100644 index 43dc83b18b200..0000000000000 --- a/content/zh/examples/federation/replicaset-example-policy.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - labels: - app: nginx-pci - name: nginx-pci - annotations: - requires-pci: "true" -spec: - replicas: 3 - selector: - matchLabels: - app: nginx-pci - template: - metadata: - labels: - app: nginx-pci - spec: - containers: - - image: nginx - name: nginx-pci diff --git 
a/content/zh/examples/federation/scheduling-policy-admission.yaml b/content/zh/examples/federation/scheduling-policy-admission.yaml deleted file mode 100644 index a164722425555..0000000000000 --- a/content/zh/examples/federation/scheduling-policy-admission.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: admission - namespace: federation-system -data: - config.yml: | - apiVersion: apiserver.k8s.io/v1alpha1 - kind: AdmissionConfiguration - plugins: - - name: SchedulingPolicy - path: /etc/kubernetes/admission/scheduling-policy-config.yml - scheduling-policy-config.yml: | - kubeconfig: /etc/kubernetes/admission/opa-kubeconfig - opa-kubeconfig: | - clusters: - - name: opa-api - cluster: - server: http://opa.federation-system.svc.cluster.local:8181/v0/data/kubernetes/placement - users: - - name: scheduling-policy - user: - token: deadbeefsecret - contexts: - - name: default - context: - cluster: opa-api - user: scheduling-policy - current-context: default diff --git a/content/zh/examples/minikube/Dockerfile b/content/zh/examples/minikube/Dockerfile deleted file mode 100644 index dd58cb7e7547e..0000000000000 --- a/content/zh/examples/minikube/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM node:6.14.2 -EXPOSE 8080 -COPY server.js . -CMD [ "node", "server.js" ] diff --git a/content/zh/examples/minikube/server.js b/content/zh/examples/minikube/server.js deleted file mode 100644 index 76345a17d81db..0000000000000 --- a/content/zh/examples/minikube/server.js +++ /dev/null @@ -1,9 +0,0 @@ -var http = require('http'); - -var handleRequest = function(request, response) { - console.log('Received request for URL: ' + request.url); - response.writeHead(200); - response.end('Hello World!'); -}; -var www = http.createServer(handleRequest); -www.listen(8080); diff --git a/content/zh/examples/service/networking/example-ingress.yaml b/content/zh/examples/service/networking/example-ingress.yaml new file mode 100644 index 0000000000000..b309d13275105 --- /dev/null +++ b/content/zh/examples/service/networking/example-ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: example-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: hello-world.info + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: web + port: + number: 8080 \ No newline at end of file diff --git a/content/zh/examples/service/networking/external-lb.yaml b/content/zh/examples/service/networking/external-lb.yaml new file mode 100644 index 0000000000000..adcf7a2fd0a53 --- /dev/null +++ b/content/zh/examples/service/networking/external-lb.yaml @@ -0,0 +1,10 @@ +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: external-lb +spec: + controller: example.com/ingress-controller + parameters: + apiGroup: k8s.example.com + kind: IngressParameters + name: external-lb diff --git a/content/zh/examples/service/networking/ingress-resource-backend.yaml b/content/zh/examples/service/networking/ingress-resource-backend.yaml new file mode 100644 index 0000000000000..87b6bbd0f3757 --- /dev/null +++ b/content/zh/examples/service/networking/ingress-resource-backend.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-resource-backend +spec: + defaultBackend: + resource: + apiGroup: k8s.example.com + kind: StorageBucket + name: static-assets + rules: + - http: + paths: + - path: /icons + pathType: ImplementationSpecific + backend: + resource: + apiGroup: 
k8s.example.com + kind: StorageBucket + name: icon-assets diff --git a/content/zh/examples/service/networking/ingress-wildcard-host.yaml b/content/zh/examples/service/networking/ingress-wildcard-host.yaml new file mode 100644 index 0000000000000..2be7016706cee --- /dev/null +++ b/content/zh/examples/service/networking/ingress-wildcard-host.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-wildcard-host +spec: + rules: + - host: "foo.bar.com" + http: + paths: + - pathType: Prefix + path: "/bar" + backend: + service: + name: service1 + port: + number: 80 + - host: "*.foo.com" + http: + paths: + - pathType: Prefix + path: "/foo" + backend: + service: + name: service2 + port: + number: 80 diff --git a/content/zh/examples/service/networking/ingress.yaml b/content/zh/examples/service/networking/ingress.yaml deleted file mode 100644 index 56a0d5138f4e4..0000000000000 --- a/content/zh/examples/service/networking/ingress.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: test-ingress -spec: - backend: - serviceName: testsvc - servicePort: 80 - diff --git a/content/zh/examples/service/networking/minimal-ingress.yaml b/content/zh/examples/service/networking/minimal-ingress.yaml new file mode 100644 index 0000000000000..76640b9447fbd --- /dev/null +++ b/content/zh/examples/service/networking/minimal-ingress.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: minimal-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + rules: + - http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 diff --git a/content/zh/examples/service/networking/name-virtual-host-ingress-no-third-host.yaml b/content/zh/examples/service/networking/name-virtual-host-ingress-no-third-host.yaml new file mode 100644 index 0000000000000..16a560b1ff287 --- /dev/null +++ b/content/zh/examples/service/networking/name-virtual-host-ingress-no-third-host.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: name-virtual-host-ingress-no-third-host +spec: + rules: + - host: first.bar.com + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: service1 + port: + number: 80 + - host: second.bar.com + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: service2 + port: + number: 80 + - http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: service3 + port: + number: 80 diff --git a/content/zh/examples/service/networking/name-virtual-host-ingress.yaml b/content/zh/examples/service/networking/name-virtual-host-ingress.yaml new file mode 100644 index 0000000000000..213a73d261250 --- /dev/null +++ b/content/zh/examples/service/networking/name-virtual-host-ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: name-virtual-host-ingress +spec: + rules: + - host: foo.bar.com + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: service1 + port: + number: 80 + - host: bar.foo.com + http: + paths: + - pathType: Prefix + path: "/" + backend: + service: + name: service2 + port: + number: 80 diff --git a/content/zh/examples/service/networking/simple-fanout-example.yaml b/content/zh/examples/service/networking/simple-fanout-example.yaml new file mode 100644 index 0000000000000..19fef9455be70 --- /dev/null +++ 
b/content/zh/examples/service/networking/simple-fanout-example.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: simple-fanout-example +spec: + rules: + - host: foo.bar.com + http: + paths: + - path: /foo + pathType: Prefix + backend: + service: + name: service1 + port: + number: 4200 + - path: /bar + pathType: Prefix + backend: + service: + name: service2 + port: + number: 8080 diff --git a/content/zh/examples/service/networking/test-ingress.yaml b/content/zh/examples/service/networking/test-ingress.yaml new file mode 100644 index 0000000000000..acd384ab5633a --- /dev/null +++ b/content/zh/examples/service/networking/test-ingress.yaml @@ -0,0 +1,10 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: test-ingress +spec: + defaultBackend: + service: + name: test + port: + number: 80 diff --git a/content/zh/examples/service/networking/tls-example-ingress.yaml b/content/zh/examples/service/networking/tls-example-ingress.yaml new file mode 100644 index 0000000000000..fe5d52a0cbef0 --- /dev/null +++ b/content/zh/examples/service/networking/tls-example-ingress.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tls-example-ingress +spec: + tls: + - hosts: + - https-example.foo.com + secretName: testsecret-tls + rules: + - host: https-example.foo.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: service1 + port: + number: 80 diff --git a/content/zh/includes/federated-task-tutorial-prereqs.md b/content/zh/includes/federated-task-tutorial-prereqs.md deleted file mode 100644 index 565b2f73d0d2d..0000000000000 --- a/content/zh/includes/federated-task-tutorial-prereqs.md +++ /dev/null @@ -1,16 +0,0 @@ - - -本指南假设您已安装有一个正在运行的 Kubernetes 集群联邦。如果没有,那么请转到 -[联邦管理指南](/docs/tutorials/federation/set-up-cluster-federation-kubefed/),了解如何启动联邦集群(或者让集群管理员为您执行此操作)。 -其他教程,例如 Kelsey Hightower 的[联邦 Kubernetes 教程](https://github.com/kelseyhightower/kubernetes-cluster-federation), -也可能帮助您创建联邦 Kubernetes 集群。 - diff --git a/content/zh/includes/partner-script.js b/content/zh/includes/partner-script.js deleted file mode 100644 index 499dc3f2e3829..0000000000000 --- a/content/zh/includes/partner-script.js +++ /dev/null @@ -1,1609 +0,0 @@ -;(function () { - var partners = [ - { - type: 0, - name: 'Sysdig', - logo: 'sys_dig', - link: 'https://sysdig.com/blog/monitoring-kubernetes-with-sysdig-cloud/', - blurb: 'Sysdig is the container intelligence company. Sysdig has created the only unified platform to deliver monitoring, security, and troubleshooting in a microservices-friendly architecture.' - }, - { - type: 0, - name: 'Puppet', - logo: 'puppet', - link: 'https://puppet.com/blog/announcing-kream-and-new-kubernetes-helm-and-docker-modules', - blurb: 'We\'ve developed tools and products to make your adoption of Kubernetes as efficient as possible, covering your full workflow cycle from development to production. And now Puppet Pipelines for Containers is your complete DevOps dashboard for Kubernetes.' - }, - { - type: 0, - name: 'Citrix', - logo: 'citrix', - link: 'https://www.citrix.com/networking/microservices.html', - blurb: 'Netscaler CPX gives app developers all the features they need to load balance their microservices and containerized apps with Kubernetes.' 
- }, - { - type: 0, - name: 'Cockroach Labs', - logo: 'cockroach_labs', - link: 'https://www.cockroachlabs.com/blog/running-cockroachdb-on-kubernetes/', - blurb: 'CockroachDB is a distributed SQL database whose built-in replication and survivability model pair with Kubernetes to truly make data easy.' - }, - { - type: 2, - name: 'Weaveworks', - logo: 'weave_works', - link: ' https://weave.works/kubernetes', - blurb: 'Weaveworks enables Developers and Dev/Ops teams to easily connect, deploy, secure, manage, and troubleshoot microservices in Kubernetes.' - }, - { - type: 0, - name: 'Intel', - logo: 'intel', - link: 'https://tectonic.com/press/intel-coreos-collaborate-on-openstack-with-kubernetes.html', - blurb: 'Powering the GIFEE (Google’s Infrastructure for Everyone Else), to run OpenStack deployments on Kubernetes.' - }, - { - type: 3, - name: 'Platform9', - logo: 'platform9', - link: 'https://platform9.com/products/kubernetes/', - blurb: 'Platform9 is the open source-as-a-service company that takes all of the goodness of Kubernetes and delivers it as a managed service.' - }, - { - type: 0, - name: 'Datadog', - logo: 'datadog', - link: 'http://docs.datadoghq.com/integrations/kubernetes/', - blurb: 'Full-stack observability for dynamic infrastructure & applications. Includes precision alerting, analytics and deep Kubernetes integrations. ' - }, - { - type: 0, - name: 'AppFormix', - logo: 'appformix', - link: 'http://www.appformix.com/solutions/appformix-for-kubernetes/', - blurb: 'AppFormix is a cloud infrastructure performance optimization service helping enterprise operators streamline their cloud operations on any Kubernetes cloud. ' - }, - { - type: 0, - name: 'Crunchy', - logo: 'crunchy', - link: 'http://info.crunchydata.com/blog/advanced-crunchy-containers-for-postgresql', - blurb: 'Crunchy PostgreSQL Container Suite is a set of containers for managing PostgreSQL with DBA microservices leveraging Kubernetes and Helm.' - }, - { - type: 0, - name: 'Aqua', - logo: 'aqua', - link: 'http://blog.aquasec.com/security-best-practices-for-kubernetes-deployment', - blurb: 'Deep, automated security for your containers running on Kubernetes.' - }, - { - type: 0, - name: 'Distelli', - logo: 'distelli', - link: 'https://www.distelli.com/', - blurb: 'Pipelines from your source repositories to your Kubernetes Clusters on any cloud.' - }, - { - type: 0, - name: 'Nuage networks', - logo: 'nuagenetworks', - link: 'https://github.com/nuagenetworks/nuage-kubernetes', - blurb: 'The Nuage SDN platform provides policy-based networking between Kubernetes Pods and non-Kubernetes environments with visibility and security monitoring.' - }, - { - type: 0, - name: 'Sematext', - logo: 'sematext', - link: 'https://sematext.com/kubernetes/', - blurb: 'Logging & Monitoring: Automatic collection and processing of Metrics, Events and Logs for auto-discovered pods and Kubernetes nodes.' - }, - { - type: 0, - name: 'Diamanti', - logo: 'diamanti', - link: 'https://www.diamanti.com/products/', - blurb: 'Diamanti deploys containers with guaranteed performance using Kubernetes in the first hyperconverged appliance purpose built for containerized applications.' - }, - { - type: 0, - name: 'Aporeto', - logo: 'aporeto', - link: 'https://aporeto.com/trireme', - blurb: 'Aporeto makes cloud-native applications secure by default without impacting developer velocity and works at any scale, on any cloud.' 
- }, - { - type: 2, - name: 'Giant Swarm', - logo: 'giantswarm', - link: 'https://giantswarm.io', - blurb: 'Giant Swarm enables you to simply and rapidly create and use Kubernetes clusters on-demand either on-premises or in the cloud. Contact Giant Swarm to learn about the best way to run cloud native applications anywhere.' - }, - { - type: 3, - name: 'Giant Swarm', - logo: 'giantswarm', - link: 'https://giantswarm.io/product/', - blurb: 'Giant Swarm enables you to simply and rapidly create and use Kubernetes clusters on-demand either on-premises or in the cloud. Contact Giant Swarm to learn about the best way to run cloud native applications anywhere.' - }, - { - type: 3, - name: 'Hasura', - logo: 'hasura', - link: 'https://hasura.io', - blurb: 'Hasura is a Kubernetes-based PaaS and a Postgres-based BaaS that accelerates app development with ready-to-use components.' - }, - { - type: 3, - name: 'Mirantis', - logo: 'mirantis', - link: 'https://www.mirantis.com/software/kubernetes/', - blurb: 'Mirantis - Mirantis Cloud Platform' - }, - { - type: 2, - name: 'Mirantis', - logo: 'mirantis', - link: 'https://content.mirantis.com/Containerizing-OpenStack-on-Kubernetes-Video-Landing-Page.html', - blurb: 'Mirantis builds and manages private clouds with open source software such as OpenStack, deployed as containers orchestrated by Kubernetes.' - }, - { - type: 0, - name: 'Kubernetic', - logo: 'kubernetic', - link: 'https://kubernetic.com/', - blurb: 'Kubernetic is a Kubernetes Desktop client that simplifies and democratizes cluster management for DevOps.' - }, - { - type: 1, - name: 'Reactive Ops', - logo: 'reactive_ops', - link: 'https://www.reactiveops.com/the-kubernetes-experts/', - blurb: 'ReactiveOps has written automation on best practices for infrastructure as code on GCP & AWS using Kubernetes, helping you build and maintain a world-class infrastructure at a fraction of the price of an internal hire.' - }, - { - type: 2, - name: 'Livewyer', - logo: 'livewyer', - link: 'https://livewyer.io/services/kubernetes-experts/', - blurb: 'Kubernetes experts that on-board applications and empower IT teams to get the most out of containerised technology.' - }, - { - type: 2, - name: 'Samsung SDS', - logo: 'samsung_sds', - link: 'http://www.samsungsdsa.com/cloud-infrastructure_kubernetes', - blurb: 'Samsung SDS’s Cloud Native Computing Team offers expert consulting across the range of technical aspects involved in building services targeted at a Kubernetes cluster.' - }, - { - type: 2, - name: 'Container Solutions', - logo: 'container_solutions', - link: 'http://container-solutions.com/resources/kubernetes/', - blurb: 'Container Solutions is a premium software consultancy that focuses on programmable infrastructure, offering our expertise in software development, strategy and operations to help you innovate at speed and scale.' - }, - { - type: 4, - name: 'Container Solutions', - logo: 'container_solutions', - link: 'http://container-solutions.com/resources/kubernetes/', - blurb: 'Container Solutions is a premium software consultancy that focuses on programmable infrastructure, offering our expertise in software development, strategy and operations to help you innovate at speed and scale.' - }, - { - type: 2, - name: 'Jetstack', - logo: 'jetstack', - link: 'https://www.jetstack.io/', - blurb: 'Jetstack is an organisation focused entirely on Kubernetes. They will help you to get the most out of Kubernetes through expert professional services and open source tooling. 
Get in touch, and accelerate your project.' - }, - { - type: 0, - name: 'Tigera', - logo: 'tigera', - link: 'http://docs.projectcalico.org/latest/getting-started/kubernetes/', - blurb: 'Tigera builds high performance, policy driven, cloud native networking solutions for Kubernetes.' - }, - { - type: 1, - name: 'Harbur', - logo: 'harbur', - link: 'https://harbur.io/', - blurb: 'Based in Barcelona, Harbur is a consulting firm that helps companies deploy self-healing solutions empowered by Container technologies' - }, - { - type: 0, - name: 'Spotinst', - logo: 'spotinst', - link: 'http://blog.spotinst.com/2016/08/04/elastigroup-kubernetes-minions-steroids/', - blurb: 'Your Kubernetes For 80% Less. Run K8s workloads on Spot Instances with 100% availability to save 80% + autoscale your Kubernetes with maximum efficiency in heterogenous environments.' - }, - { - type: 2, - name: 'InwinSTACK', - logo: 'inwinstack', - link: 'http://www.inwinstack.com/index.php/en/solutions-en/', - blurb: 'Our container service leverages OpenStack-based infrastructure and its container orchestration engine Magnum to manage Kubernetes clusters.' - }, - { - type: 4, - name: 'InwinSTACK', - logo: 'inwinstack', - link: 'http://www.inwinstack.com/index.php/en/solutions-en/', - blurb: 'Our container service leverages OpenStack-based infrastructure and its container orchestration engine Magnum to manage Kubernetes clusters.' - }, - { - type: 3, - name: 'InwinSTACK', - logo: 'inwinstack', - link: 'https://github.com/inwinstack/kube-ansible', - blurb: 'inwinSTACK - kube-ansible' - }, - { - type: 1, - name: 'Semantix', - logo: 'semantix', - link: 'http://www.semantix.com.br/', - blurb: 'Semantix is a company that works with data analytics and distributed systems. Kubernetes is used to orchestrate services for our customers.' - }, - { - type: 0, - name: 'ASM Technologies Limited', - logo: 'asm', - link: 'http://www.asmtech.com/', - blurb: 'Our technology supply chain portfolio enables your software products to be accessible, viable and available more effectively.' - }, - { - type: 1, - name: 'InfraCloud Technologies', - logo: 'infracloud', - link: 'http://blog.infracloud.io/state-of-kubernetes/', - blurb: 'InfraCloud Technologies is software consultancy which provides services in Containers, Cloud and DevOps.' - }, - { - type: 0, - name: 'SignalFx', - logo: 'signalfx', - link: 'https://github.com/signalfx/integrations/tree/master/kubernetes', - blurb: 'Gain real-time visibility across metrics & the most intelligent alerts for todays architectures, including deep integration with Kubernetes' - }, - { - type: 0, - name: 'NATS', - logo: 'nats', - link: 'https://github.com/pires/kubernetes-nats-cluster', - blurb: 'NATS is a simple, secure, and scalable cloud native messaging system.' - }, - { - type: 2, - name: 'RX-M', - logo: 'rxm', - link: 'http://rx-m.com/training/kubernetes-training/', - blurb: 'Market neutral Kubernetes Dev, DevOps and Production training and consulting services.' - }, - { - type: 4, - name: 'RX-M', - logo: 'rxm', - link: 'http://rx-m.com/training/kubernetes-training/', - blurb: 'Market neutral Kubernetes Dev, DevOps and Production training and consulting services.' - }, - { - type: 1, - name: 'Emerging Technology Advisors', - logo: 'eta', - link: 'https://www.emergingtechnologyadvisors.com/services/kubernetes.html', - blurb: 'ETA helps companies architect, implement, and manage scalable applications using Kubernetes on public or private cloud.' 
- }, - { - type: 0, - name: 'CloudPlex.io', - logo: 'cloudplex', - link: 'http://www.cloudplex.io', - blurb: 'CloudPlex enables operations teams to visually deploy, orchestrate, manage, and monitor infrastructure, applications, and services in public or private cloud.' - }, - { - type: 2, - name: 'Kumina', - logo: 'kumina', - link: 'https://www.kumina.nl/managed_kubernetes', - blurb: 'Kumina combines the power of Kubernetes with 10+ years of experience in IT operations. We create, build and support fully managed Kubernetes solutions on your choice of infrastructure. We also provide consulting and training.' - }, - { - type: 0, - name: 'CA Technologies', - logo: 'ca', - link: 'https://docops.ca.com/ca-continuous-delivery-director/integrations/en/plug-ins/kubernetes-plug-in', - blurb: 'The CA Continuous Delivery Director Kubernetes plugin orchestrates deployment of containerized applications within an end-to-end release pipeline.' - }, - { - type: 0, - name: 'CoScale', - logo: 'coscale', - link: 'http://www.coscale.com/blog/how-to-monitor-your-kubernetes-cluster', - blurb: 'Full stack monitoring of containers and microservices orchestrated by Kubernetes. Powered by anomaly detection to find problems faster.' - }, - { - type: 2, - name: 'Supergiant.io', - logo: 'supergiant', - link: 'https://supergiant.io/blog/supergiant-packing-algorithm-unique-save-money', - blurb: 'Supergiant autoscales hardware for Kubernetes. Open-source, it makes HA, distributed, stateful apps easy to deploy, manage, and scale.' - }, - { - type: 0, - name: 'Avi Networks', - logo: 'avinetworks', - link: 'https://kb.avinetworks.com/avi-vantage-openshift-installation-guide/', - blurb: 'Avis elastic application services fabric provides scalable, feature rich & integrated L4-7 networking for K8S environments.' - }, - { - type: 1, - name: 'Codecrux web technologies pvt ltd', - logo: 'codecrux', - link: 'http://codecrux.com/kubernetes/', - blurb: 'At CodeCrux we help your organization get the most out of Containers and Kubernetes, regardless of where you are in your journey' - }, - { - type: 0, - name: 'Greenqloud', - logo: 'qstack', - link: 'https://www.qstack.com/application-orchestration/', - blurb: 'Qstack provides self-serviceable on-site Kubernetes clusters with an intuitive User Interface for Infrastructure and Kubernetes management.' - }, - { - type: 1, - name: 'StackOverdrive.io', - logo: 'stackoverdrive', - link: 'http://www.stackoverdrive.net/kubernetes-consulting/', - blurb: 'StackOverdrive helps organizations of all sizes leverage Kubernetes for container based orchestration and management.' - }, - { - type: 0, - name: 'StackIQ, Inc.', - logo: 'stackiq', - link: 'https://www.stackiq.com/kubernetes/', - blurb: 'With Stacki and the Stacki Pallet for Kubernetes, you can go from bare metal to containers in one step very quickly and easily.' - }, - { - type: 0, - name: 'Cobe', - logo: 'cobe', - link: 'https://cobe.io/product-page/', - blurb: 'Manage Kubernetes clusters with a live, searchable model that captures all relationships and performance data in full visualised context.' - }, - { - type: 0, - name: 'Datawire', - logo: 'datawire', - link: 'http://www.datawire.io', - blurb: 'Datawires open source tools let your microservices developers be awesomely productive on Kubernetes, while letting ops sleep at night.' 
- }, - { - type: 0, - name: 'Mashape, Inc.', - logo: 'kong', - link: 'https://getkong.org/install/kubernetes/', - blurb: 'Kong is a scalable open source API layer that runs in front of any RESTful API and can be provisioned to a Kubernetes cluster.' - }, - { - type: 0, - name: 'F5 Networks', - logo: 'f5networks', - link: 'http://github.com/f5networks', - blurb: 'We have a LB integration into Kubernetes.' - }, - { - type: 1, - name: 'Lovable Tech', - logo: 'lovable', - link: 'http://lovable.tech/', - blurb: 'World class engineers, designers, and strategic consultants helping you ship Lovable web & mobile technology.' - }, - { - type: 0, - name: 'StackState', - logo: 'stackstate', - link: 'http://stackstate.com/platform/container-monitoring', - blurb: 'Operational Analytics across teams and tools. Includes topology visualization, root cause analysis and anomaly detection for Kubernetes.' - }, - { - type: 1, - name: 'INEXCCO INC', - logo: 'inexcco', - link: 'https://www.inexcco.com/', - blurb: 'Strong DevOps and Cloud talent working with couple clients on kubernetes and helm implementations. ' - }, - { - type: 2, - name: 'Bitnami', - logo: 'bitnami', - link: 'http://bitnami.com/kubernetes', - blurb: 'Bitnami brings a catalog of trusted, up to date, and easy to use applications and application building blocks to Kubernetes.' - }, - { - type: 1, - name: 'Nebulaworks', - logo: 'nebulaworks', - link: 'http://www.nebulaworks.com/container-platforms', - blurb: 'Nebulaworks provides services to help the enterprise adopt modern container platforms and optimized processes to enable innovation at scale.' - }, - { - type: 1, - name: 'EASYNUBE', - logo: 'easynube', - link: 'http://easynube.co.uk/devopsnube/', - blurb: 'EasyNube provide architecture, implementation, and manage scalable applications using Kubernetes and Openshift.' - }, - { - type: 1, - name: 'Opcito Technologies', - logo: 'opcito', - link: 'http://www.opcito.com/kubernetes/', - blurb: 'Opcito is a software consultancy that uses Kubernetes to help organisations build, architect & deploy highly scalable applications.' - }, - { - type: 0, - name: 'code by Dell EMC', - logo: 'codedellemc', - link: 'https://blog.codedellemc.com', - blurb: 'Respected as a thought leader in storage persistence for containerized applications. Contributed significant work to K8 and Ecosystem' - }, - { - type: 0, - name: 'Instana', - logo: 'instana', - link: 'https://www.instana.com/supported-technologies/', - blurb: 'Instana monitors performance of the applications, infrastructure, containers and services deployed on a Kubernetes cluster.' - }, - { - type: 0, - name: 'Netsil', - logo: 'netsil', - link: 'https://netsil.com/kubernetes/', - blurb: 'Generate a real-time, auto-discovered application topology map! Monitor Kubernetes pods and namespaces without any code instrumentation.' - }, - { - type: 2, - name: 'Treasure Data', - logo: 'treasuredata', - link: 'https://fluentd.treasuredata.com/kubernetes-logging/', - blurb: 'Fluentd Enterprise brings smart, secure logging to Kubernetes, and brings integrations with backends such as Splunk, Kafka, or AWS S3.' - }, - { - type: 2, - name: 'Kenzan', - logo: 'Kenzan', - link: 'http://kenzan.com/?ref=kubernetes', - blurb: 'We provide custom consulting services leveraging Kubernetes as our foundation. This involves the platform development, delivery pipelines, and the application development within Kubernetes.' 
- }, - { - type: 2, - name: 'New Context', - logo: 'newcontext', - link: 'https://www.newcontext.com/devsecops-infrastructure-automation-orchestration/', - blurb: 'New Context builds and uplifts secure Kubernetes implementations and migrations, from initial design to infrastructure automation and management.' - }, - { - type: 2, - name: 'Banzai', - logo: 'banzai', - link: 'https://banzaicloud.com/platform/', - blurb: 'Banzai Cloud brings cloud native to the enterprise and simplifies the transition to microservices on Kubernetes.' - }, - { - type: 3, - name: 'Kublr', - logo: 'kublr', - link: 'http://kublr.com', - blurb: 'Kublr - Accelerate and control the deployment, scaling, monitoring and management of your containerized applications.' - }, - { - type: 1, - name: 'ControlPlane', - logo: 'controlplane', - link: 'https://control-plane.io', - blurb: 'We are a London-based Kubernetes consultancy with a focus on security and continuous delivery. We offer consulting & training.' - }, - { - type: 3, - name: 'Nirmata', - logo: 'nirmata', - link: 'https://www.nirmata.com/', - blurb: 'Nirmata - Nirmata Managed Kubernetes' - }, - { - type: 2, - name: 'Nirmata', - logo: 'nirmata', - link: 'https://www.nirmata.com/', - blurb: 'Nirmata is a software platform that helps DevOps teams deliver enterprise-grade and cloud-provider agnostic Kubernetes based container management solutions.' - }, - { - type: 3, - name: 'TenxCloud', - logo: 'tenxcloud', - link: 'https://tenxcloud.com', - blurb: 'TenxCloud - TenxCloud Container Engine (TCE)' - }, - { - type: 2, - name: 'TenxCloud', - logo: 'tenxcloud', - link: 'https://www.tenxcloud.com/', - blurb: 'Founded in October 2014, TenxCloud is a leading enterprise container cloud computing service provider in China, covering the areas such as container PaaS cloud platform, micro-service management, DevOps, development test, AIOps and so on. Provide private cloud PaaS products and solutions for financial, energy, operator, manufacturing, education and other industry customers.' - }, - { - type: 0, - name: 'Twistlock', - logo: 'twistlock', - link: 'https://www.twistlock.com/', - blurb: 'Security at Kubernetes Scale: Twistlock allows you to deploy fearlessly with assurance that your images and containers are free of vulnerabilities and protected at runtime.' - }, - { - type: 0, - name: 'Endocode AG', - logo: 'endocode', - link: 'https://endocode.com/kubernetes/', - blurb: 'Endocode practices and teaches the open source way. Kernel to cluster - Dev to Ops. We offer Kubernetes trainings, services and support.' - }, - { - type: 2, - name: 'Accenture', - logo: 'accenture', - link: 'https://www.accenture.com/us-en/service-application-containers', - blurb: 'Architecture, implementation and operation of world-class Kubernetes solutions for cloud-native clients.' - }, - { - type: 1, - name: 'Biarca', - logo: 'biarca', - link: 'http://biarca.io/', - blurb: 'Biarca is a cloud services provider and key focus areas Key areas of focus for Biarca include Cloud Adoption Services, Infrastructure Services, DevOps Services and Application Services. Biarca leverages Kubernetes to deliver containerized solutions.' - }, - { - type: 2, - name: 'Claranet', - logo: 'claranet', - link: 'http://www.claranet.co.uk/hosting/google-cloud-platform-consulting-managed-services', - blurb: 'Claranet helps people migrate to the cloud and take full advantage of the new world it offers. 
We consult, design, build and proactively manage the right infrastructure and automation tooling for clients to achieve this.' - }, - { - type: 1, - name: 'CloudKite', - logo: 'cloudkite', - link: 'https://cloudkite.io/', - blurb: 'CloudKite.io helps companies build and maintain highly automated, resilient, and impressively performing software on Kubernetes.' - }, - { - type: 2, - name: 'CloudOps', - logo: 'CloudOps', - link: 'https://www.cloudops.com/services/docker-and-kubernetes-workshops/', - blurb: 'CloudOps gets you hands-on with the K8s ecosystem via workshop/lab. Get prod ready K8s in cloud(s) of your choice with our managed services.' - }, - { - type: 2, - name: 'Ghostcloud', - logo: 'ghostcloud', - link: 'https://www.ghostcloud.cn/ecos-kubernetes', - blurb: 'EcOS is an enterprise-grade PaaS / CaaS based on Docker and Kubernetes, which makes it easier to configure, deploy and manage containerized applications.' - }, - { - type: 3, - name: 'Ghostcloud', - logo: 'ghostcloud', - link: 'https://www.ghostcloud.cn/ecos-kubernetes', - blurb: 'EcOS is an enterprise-grade PaaS / CaaS based on Docker and Kubernetes, which makes it easier to configure, deploy and manage containerized applications.' - }, - { - type: 2, - name: 'Contino', - logo: 'contino', - link: 'https://www.contino.io/', - blurb: 'We help enterprise organizations adopt DevOps, containers and cloud computing. Contino is a global consultancy that enables regulated organizations to accelerate innovation through the adoption of modern approaches to software delivery.' - }, - { - type: 2, - name: 'Booz Allen Hamilton', - logo: 'boozallenhamilton', - link: 'https://www.boozallen.com/', - blurb: 'Booz Allen partners with public and private sector clients to solve their most difficult challenges through a combination of consulting, analytics, mission operations, technology, systems delivery, cybersecurity, engineering, and innovation expertise.' - }, - { - type: 1, - name: 'BigBinary', - logo: 'bigbinary', - link: 'http://blog.bigbinary.com/categories/Kubernetes', - blurb: 'Provider of Digital Solutions for federal and commercial clients, to include DevSecOps, cloud platforms, transformation strategy, cognitive solutions, and UX.' - }, - { - type: 0, - name: 'CloudPerceptions', - logo: 'cloudperceptions', - link: 'https://www.meetup.com/Triangle-Kubernetes-Meetup/files/', - blurb: 'Container security solution for small-to-medium size enterprises who plan to run Kubernetes on shared infrastructure.' - }, - { - type: 2, - name: 'Creationline, Inc.', - logo: 'creationline', - link: 'https://www.creationline.com/ci', - blurb: 'Total solution for container based IT resource management.' - }, - { - type: 0, - name: 'DataCore Software', - logo: 'datacore', - link: 'https://www.datacore.com/solutions/virtualization/containerization', - blurb: 'DataCore provides highly-available, high-performance universal block storage for Kubernetes, radically improving the speed of deployment.' - }, - { - type: 0, - name: 'Elastifile', - logo: 'elastifile', - link: 'https://www.elastifile.com/stateful-containers', - blurb: 'Elastifile’s cross-cloud data fabric delivers elastically scalable, high performance, software-defined persistent storage for Kubernetes.' - }, - { - type: 0, - name: 'GitLab', - logo: 'gitlab', - link: 'https://about.gitlab.com/2016/11/14/idea-to-production/', - blurb: 'With GitLab and Kubernetes, you can deploy a complete CI/CD pipeline with multiple environments, automatic deployments, and automatic monitoring.' 
- }, - { - type: 0, - name: 'Gravitational, Inc.', - logo: 'gravitational', - link: 'https://gravitational.com/telekube/', - blurb: 'Telekube combines Kubernetes with Teleport, our modern SSH server, so operators can remotely manage a multitude of K8s application deployments.' - }, - { - type: 0, - name: 'Hitachi Data Systems', - logo: 'hitachi', - link: 'https://www.hds.com/en-us/products-solutions/application-solutions/unified-compute-platform-with-kubernetes-orchestration.html', - blurb: 'Build the Applications You Need to Drive Your Business - DEVELOP AND DEPLOY APPLICATIONS FASTER AND MORE RELIABLY.' - }, - { - type: 1, - name: 'Infosys Technologies', - logo: 'infosys', - link: 'https://www.infosys.com', - blurb: 'Monolithic to microservices on openshift is a offering that we are building as part of open source practice.' - }, - { - type: 0, - name: 'JFrog', - logo: 'jfrog', - link: 'https://www.jfrog.com/use-cases/12584/', - blurb: 'You can use Artifactory to store and manage all of your application’s container images and deploy to Kubernetes and setup a build, test, deploy pipeline using Jenkins and Artifactory. Once an image is ready to be rolled out, Artifactory can trigger a rolling-update deployment into a Kubernetes cluster without downtime – automatically!' - }, - { - type: 0, - name: 'Navops by Univa', - logo: 'navops', - link: 'https://www.navops.io', - blurb: 'Navops is a suite of products that enables enterprises to take full advantage of Kubernetes and provides the ability to quickly and efficiently run containers at scale.' - }, - { - type: 0, - name: 'NeuVector', - logo: 'neuvector', - link: 'http://neuvector.com/solutions-for-kubernetes-security/', - blurb: 'NeuVector delivers an application and network intelligent container network security solution integrated with and optimized for Kubernetes.' - }, - { - type: 1, - name: 'OpsZero', - logo: 'opszero', - link: 'https://www.opszero.com/kubernetes.html', - blurb: 'opsZero provides DevOps for Startups. We build and service your Kubernetes and Cloud Infrastructure to accelerate your release cycle.' - }, - { - type: 1, - name: 'Shiwaforce.com Ltd.', - logo: 'shiwaforce', - link: 'https://www.shiwaforce.com/en/', - blurb: 'Shiwaforce.com is the Agile Partner in Digital Transformation. Our solutions follow business changes quickly, easily and cost-effectively.' - }, - { - type: 1, - name: 'SoftServe', - logo: 'softserve', - link: 'https://www.softserveinc.com/en-us/blogs/kubernetes-travis-ci/', - blurb: 'SoftServe allows its clients to adopt modern application design patterns and benefit from fully integrated, highly available, cost effective Kubernetes clusters at any scale.' - }, - { - type: 1, - name: 'Solinea', - logo: 'solinea', - link: 'https://www.solinea.com/cloud-consulting-services/container-microservices-offerings', - blurb: 'Solinea is a digital transformation consultancy that enables businesses to build innovative solutions by adopting cloud native computing.' - }, - { - type: 1, - name: 'Sphere Software, LLC', - logo: 'spheresoftware', - link: 'https://sphereinc.com/kubernetes/', - blurb: 'The Sphere Software team of experts allows customers to architect and implement scalable applications using Kubernetes in Google Cloud, AWS, and Azure.' 
- }, - { - type: 1, - name: 'Altoros', - logo: 'altoros', - link: 'https://www.altoros.com/container-orchestration-tools-enablement.html', - blurb: 'Deployment and configuration of Kubernetes, Optimization of existing solutions, training for developers on using Kubernetes, support.' - }, - { - type: 0, - name: 'Cloudbase Solutions', - logo: 'cloudbase', - link: 'https://cloudbase.it/kubernetes', - blurb: 'Cloudbase Solutions provides Kubernetes cross-cloud interoperability for Windows and Linux deployments based on open source technologies.' - }, - { - type: 0, - name: 'Codefresh', - logo: 'codefresh', - link: 'https://codefresh.io/kubernetes-deploy/', - blurb: 'Codefresh is a complete DevOps platform built for containers and Kubernetes. With CI/CD pipelines, image management, and deep integrations into Kubernetes and Helm.' - }, - { - type: 0, - name: 'NetApp', - logo: 'netapp', - link: 'http://netapp.io/2016/12/23/introducing-trident-dynamic-persistent-volume-provisioner-kubernetes/', - blurb: 'Dynamic provisioning and persistent storage support.' - }, - { - type: 0, - name: 'OpenEBS', - logo: 'OpenEBS', - link: 'https://openebs.io/', - blurb: 'OpenEBS is containerized storage for containers integrated tightly into Kubernetes and based on distributed block storage and containerization of storage control. OpenEBS derives intent from K8s and other YAML or JSON such as per container QoS SLAs, tiering and replica policies, and more. OpenEBS is EBS API compliant.' - }, - { - type: 3, - name: 'Google Kubernetes Engine', - logo: 'google', - link: 'https://cloud.google.com/kubernetes-engine/', - blurb: 'Google - Google Kubernetes Engine' - }, - { - type: 1, - name: 'Superorbital', - logo: 'superorbital', - link: 'https://superorbit.al/workshops/kubernetes/', - blurb: 'Helping companies navigate the Cloud Native waters through Kubernetes consulting and training.' - }, - { - type: 3, - name: 'Apprenda', - logo: 'apprenda', - link: 'https://apprenda.com/kismatic/', - blurb: 'Apprenda - Kismatic Enterprise Toolkit (KET)' - }, - { - type: 3, - name: 'Red Hat', - logo: 'redhat', - link: 'https://www.openshift.com', - blurb: 'Red Hat - OpenShift Online and OpenShift Container Platform' - }, - { - type: 3, - name: 'Rancher', - logo: 'rancher', - link: 'http://rancher.com/kubernetes/', - blurb: 'Rancher Inc. - Rancher Kubernetes' - }, - { - type: 3, - name: 'Canonical', - logo: 'canonical', - link: 'https://www.ubuntu.com/kubernetes', - blurb: 'The Canonical Distribution of Kubernetes enables you to operate Kubernetes clusters on demand on any major public cloud and private infrastructure.' - }, - { - type: 2, - name: 'Canonical', - logo: 'canonical', - link: 'https://www.ubuntu.com/kubernetes', - blurb: 'Canonical Ltd. 
- Canonical Distribution of Kubernetes' - }, - { - type: 3, - name: 'Cisco', - logo: 'cisco', - link: 'https://www.cisco.com', - blurb: 'Cisco Systems - Cisco Container Platform' - }, - { - type: 3, - name: 'Cloud Foundry', - logo: 'cff', - link: 'https://www.cloudfoundry.org/container-runtime/', - blurb: 'Cloud Foundry - Cloud Foundry Container Runtime' - }, - { - type: 3, - name: 'IBM', - logo: 'ibm', - link: 'https://www.ibm.com/cloud/container-service', - blurb: 'IBM - IBM Cloud Kubernetes Service' - }, - { - type: 2, - name: 'IBM', - logo: 'ibm', - link: 'https://www.ibm.com/cloud-computing/bluemix/containers', - blurb: 'The IBM Bluemix Container Service combines Docker and Kubernetes to deliver powerful tools, an intuitive user experiences, and built-in security and isolation to enable rapid delivery of applications all while leveraging Cloud Services including cognitive capabilities from Watson.' - }, - { - type: 3, - name: 'Samsung', - logo: 'samsung_sds', - link: 'https://github.com/samsung-cnct/kraken', - blurb: 'Samsung SDS - Kraken' - }, - { - type: 3, - name: 'IBM', - logo: 'ibm', - link: 'https://www.ibm.com/cloud-computing/products/ibm-cloud-private/', - blurb: 'IBM - IBM Cloud Private' - }, - { - type: 3, - name: 'Kinvolk', - logo: 'kinvolk', - link: 'https://github.com/kinvolk/kube-spawn', - blurb: 'Kinvolk - kube-spawn' - }, - { - type: 3, - name: 'Heptio', - logo: 'heptio', - link: 'https://aws.amazon.com/quickstart/architecture/heptio-kubernetes', - blurb: 'Heptio - AWS-Quickstart' - }, - { - type: 2, - name: 'Heptio', - logo: 'heptio', - link: 'http://heptio.com', - blurb: 'Heptio helps businesses of all sizes get closer to the vibrant Kubernetes community.' - }, - { - type: 3, - name: 'StackPointCloud', - logo: 'stackpoint', - link: 'https://stackpoint.io', - blurb: 'StackPointCloud - StackPointCloud' - }, - { - type: 2, - name: 'StackPointCloud', - logo: 'stackpoint', - link: 'https://stackpoint.io', - blurb: 'StackPointCloud offers a wide range of support plans for managed Kubernetes clusters built through its universal control plane for Kubernetes Anywhere.' - }, - { - type: 3, - name: 'Caicloud', - logo: 'caicloud', - link: 'https://caicloud.io/products/compass', - blurb: 'Caicloud - Compass' - }, - { - type: 2, - name: 'Caicloud', - logo: 'caicloud', - link: 'https://caicloud.io/', - blurb: 'Founded by ex-Googlers,and early Kubernetes contributors, Caicloud leverages Kubernetes to provide container products which have successfully served Fortune 500 enterprises, and further utilizes Kubernetes as a vehicle to deliver ultra-speed deep learning experience.' - }, - { - type: 3, - name: 'Alibaba', - logo: 'alibaba', - link: 'https://www.aliyun.com/product/containerservice?spm=5176.8142029.388261.219.3836dbccRpJ5e9', - blurb: 'Alibaba Cloud - Alibaba Cloud Container Service' - }, - { - type: 3, - name: 'Tencent', - logo: 'tencent', - link: 'https://cloud.tencent.com/product/ccs?lang=en', - blurb: 'Tencent Cloud - Tencent Cloud Container Service' - }, - { - type: 3, - name: 'Huawei', - logo: 'huawei', - link: 'http://www.huaweicloud.com/product/cce.html', - blurb: 'Huawei - Huawei Cloud Container Engine' - }, - { - type: 2, - name: 'Huawei', - logo: 'huawei', - link: 'http://developer.huawei.com/ict/en/site-paas', - blurb: 'FusionStage is an enterprise-grade Platform as a Service product, the core of which is based on mainstream open source container technology including Kubernetes and Docker.' 
-    },
-    {
-      type: 3,
-      name: 'Google',
-      logo: 'google',
-      link: 'https://github.com/kubernetes/kubernetes/tree/master/cluster',
-      blurb: 'Google - kube-up.sh on Google Compute Engine'
-    },
-    {
-      type: 3,
-      name: 'Poseidon',
-      logo: 'poseidon',
-      link: 'https://typhoon.psdn.io/',
-      blurb: 'Poseidon - Typhoon'
-    },
-    {
-      type: 3,
-      name: 'Netease',
-      logo: 'netease',
-      link: 'https://www.163yun.com/product/container-service-dedicated',
-      blurb: 'Netease - Netease Container Service Dedicated'
-    },
-    {
-      type: 2,
-      name: 'Loodse',
-      logo: 'loodse',
-      link: 'https://loodse.com',
-      blurb: 'Loodse provides Kubernetes training & consulting, and host related events regularly across Europe.'
-    },
-    {
-      type: 4,
-      name: 'Loodse',
-      logo: 'loodse',
-      link: 'https://loodse.com',
-      blurb: 'Loodse provides Kubernetes training & consulting, and host related events regularly across Europe.'
-    },
-    {
-      type: 4,
-      name: 'LF Training',
-      logo: 'lf-training',
-      link: 'https://training.linuxfoundation.org/',
-      blurb: 'The Linux Foundation’s training program combines the broad, foundational knowledge with the networking opportunities that attendees need to thrive in their careers today.'
-    },
-    {
-      type: 3,
-      name: 'Loodse',
-      logo: 'loodse',
-      link: 'https://loodse.com',
-      blurb: 'Loodse - Kubermatic Container Engine'
-    },
-    {
-      type: 1,
-      name: 'LTI',
-      logo: 'lti',
-      link: 'https://www.lntinfotech.com/',
-      blurb: 'LTI helps enterprises architect, develop and support scalable cloud native apps using Docker and Kubernetes for private or public cloud.'
-    },
-    {
-      type: 3,
-      name: 'Microsoft',
-      logo: 'microsoft',
-      link: 'https://github.com/Azure/acs-engine',
-      blurb: 'Microsoft - Azure acs-engine'
-    },
-    {
-      type: 3,
-      name: 'Microsoft',
-      logo: 'microsoft',
-      link: 'https://docs.microsoft.com/en-us/azure/aks/',
-      blurb: 'Microsoft - Azure Container Service AKS'
-    },
-    {
-      type: 3,
-      name: 'Oracle',
-      logo: 'oracle',
-      link: 'http://www.wercker.com/product',
-      blurb: 'Oracle - Oracle Container Engine'
-    },
-    {
-      type: 3,
-      name: 'Oracle',
-      logo: 'oracle',
-      link: 'https://github.com/oracle/terraform-kubernetes-installer',
-      blurb: 'Oracle - Oracle Terraform Kubernetes Installer'
-    },
-    {
-      type: 3,
-      name: 'Mesosphere',
-      logo: 'mesosphere',
-      link: 'https://mesosphere.com/kubernetes/',
-      blurb: 'Mesosphere - Kubernetes on DC/OS'
-    },
-    {
-      type: 3,
-      name: 'Appscode',
-      logo: 'appscode',
-      link: 'https://appscode.com/products/cloud-deployment/',
-      blurb: 'Appscode - Pharmer'
-    },
-    {
-      type: 3,
-      name: 'SAP',
-      logo: 'sap',
-      link: 'https://cloudplatform.sap.com/index.html',
-      blurb: 'SAP - Cloud Platform - Gardener (not yet released)'
-    },
-    {
-      type: 3,
-      name: 'Oracle',
-      logo: 'oracle',
-      link: 'https://www.oracle.com/linux/index.html',
-      blurb: 'Oracle - Oracle Linux Container Services for use with Kubernetes'
-    },
-    {
-      type: 3,
-      name: 'CoreOS',
-      logo: 'coreos',
-      link: 'https://github.com/kubernetes-incubator/bootkube',
-      blurb: 'CoreOS - bootkube'
-    },
-    {
-      type: 2,
-      name: 'CoreOS',
-      logo: 'coreos',
-      link: 'https://coreos.com/',
-      blurb: 'Tectonic is the enterprise-ready Kubernetes product, by CoreOS. It adds key features to allow you to manage, update, and control clusters in production.'
-    },
-    {
-      type: 3,
-      name: 'Weaveworks',
-      logo: 'weave_works',
-      link: '/docs/setup/independent/create-cluster-kubeadm/',
-      blurb: 'Weaveworks - kubeadm'
-    },
-    {
-      type: 3,
-      name: 'Joyent',
-      logo: 'joyent',
-      link: 'https://github.com/joyent/triton-kubernetes',
-      blurb: 'Joyent - Triton Kubernetes'
-    },
-    {
-      type: 3,
-      name: 'Wise2c',
-      logo: 'wise2c',
-      link: 'http://www.wise2c.com/solution',
-      blurb: 'Wise2C Technology - WiseCloud'
-    },
-    {
-      type: 2,
-      name: 'Wise2c',
-      logo: 'wise2c',
-      link: 'http://www.wise2c.com',
-      blurb: 'Using Kubernetes to providing IT continuous delivery and Enterprise grade container management solution to Financial Industry.'
-    },
-    {
-      type: 3,
-      name: 'Docker',
-      logo: 'docker',
-      link: 'https://www.docker.com/enterprise-edition',
-      blurb: 'Docker - Docker Enterprise Edition'
-    },
-    {
-      type: 3,
-      name: 'Daocloud',
-      logo: 'daocloud',
-      link: 'http://www.daocloud.io/dce',
-      blurb: 'DaoCloud - DaoCloud Enterprise'
-    },
-    {
-      type: 2,
-      name: 'Daocloud',
-      logo: 'daocloud',
-      link: 'http://www.daocloud.io/dce',
-      blurb: 'We provide enterprise-level cloud native application platform that supports both Kubernetes and Docker Swarm.'
-    },
-    {
-      type: 4,
-      name: 'Daocloud',
-      logo: 'daocloud',
-      link: 'http://www.daocloud.io/dce',
-      blurb: 'We provide enterprise-level cloud native application platform that supports both Kubernetes and Docker Swarm.'
-    },
-    {
-      type: 3,
-      name: 'SUSE',
-      logo: 'suse',
-      link: 'https://www.suse.com/products/caas-platform/',
-      blurb: 'SUSE - SUSE CaaS (Container as a Service) Platform'
-    },
-    {
-      type: 3,
-      name: 'Pivotal',
-      logo: 'pivotal',
-      link: 'https://cloud.vmware.com/pivotal-container-service',
-      blurb: 'Pivotal/VMware - Pivotal Container Service (PKS)'
-    },
-    {
-      type: 3,
-      name: 'VMware',
-      logo: 'vmware',
-      link: 'https://cloud.vmware.com/pivotal-container-service',
-      blurb: 'Pivotal/VMware - Pivotal Container Service (PKS)'
-    },
-    {
-      type: 3,
-      name: 'Alauda',
-      logo: 'alauda',
-      link: 'http://www.alauda.cn/product/detail/id/68.html',
-      blurb: 'Alauda - Alauda EE'
-    },
-    {
-      type: 4,
-      name: 'Alauda',
-      logo: 'alauda',
-      link: 'http://www.alauda.cn/product/detail/id/68.html',
-      blurb: 'Alauda provides Kubernetes-Centric Enterprise Platform-as-a-Service offerings with a razor focus on delivering Cloud Native capabilities and DevOps best practices to enterprise customers across industries in China.'
-    },
-    {
-      type: 2,
-      name: 'Alauda',
-      logo: 'alauda',
-      link: 'www.alauda.io',
-      blurb: 'Alauda provides Kubernetes-Centric Enterprise Platform-as-a-Service offerings with a razor focus on delivering Cloud Native capabilities and DevOps best practices to enterprise customers across industries in China.'
-    },
-    {
-      type: 3,
-      name: 'EasyStack',
-      logo: 'easystack',
-      link: 'https://easystack.cn/eks/',
-      blurb: 'EasyStack - EasyStack Kubernetes Service (EKS)'
-    },
-    {
-      type: 3,
-      name: 'CoreOS',
-      logo: 'coreos',
-      link: 'https://coreos.com/tectonic/',
-      blurb: 'CoreOS - Tectonic'
-    },
-    {
-      type: 0,
-      name: 'GoPaddle',
-      logo: 'gopaddle',
-      link: 'https://gopaddle.io',
-      blurb: 'goPaddle is a DevOps platform for Kubernetes developers. It simplifies the Kubernetes Service creation and maintenance through source to image conversion, build & version management, team management, access controls and audit logs, single click provision of Kubernetes Clusters across multiple clouds from a single console.'
-    },
-    {
-      type: 0,
-      name: 'Vexxhost',
-      logo: 'vexxhost',
-      link: 'https://vexxhost.com/public-cloud/container-services/kubernetes/',
-      blurb: 'VEXXHOST offers a high-performance container management service powered by Kubernetes and OpenStack Magnum.'
-    },
-    {
-      type: 1,
-      name: 'Component Soft',
-      logo: 'componentsoft',
-      link: 'https://www.componentsoft.eu/?p=3925',
-      blurb: 'Component Soft offers training, consultation and support around open cloud technologies like Kubernetes, Docker, Openstack and Ceph.'
-    },
-    {
-      type: 0,
-      name: 'Datera',
-      logo: 'datera',
-      link: 'http://www.datera.io/kubernetes/',
-      blurb: 'Datera delivers high performance, self-managing elastic block storage with self-service provisioning for deploying Kubernetes at scale.'
-    },
-    {
-      type: 0,
-      name: 'Containership',
-      logo: 'containership',
-      link: 'https://containership.io/',
-      blurb: 'Containership is a cloud agnostic managed kubernetes offering that supports automatic provisioning on over 14 cloud providers.'
-    },
-    {
-      type: 0,
-      name: 'Pure Storage',
-      logo: 'pure_storage',
-      link: 'https://hub.docker.com/r/purestorage/k8s/',
-      blurb: 'Our flexvol driver and dynamic provisioner allow FlashArray/Flashblade storage devices to be consumed as first class persistent storage from within Kubernetes.'
-    },
-    {
-      type: 0,
-      name: 'Elastisys',
-      logo: 'elastisys',
-      link: 'https://elastisys.com/kubernetes/',
-      blurb: 'Predictive autoscaling - detects recurring workload variations, irregular traffic spikes, and everything in between. Runs K8s in any public or private cloud.'
-    },
-    {
-      type: 0,
-      name: 'Portworx',
-      logo: 'portworx',
-      link: 'https://portworx.com/use-case/kubernetes-storage/',
-      blurb: 'With Portworx, you can manage any database or stateful service on any infrastructure using Kubernetes. You get a single data management layer for all of your stateful services, no matter where they run.'
-    },
-    {
-      type: 1,
-      name: 'Object Computing, Inc.',
-      logo: 'objectcomputing',
-      link: 'https://objectcomputing.com/services/software-engineering/devops/kubernetes-services',
-      blurb: 'Our portfolio of DevOps consulting services includes Kubernetes support, development, and training.'
-    },
-    {
-      type: 1,
-      name: 'Isotoma',
-      logo: 'isotoma',
-      link: 'https://www.isotoma.com/blog/2017/10/24/containerisation-tips-for-using-kubernetes-with-aws/',
-      blurb: 'Based in the North of England, Amazon partners who are delivering Kubernetes solutions on AWS for replatforming and native development.'
-    },
-    {
-      type: 1,
-      name: 'Servian',
-      logo: 'servian',
-      link: 'https://www.servian.com/cloud-and-technology/',
-      blurb: 'Based in Australia, Servian provides advisory, consulting and managed services to support both application and data centric kubernetes use cases.'
-    },
-    {
-      type: 1,
-      name: 'Redzara',
-      logo: 'redzara',
-      link: 'http://redzara.com/cloud-service',
-      blurb: 'Redzara has wide and in-depth experience in Cloud automation, now taking one giant step by providing container service offering and services to our customers.'
-    },
-    {
-      type: 0,
-      name: 'Dataspine',
-      logo: 'dataspine',
-      link: 'http://dataspine.xyz/',
-      blurb: 'Dataspine is building a secure, elastic and serverless deployment platform for production ML/AI workloads on top of k8s.'
-    },
-    {
-      type: 1,
-      name: 'CloudBourne',
-      logo: 'cloudbourne',
-      link: 'https://cloudbourne.com/kubernetes-enterprise-hybrid-cloud/',
-      blurb: 'Want to achieve maximum build, deploy and monitoring automation using Kubernetes? We can help.'
-    },
-    {
-      type: 0,
-      name: 'CloudBourne',
-      logo: 'cloudbourne',
-      link: 'https://cloudbourne.com/',
-      blurb: 'Our AppZ Hybrid Cloud Platform can help you achieve your digital transformation goals using the powerful Kubernetes.'
-    },
-    {
-      type: 3,
-      name: 'BoCloud',
-      logo: 'bocloud',
-      link: 'http://www.bocloud.com.cn/en/index.html',
-      blurb: 'BoCloud - BeyondcentContainer'
-    },
-    {
-      type: 2,
-      name: 'Naitways',
-      logo: 'naitways',
-      link: 'https://www.naitways.com/',
-      blurb: 'Naitways is an Operator (AS57119), Integrator and Cloud Services Provider (our own !). We aim to provide value-added services through our mastering of the whole value chain (Infrastructure, Network, Human skills). Private and Public Cloud is available through Kubernetes managed or unmanaged.'
-    },
-    {
-      type: 2,
-      name: 'Kinvolk',
-      logo: 'kinvolk',
-      link: 'https://kinvolk.io/kubernetes/',
-      blurb: 'Kinvolk offers Kubernetes engineering & operations support from cluster to kernel. Leading cloud-native organizations turn to Kinvolk for deep-stack Linux expertise.'
-    },
-    {
-      type: 1,
-      name: 'Cascadeo Corporation',
-      logo: 'cascadeo',
-      link: 'http://www.cascadeo.com/',
-      blurb: 'Cascadeo designs, implements, and manages containerized workloads with Kubernetes, for both existing applications and greenfield development projects.'
-    },
-    {
-      type: 1,
-      name: 'Elastisys AB',
-      logo: 'elastisys',
-      link: 'https://elastisys.com/services/#kubernetes',
-      blurb: 'We design, build, and operate Kubernetes clusters. We are experts in highly available and self-optimizing Kubernetes infrastructures'
-    },
-    {
-      type: 1,
-      name: 'Greenfield Guild',
-      logo: 'greenfield',
-      link: 'http://greenfieldguild.com/',
-      blurb: 'The Greenfield Guild builds quality open source solutions on, and offers training and support for, Kubernetes in any environment.'
-    },
-    {
-      type: 1,
-      name: 'PolarSeven',
-      logo: 'polarseven',
-      link: 'https://polarseven.com/what-we-do/kubernetes/',
-      blurb: 'To get started up and running with Kubernetes (K8s) our PolarSeven consultants can help you with creating a fully functional dockerized environment to run and deploy your applications.'
-    },
-    {
-      type: 1,
-      name: 'Kloia',
-      logo: 'kloia',
-      link: 'https://kloia.com/kubernetes/',
-      blurb: 'Kloia is DevOps and Microservices Consultancy company that helps its customers to migrate their environment to cloud platforms for enabling more scalable and secure environments. We use Kubernetes to provide our customers all-in-one solutions in an cloud-agnostic way.'
-    },
-    {
-      type: 0,
-      name: 'Bluefyre',
-      logo: 'bluefyre',
-      link: 'https://www.bluefyre.io',
-      blurb: 'Bluefyre offers a developer-first security platform that is native to Kubernetes. Bluefyre helps your development team ship secure code on Kubernetes faster!'
-    },
-    {
-      type: 0,
-      name: 'Harness',
-      logo: 'harness',
-      link: 'https://harness.io/harness-continuous-delivery/secret-sauce/smart-automation/',
-      blurb: 'Harness offers Continuous Delivery As-A-Service will full support for containerized apps and Kubernetes clusters.'
-    },
-    {
-      type: 0,
-      name: 'VMware - Wavefront',
-      logo: 'wavefront',
-      link: 'https://www.wavefront.com/solutions/container-monitoring/',
-      blurb: 'The Wavefront platform provides metrics-driven analytics and monitoring for Kubernetes and container dashboards for DevOps and developer teams delivering visibility into high-level services as well as granular container metrics.'
-    },
-    {
-      type: 0,
-      name: 'Bloombase, Inc.',
-      logo: 'bloombase',
-      link: 'https://www.bloombase.com/go/kubernetes',
-      blurb: 'Bloombase provides high bandwidth, defense-in-depth data-at-rest encryption to lock down Kubernetes crown-jewels at scale.'
-    },
-    {
-      type: 0,
-      name: 'Kasten',
-      logo: 'kasten',
-      link: 'https://kasten.io/product/',
-      blurb: 'Kasten provides enterprise solutions specifically built to address the operational complexity of data management in cloud-native environments.'
-    },
-    {
-      type: 0,
-      name: 'Humio',
-      logo: 'humio',
-      link: 'https://humio.com',
-      blurb: 'Humio is a log aggregation database. We offer a Kubernetes integration that will give you insights to your logs across apps and instances.'
-    },
-    {
-      type: 0,
-      name: 'Outcold Solutions LLC',
-      logo: 'outcold',
-      link: 'https://www.outcoldsolutions.com/#monitoring-kubernetes',
-      blurb: 'Powerful Certified Splunk applications for Monitoring OpenShift, Kubernetes and Docker.'
-    },
-    {
-      type: 0,
-      name: 'SysEleven GmbH',
-      logo: 'syseleven',
-      link: 'http://www.syseleven.de/',
-      blurb: 'Enterprise Customers who are in need of bulletproof operations (High Performance E-Commerce and Enterprise Portals)'
-    },
-    {
-      type: 0,
-      name: 'Landoop',
-      logo: 'landoop',
-      link: 'http://lenses.stream',
-      blurb: 'Lenses for Apache Kafka, to deploy, manage and operate with confidence data streaming pipelines and topologies at scale with confidence and native Kubernetes integration.'
-    },
-    {
-      type: 0,
-      name: 'Redis Labs',
-      logo: 'redis',
-      link: 'https://redislabs.com/blog/getting-started-with-kubernetes-and-redis-using-redis-enterprise/',
-      blurb: 'Redis Enterprise extends open source Redis and delivers stable high performance and linear scaling required for building microservices on the Kubernetes platform.'
-    },
-    {
-      type: 3,
-      name: 'Diamanti',
-      logo: 'diamanti',
-      link: 'https://diamanti.com/',
-      blurb: 'Diamanti - Diamanti-D10'
-    },
-    {
-      type: 3,
-      name: 'Eking',
-      logo: 'eking',
-      link: 'http://www.eking-tech.com/',
-      blurb: 'Hainan eKing Technology Co. - eKing Cloud Container Platform'
-    },
-    {
-      type: 3,
-      name: 'Harmony Cloud',
-      logo: 'harmony',
-      link: 'http://harmonycloud.cn/products/rongqiyun/',
-      blurb: 'Harmonycloud - Harmonycloud Container Platform'
-    },
-    {
-      type: 3,
-      name: 'Woqutech',
-      logo: 'woqutech',
-      link: 'http://woqutech.com/product_qfusion.html',
-      blurb: 'Woqutech - QFusion'
-    },
-    {
-      type: 3,
-      name: 'Baidu',
-      logo: 'baidu',
-      link: 'https://cloud.baidu.com/product/cce.html',
-      blurb: 'Baidu Cloud - Baidu Cloud Container Engine'
-    },
-    {
-      type: 3,
-      name: 'ZTE',
-      logo: 'zte',
-      link: 'https://sdnfv.zte.com.cn/en/home',
-      blurb: 'ZTE - TECS OpenPalette'
-    },
-    {
-      type: 1,
-      name: 'Automatic Server AG',
-      logo: 'asag',
-      link: 'http://www.automatic-server.com/paas.html',
-      blurb: 'We install and operate Kubernetes in big enterprises, create deployment workflows and help to migrate.'
-    },
-    {
-      type: 1,
-      name: 'Circulo Siete',
-      logo: 'circulo',
-      link: 'https://circulosiete.com/consultoria/kubernetes/',
-      blurb: 'We are a Mexico based company offering training, consulting and support to migrate your workloads to Kubernetes, Cloud Native Microservices & Devops.'
-    },
-    {
-      type: 1,
-      name: 'DevOpsGuru',
-      logo: 'devopsguru',
-      link: 'http://devopsguru.ca/workshop',
-      blurb: 'DevOpsGuru work with small business to transform from physical to virtual to containerization.'
-    },
-    {
-      type: 1,
-      name: 'EIN Intelligence Co., Ltd',
-      logo: 'ein',
-      link: 'https://ein.io',
-      blurb: 'Startups and agile enterprises in South Korea.'
-    },
-    {
-      type: 0,
-      name: 'GuardiCore',
-      logo: 'guardicore',
-      link: 'https://www.guardicore.com/',
-      blurb: 'GuardiCore provided process level visibility and network policy enforcement on containerized assets on the Kubernetes platform.'
-    },
-    {
-      type: 0,
-      name: 'Hedvig',
-      logo: 'hedvig',
-      link: 'https://www.hedviginc.com/blog/provisioning-hedvig-storage-with-kubernetes',
-      blurb: 'Hedvig is software-defined storage that uses NFS or iSCSI for persistent volumes for provisioning shared storage for pods and containers.'
-    },
-    {
-      type: 0,
-      name: 'Hewlett Packard Enterprise',
-      logo: 'hpe',
-      link: ' https://www.hpe.com/us/en/storage/containers.html',
-      blurb: 'Persistent Storage that makes data as easy to manage as containers: dynamic provisioning, policy-based performance & protection, QoS, & more.'
-    },
-    {
-      type: 0,
-      name: 'JetBrains',
-      logo: 'jetbrains',
-      link: 'https://blog.jetbrains.com/teamcity/2017/10/teamcity-kubernetes-support-plugin/',
-      blurb: 'Run TeamCity cloud build agents in a Kubernetes cluster. Provides Helm support as a build step.'
-    },
-    {
-      type: 2,
-      name: 'Opensense',
-      logo: 'opensense',
-      link: 'http://www.opensense.fr/en/kubernetes-en/',
-      blurb: 'We provide Kubernetes services (integration, operation, training) as well as development of banking microservices based on our extended experience with cloud of containers, microservices, data management and financial sector.'
-    },
-    {
-      type: 2,
-      name: 'SAP SE',
-      logo: 'sap',
-      link: 'https://cloudplatform.sap.com',
-      blurb: 'The SAP Cloud Platform provides in-memory capabilities and unique business services for building and extending applications. With open sourced Project Gardener, SAP utilizes the power of Kubernetes to enable an open, robust, multi-cloud experience for our customers. You can use simple, modern cloud native design principles and leverage skills your organization already has to deliver agile and transformative applications, while integrating with the latest SAP Leonardo business features.'
-    },
-    {
-      type: 1,
-      name: 'Mobilise Cloud Services Limited',
-      logo: 'mobilise',
-      link: 'https://www.mobilise.cloud/en/services/serverless-application-delivery/',
-      blurb: 'Mobilise helps organisations adopt Kubernetes and integrate with their CI/CD tooling.'
-    },
-    {
-      type: 3,
-      name: 'AWS',
-      logo: 'aws',
-      link: 'https://aws.amazon.com/eks/',
-      blurb: 'Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to install and operate your own Kubernetes clusters.'
-    },
-    {
-      type: 3,
-      name: 'Kontena',
-      logo: 'kontena',
-      link: 'https://pharos.sh',
-      blurb: 'Kontena Pharos - The simple, solid, certified Kubernetes distribution that just works.'
-    },
-    {
-      type: 2,
-      name: 'NTTData',
-      logo: 'nttdata',
-      link: 'http://de.nttdata.com/altemista-cloud',
-      blurb: 'NTT DATA, a member of the NTT Group, brings the power of the worlds leading infrastructure provider in the global K8s community.'
-    },
-    {
-      type: 2,
-      name: 'OCTO',
-      logo: 'octo',
-      link: 'https://www.octo.academy/fr/formation/275-kubernetes-utiliser-architecturer-et-administrer-une-plateforme-de-conteneurs',
-      blurb: 'OCTO technology provides training, architecture, technical consulting and delivery services including containers and Kubernetes.'
-    },
-    {
-      type: 0,
-      name: 'Logdna',
-      logo: 'logdna',
-      link: 'https://logdna.com/kubernetes',
-      blurb: 'Pinpoint production issues instantly with LogDNA, the best logging platform you will ever use. Get started with only 2 kubectl commands.'
-    }
-  ]
-
-  var kcspContainer = document.getElementById('kcspContainer')
-  var distContainer = document.getElementById('distContainer')
-  var ktpContainer = document.getElementById('ktpContainer')
-  var isvContainer = document.getElementById('isvContainer')
-  var servContainer = document.getElementById('servContainer')
-
-  var sorted = partners.sort(function (a, b) {
-    if (a.name > b.name) return 1
-    if (a.name < b.name) return -1
-    return 0
-  })
-
-  sorted.forEach(function (obj) {
-    var box = document.createElement('div')
-    box.className = 'partner-box'
-
-    var img = document.createElement('img')
-    img.src = '/images/square-logos/' + obj.logo + '.png'
-
-    var div = document.createElement('div')
-
-    var p = document.createElement('p')
-    p.textContent = obj.blurb
-
-    var link = document.createElement('a')
-    link.href = obj.link
-    link.target = '_blank'
-    link.textContent = 'Learn more'
-
-    div.appendChild(p)
-    div.appendChild(link)
-
-    box.appendChild(img)
-    box.appendChild(div)
-
-    var container;
-    if (obj.type === 0) {
-      container = isvContainer;
-    } else if (obj.type === 1) {
-      container = servContainer;
-    } else if (obj.type === 2) {
-      container = kcspContainer;
-    } else if (obj.type === 3) {
-      container = distContainer;
-    } else if (obj.type === 4) {
-      container = ktpContainer;
-    }
-
-    container.appendChild(box)
-  })
-})();
diff --git a/content/zh/partners/_index.html b/content/zh/partners/_index.html
index 2dc39fee0e05b..3bbe08cdf43e0 100644
--- a/content/zh/partners/_index.html
+++ b/content/zh/partners/_index.html
@@ -7,13 +7,11 @@ ---
@@ -109,6 +107,3 @@
Kubernetes 培训合作伙伴
 {{< include "partner-style.css" >}}
-
diff --git a/static/_redirects b/static/_redirects
index 805c7b8546cf6..4653d97893a11 100644
--- a/static/_redirects
+++ b/static/_redirects
@@ -502,3 +502,5 @@
 /docs/setup/cluster-large/ /docs/setup/best-practices/cluster-large/ 301
 /docs/setup/node-conformance/ /docs/setup/best-practices/node-conformance/ 301
 /docs/setup/certificates/ /docs/setup/best-practices/certificates/ 301
+
+/docs/tasks/tools/install-minikube/ https://minikube.sigs.k8s.io/docs/start/ 302
diff --git a/static/images/docs/node-capacity.svg b/static/images/docs/node-capacity.svg
new file mode 100644
index 0000000000000..0bbacfade6543
--- /dev/null
+++ b/static/images/docs/node-capacity.svg
@@ -0,0 +1,27 @@
+
+
+ background
+
+
+
+
+
+
+ Layer 1
+
+
+
+
+
+ Node Capacity
+ kube-reserved
+ system-reserved
+ eviction-threshold
+ allocatable
+ (available for pods)
+
+
+
+
+
+
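
The new `node-capacity.svg` illustrates how the kubelet carves up a node: allocatable capacity is what remains of total node capacity once `kube-reserved`, `system-reserved`, and the eviction threshold are set aside. As a rough sketch of the kind of kubelet settings the diagram corresponds to (the file name and all values below are illustrative and not part of this change; the `KubeletConfiguration` fields themselves are real), resources might be reserved like this:

```yaml
# kubelet-config.yaml (illustrative example only; not included in this PR)
# Allocatable ≈ node capacity − kubeReserved − systemReserved − evictionHard,
# which is the breakdown the node-capacity.svg diagram depicts.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
kubeReserved:
  cpu: "500m"
  memory: "1Gi"
systemReserved:
  cpu: "250m"
  memory: "500Mi"
evictionHard:
  memory.available: "100Mi"
```

With these example values, a node with 8Gi of memory would report roughly 8Gi − 1Gi − 500Mi − 100Mi ≈ 6.4Gi as allocatable to pods.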