Fix CNI issue related to picking up wrong CNI #10985

Merged · 2 commits · Apr 5, 2021
21 changes: 21 additions & 0 deletions cmd/minikube/cmd/start_flags.go
@@ -392,6 +392,9 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
if _, ok := cnm.(cni.Disabled); !ok {
klog.Infof("Found %q CNI - setting NetworkPlugin=cni", cnm)
cc.KubernetesConfig.NetworkPlugin = "cni"
if err := setCNIConfDir(&cc, cnm); err != nil {
klog.Errorf("unable to set CNI Config Directory: %v", err)
}
}
}

@@ -415,6 +418,24 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
return createNode(cc, kubeNodeName, existing)
}

// setCNIConfDir sets kubelet's '--cni-conf-dir' flag to the custom CNI Config Directory path (the same path used by the CNI Deployment) to avoid conflicting CNI configs.
// ref: https://github.com/kubernetes/minikube/issues/10984
// Note: currently, this change affects only the Kindnet CNI (and all multi-node clusters that use it), but it can easily be extended to other/all CNIs if needed.
func setCNIConfDir(cc *config.ClusterConfig, cnm cni.Manager) error {
if _, kindnet := cnm.(cni.KindNet); kindnet {
// auto-set custom CNI Config Directory, if not user-specified
eo := fmt.Sprintf("kubelet.cni-conf-dir=%s", cni.CustomCNIConfDir)
if !cc.KubernetesConfig.ExtraOptions.Exists(eo) {
klog.Infof("auto-setting extra-config to %q", eo)
if err := cc.KubernetesConfig.ExtraOptions.Set(eo); err != nil {
return fmt.Errorf("failed auto-setting extra-config %q: %v", eo, err)
}
klog.Infof("extra-config set to %q", eo)
}
}
return nil
}

func checkNumaCount(k8sVersion string) {
if viper.GetInt(kvmNUMACount) < 1 || viper.GetInt(kvmNUMACount) > 8 {
exit.Message(reason.Usage, "--kvm-numa-count range is 1-8")
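For readers following the hunk above: the guard only auto-applies the custom conf dir when the user has not already supplied their own `kubelet.cni-conf-dir` extra-config. Below is a small, self-contained sketch of that precedence rule; the `extraOption`/`extraOptions` types are simplified stand-ins, not minikube's actual `config` package.

```go
// Illustrative sketch only: minikube's real ExtraOptionSlice lives in
// pkg/minikube/config; these simplified types just mirror its behavior.
package main

import (
	"fmt"
	"strings"
)

type extraOption struct{ Component, Key, Value string }

type extraOptions []extraOption

// exists reports whether an option with the same component and key was already set.
func (eo extraOptions) exists(s string) bool {
	comp, key, _ := parse(s)
	for _, o := range eo {
		if o.Component == comp && o.Key == key {
			return true
		}
	}
	return false
}

// set parses "component.key=value" and appends it.
func (eo *extraOptions) set(s string) error {
	comp, key, val := parse(s)
	if comp == "" || key == "" {
		return fmt.Errorf("invalid extra-config %q, want component.key=value", s)
	}
	*eo = append(*eo, extraOption{comp, key, val})
	return nil
}

func parse(s string) (component, key, value string) {
	kv := strings.SplitN(s, "=", 2)
	if ck := strings.SplitN(kv[0], ".", 2); len(ck) == 2 {
		component, key = ck[0], ck[1]
	}
	if len(kv) == 2 {
		value = kv[1]
	}
	return component, key, value
}

func main() {
	opts := extraOptions{}
	// Auto-set the custom CNI conf dir only if the user did not supply one.
	eo := fmt.Sprintf("kubelet.cni-conf-dir=%s", "/etc/cni/net.mk")
	if !opts.exists(eo) {
		if err := opts.set(eo); err != nil {
			fmt.Println("failed auto-setting extra-config:", err)
			return
		}
	}
	fmt.Printf("%+v\n", opts)
}
```

If the user had already passed `--extra-config=kubelet.cni-conf-dir=...`, `exists` would return true and the auto-default would be skipped, which is the behaviour the new `setCNIConfDir` helper preserves.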
4 changes: 2 additions & 2 deletions pkg/drivers/kvm/domain.go
@@ -65,11 +65,11 @@ const domainTmpl = `
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='{{.Network}}'/>
<source network='{{.PrivateNetwork}}'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='{{.PrivateNetwork}}'/>
<source network='{{.Network}}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
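The domain definition above is a Go text/template, so this hunk simply swaps which libvirt network each virtio NIC attaches to. A minimal rendering sketch, assuming placeholder network names (they are not taken from the driver):

```go
// Renders only the two <interface> elements after the swap; the struct field
// names match the template, the concrete network names are placeholders.
package main

import (
	"os"
	"text/template"
)

const ifaceTmpl = `<interface type='network'>
  <source network='{{.PrivateNetwork}}'/>
  <model type='virtio'/>
</interface>
<interface type='network'>
  <source network='{{.Network}}'/>
  <model type='virtio'/>
</interface>
`

func main() {
	d := struct{ Network, PrivateNetwork string }{
		Network:        "default",     // libvirt's default network (placeholder)
		PrivateNetwork: "minikube-net", // the cluster's private network (placeholder)
	}
	t := template.Must(template.New("iface").Parse(ifaceTmpl))
	if err := t.Execute(os.Stdout, d); err != nil {
		panic(err)
	}
}
```

After the change, the private network is emitted as the first interface and the default network as the second, reversing the previous order.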
13 changes: 13 additions & 0 deletions pkg/minikube/cni/cni.go
@@ -39,6 +39,12 @@ const (
DefaultPodCIDR = "10.244.0.0/16"
)

var (
// CustomCNIConfDir is the custom CNI Config Directory path used to avoid conflicting CNI configs
// ref: https://github.com/kubernetes/minikube/issues/10984
CustomCNIConfDir = "/etc/cni/net.mk"
)

// Runner is the subset of command.Runner this package consumes
type Runner interface {
RunCmd(cmd *exec.Cmd) (*command.RunResult, error)
@@ -62,6 +68,7 @@ type tmplInput struct {
ImageName string
PodCIDR string
DefaultRoute string
CNIConfDir string
}

// New returns a new CNI manager
@@ -73,6 +80,12 @@ func New(cc config.ClusterConfig) (Manager, error) {

klog.Infof("Creating CNI manager for %q", cc.KubernetesConfig.CNI)

// respect user-specified custom CNI Config Directory, if any
userCNIConfDir := cc.KubernetesConfig.ExtraOptions.Get("cni-conf-dir", "kubelet")
if userCNIConfDir != "" {
CustomCNIConfDir = userCNIConfDir
}

switch cc.KubernetesConfig.CNI {
case "", "auto":
return chooseDefault(cc), nil
4 changes: 3 additions & 1 deletion pkg/minikube/cni/kindnet.go
@@ -130,7 +130,8 @@ spec:
volumes:
- name: cni-cfg
hostPath:
path: /etc/cni/net.d
path: {{.CNIConfDir}}
type: DirectoryOrCreate
- name: xtables-lock
hostPath:
path: /run/xtables.lock
@@ -158,6 +159,7 @@ func (c KindNet) manifest() (assets.CopyableFile, error) {
DefaultRoute: "0.0.0.0/0", // assumes IPv4
PodCIDR: DefaultPodCIDR,
ImageName: images.KindNet(c.cc.KubernetesConfig.ImageRepository),
CNIConfDir: CustomCNIConfDir,
}

b := bytes.Buffer{}
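To show how `CNIConfDir` flows from `tmplInput` into the rendered DaemonSet, here is a hedged, stand-alone sketch that renders only the volumes fragment above; the template constant, fragment boundaries, and image name are placeholders rather than the actual kindnet.go identifiers.

```go
// Illustrative only: substitutes the CNI conf dir into the kindnet volume stanza.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

const volumeFragment = `      volumes:
      - name: cni-cfg
        hostPath:
          path: {{.CNIConfDir}}
          type: DirectoryOrCreate
`

type tmplInput struct {
	ImageName    string
	PodCIDR      string
	DefaultRoute string
	CNIConfDir   string
}

func main() {
	input := tmplInput{
		DefaultRoute: "0.0.0.0/0",        // assumes IPv4, as in the PR
		PodCIDR:      "10.244.0.0/16",    // DefaultPodCIDR from cni.go
		ImageName:    "kindest/kindnetd", // placeholder image name
		CNIConfDir:   "/etc/cni/net.mk",  // CustomCNIConfDir from cni.go
	}
	b := bytes.Buffer{}
	t := template.Must(template.New("kindnet").Parse(volumeFragment))
	if err := t.Execute(&b, input); err != nil {
		panic(err)
	}
	fmt.Print(b.String())
}
```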
2 changes: 1 addition & 1 deletion site/content/en/docs/commands/start.md
@@ -111,7 +111,7 @@ minikube start [flags]
-h, --help
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_file string If non-empty, use this log file (default "")
--log_file string If non-empty, use this log file
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
--logtostderr log to standard error instead of files
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
@@ -22,7 +22,7 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: [{ key: app, operator: In, values: [hello-from] }]
matchExpressions: [{ key: app, operator: In, values: [hello] }]
topologyKey: "kubernetes.io/hostname"
containers:
- name: hello-from
58 changes: 29 additions & 29 deletions site/content/en/docs/tutorials/multi_node.md
@@ -22,23 +22,24 @@ date: 2019-11-24
minikube start --nodes 2 -p multinode-demo
```
```
πŸ˜„ [multinode-demo] minikube v1.16.0 on Darwin 10.15.7
✨ Automatically selected the docker driver. Other choices: hyperkit, virtualbox
πŸ˜„ [multinode-demo] minikube v1.18.1 on Opensuse-Tumbleweed
✨ Automatically selected the docker driver
πŸ‘ Starting control plane node multinode-demo in cluster multinode-demo
πŸ”₯ Creating docker container (CPUs=2, Memory=2200MB) ...
🐳 Preparing Kubernetes v1.20.0 on Docker 20.10.0 ...
πŸ”— Configuring CNI (Container Networking Interface) ...
πŸ”₯ Creating docker container (CPUs=2, Memory=8000MB) ...
🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.3 ...
β–ͺ Generating certificates and keys ...
β–ͺ Booting up control plane ...
β–ͺ Configuring RBAC rules ...
πŸ”— Configuring CNI (Container Networking Interface) ...
πŸ”Ž Verifying Kubernetes components...
β–ͺ Using image gcr.io/k8s-minikube/storage-provisioner:v5
🌟 Enabled addons: storage-provisioner, default-storageclass

πŸ‘ Starting node multinode-demo-m02 in cluster multinode-demo
πŸ”₯ Creating docker container (CPUs=2, Memory=2200MB) ...
πŸ”₯ Creating docker container (CPUs=2, Memory=8000MB) ...
🌐 Found network options:
β–ͺ NO_PROXY=192.168.49.2
🐳 Preparing Kubernetes v1.20.0 on Docker 20.10.0 ...
🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.3 ...
β–ͺ env NO_PROXY=192.168.49.2
πŸ”Ž Verifying Kubernetes components...
πŸ„ Done! kubectl is now configured to use "multinode-demo" cluster and "default" namespace by default
@@ -50,9 +51,9 @@
kubectl get nodes
```
```
NAME STATUS ROLES AGE VERSION
multinode-demo Ready master 72s v1.18.2
multinode-demo-m02 Ready <none> 33s v1.18.2
NAME STATUS ROLES AGE VERSION
multinode-demo Ready control-plane,master 99s v1.20.2
multinode-demo-m02 Ready <none> 73s v1.20.2
```

- You can also check the status of your nodes:
@@ -68,7 +69,6 @@ host: Running
kubelet: Running
apiserver: Running
kubeconfig: Configured
timeToStop: Nonexistent

multinode-demo-m02
type: Worker
@@ -106,9 +106,9 @@ service/hello created
kubectl get pods -o wide
```
```
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hello-c7b8df44f-qbhxh 1/1 Running 0 31s 10.244.0.3 multinode-demo <none> <none>
hello-c7b8df44f-xv4v6 1/1 Running 0 31s 10.244.0.2 multinode-demo <none> <none>
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hello-695c67cf9c-bzrzk 1/1 Running 0 22s 10.244.1.2 multinode-demo-m02 <none> <none>
hello-695c67cf9c-frcvw 1/1 Running 0 22s 10.244.0.3 multinode-demo <none> <none>
```

- Look at our service, to know what URL to hit
@@ -117,31 +117,31 @@ hello-c7b8df44f-xv4v6 1/1 Running 0 31s 10.244.0.2 multinod
minikube service list -p multinode-demo
```
```
|-------------|------------|--------------|-----------------------------|
| NAMESPACE | NAME | TARGET PORT | URL |
|-------------|------------|--------------|-----------------------------|
| default | hello | 80 | http://192.168.64.226:31000 |
| default | kubernetes | No node port | |
| kube-system | kube-dns | No node port | |
|-------------|------------|--------------|-----------------------------|
|-------------|------------|--------------|---------------------------|
| NAMESPACE | NAME | TARGET PORT | URL |
|-------------|------------|--------------|---------------------------|
| default | hello | 80 | http://192.168.49.2:31000 |
| default | kubernetes | No node port | |
| kube-system | kube-dns | No node port | |
|-------------|------------|--------------|---------------------------|
```

- Let's hit the URL a few times and see what comes back

```shell
curl http://192.168.64.226:31000
curl http://192.168.49.2:31000
```
```
Hello from hello-c7b8df44f-qbhxh (10.244.0.3)
Hello from hello-695c67cf9c-frcvw (10.244.0.3)

curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-qbhxh (10.244.0.3)
curl http://192.168.49.2:31000
Hello from hello-695c67cf9c-bzrzk (10.244.1.2)

curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-xv4v6 (10.244.0.2)
curl http://192.168.49.2:31000
Hello from hello-695c67cf9c-bzrzk (10.244.1.2)

curl http://192.168.64.226:31000
Hello from hello-c7b8df44f-xv4v6 (10.244.0.2)
curl http://192.168.49.2:31000
Hello from hello-695c67cf9c-frcvw (10.244.0.3)
```

- Multiple nodes!
23 changes: 12 additions & 11 deletions test/integration/multinode_test.go
@@ -393,12 +393,12 @@ func validateDeployAppToMultiNode(ctx context.Context, t *testing.T, profile str
// Create a deployment for app
_, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/multinodes/multinode-pod-dns-test.yaml"))
if err != nil {
t.Errorf("failed to create hello deployment to multinode cluster")
t.Errorf("failed to create busybox deployment to multinode cluster")
}

_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/hello"))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/busybox"))
if err != nil {
t.Errorf("failed to delploy hello to multinode cluster")
t.Errorf("failed to delploy busybox to multinode cluster")
}

// resolve Pod IPs
@@ -423,24 +423,25 @@

// verify both Pods could resolve a public DNS
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "nslookup", "kubernetes.io"))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.io"))
if err != nil {
t.Errorf("Pod %s could not resolve 'kubernetes.io': %v", name, err)
}
}
// verify both pods could resolve to a local service.
// verify both Pods could resolve "kubernetes.default"
// this one is also checked by k8s e2e node conformance tests:
// https://github.com/kubernetes/kubernetes/blob/f137c4777095b3972e2dd71a01365d47be459389/test/e2e_node/environment/conformance.go#L125-L179
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "nslookup", "kubernetes.default.svc.cluster.local"))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.default"))
if err != nil {
t.Errorf("Pod %s could not resolve local service (kubernetes.default.svc.cluster.local): %v", name, err)
t.Errorf("Pod %s could not resolve 'kubernetes.default': %v", name, err)
}
}

// clean up, delete all pods
// verify both pods could resolve to a local service.
for _, name := range podNames {
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "delete", "pod", name))
_, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.default.svc.cluster.local"))
if err != nil {
t.Errorf("fail to delete pod %s: %v", name, err)
t.Errorf("Pod %s could not resolve local service (kubernetes.default.svc.cluster.local): %v", name, err)
}
}
}
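The updated exec calls add an explicit `--` before `nslookup`: the first `--` ends minikube's own flags (such as `-p`), and the second ends `kubectl exec`'s flags, so the lookup command reaches the container unmodified. A small sketch of the resulting invocation, with placeholder profile and pod names:

```go
// Prints the command line the test effectively builds; nothing is executed here.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	profile := "multinode-demo" // placeholder profile name
	pod := "busybox-xxxxx"      // placeholder pod name
	cmd := exec.Command("minikube",
		"kubectl", "-p", profile, "--", // everything after this "--" is passed to kubectl
		"exec", pod, "--", // everything after this "--" runs inside the pod
		"nslookup", "kubernetes.default")
	fmt.Println(cmd.String())
}
```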
33 changes: 18 additions & 15 deletions test/integration/testdata/multinodes/multinode-pod-dns-test.yaml
@@ -1,32 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello
name: busybox
labels:
app: busybox
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
selector:
matchLabels:
app: hello
app: busybox
template:
metadata:
labels:
app: hello
app: busybox
spec:
containers:
- name: busybox
# flaky nslookup in busybox versions newer than 1.28:
# https://github.com/docker-library/busybox/issues/48
# note: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
# has similar issues (ie, resolves but returns exit 1)
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
affinity:
# ⬇⬇⬇ This ensures pods will land on separate hosts
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions: [{ key: app, operator: In, values: [hello-from] }]
matchExpressions: [{ key: app, operator: In, values: [busybox] }]
topologyKey: "kubernetes.io/hostname"
containers:
- name: hello-from
image: pbitty/hello-from:latest
ports:
- name: http
containerPort: 80
terminationGracePeriodSeconds: 1