* 
* ==> Audit <==
* |------------|------------------------------------------------------------------|----------------|----------|---------|----------------------|----------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|------------|------------------------------------------------------------------|----------------|----------|---------|----------------------|----------------------|
| delete | e2e-cri-o delete | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:01 CEST |
| start | e2e-cri-o start --keep-context --container-runtime=cri-o --ports=8080 --cni=bridge | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:01 CEST |
| delete | e2e-docker delete | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:01 CEST |
| start | e2e-docker start --keep-context --container-runtime=docker --ports=8080 | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:01 CEST |
| service | crio service list | crio | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:01 CEST |
| image | e2e-containerd image load docker.io/library/extension-container:latest | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:02 CEST |
| image | e2e-cri-o image load docker.io/library/extension-container:latest | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:01 CEST | 24 Apr 23 14:02 CEST |
| image | e2e-docker image load docker.io/library/extension-container:latest | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| service | crio service kubernetes --url | crio | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-containerd ssh -- sudo ctr --namespace=k8s.io tasks list | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| service | e2e-cri-o service --namespace default nginx-blackhole --url | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| ssh | e2e-docker ssh -- sudo docker inspect -f='{{.State.Status}}' a5ac3199a81f884b64dc6903d1847d80431733220395e2d66278065a637fe7ac | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | 24 Apr 23 14:02 CEST |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-cri-o service --namespace default nginx-blackhole --url | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-cri-o service --namespace default nginx-blackhole --url | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:02 CEST | |
| service | e2e-containerd service --namespace default nginx-blackhole --url | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| delete | e2e-cri-o delete | e2e-cri-o | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | 24 Apr 23 14:03 CEST |
| delete | e2e-containerd delete | e2e-containerd | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | 24 Apr 23 14:03 CEST |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| ssh | crio ssh | crio | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| service | e2e-docker service --namespace default nginx-blackhole --url | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | |
| delete | e2e-docker delete | e2e-docker | jedmeier | v1.30.1 | 24 Apr 23 14:03 CEST | 24 Apr 23 14:03 CEST |
| completion | bash | minikube | jedmeier | v1.30.1 | 24 Apr 23 14:10 CEST | 24 Apr 23 14:10 CEST |
| ssh | crio ssh | crio | jedmeier | v1.30.1 | 24 Apr 23 14:10 CEST | |
| completion | bash | minikube | jedmeier | v1.30.1 | 24 Apr 23 14:49 CEST | 24 Apr 23 14:49 CEST |
| ssh | | minikube | jedmeier | v1.30.1 | 24 Apr 23 14:54 CEST | |
| completion | bash | minikube | jedmeier | v1.30.1 | 24 Apr 23 14:58 CEST | 24 Apr 23 14:58 CEST |
| delete | crio delete | crio | jedmeier | v1.30.1 | 24 Apr 23 15:01 CEST | 24 Apr 23 15:01 CEST |
| start | crio start | crio | jedmeier | v1.30.1 | 24 Apr 23 15:01 CEST | 24 Apr 23 15:02 CEST |
| start | crio start --container-runtime cri-o | crio | jedmeier | v1.30.1 | 24 Apr 23 15:22 CEST | 24 Apr 23 15:23 CEST |
|------------|------------------------------------------------------------------|----------------|----------|---------|----------------------|----------------------|
* 
* ==> Last Start <==
* Log file created at: 2023/04/24 15:22:53
Running on machine: joshiste-mbp
Binary: Built with gc go1.20.2 for darwin/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0424 15:22:53.203590 61953 out.go:296] Setting OutFile to fd 1 ...
I0424 15:22:53.203735 61953 out.go:348] isatty.IsTerminal(1) = true
I0424 15:22:53.203736 61953 out.go:309] Setting ErrFile to fd 2...
I0424 15:22:53.203739 61953 out.go:348] isatty.IsTerminal(2) = true
I0424 15:22:53.203809 61953 root.go:336] Updating PATH: /Users/jedmeier/.minikube/bin
I0424 15:22:53.205768 61953 out.go:303] Setting JSON to false
I0424 15:22:53.234681 61953 start.go:125] hostinfo: {"hostname":"joshiste-mbp","uptime":1360350,"bootTime":1680982223,"procs":633,"os":"darwin","platform":"darwin","platformFamily":"Standalone Workstation","platformVersion":"13.3.1","kernelVersion":"22.4.0","kernelArch":"arm64","virtualizationSystem":"","virtualizationRole":"","hostId":"3692cb5c-d979-5956-8a1e-9ad6c41df7d4"}
W0424 15:22:53.234761 61953 start.go:133] gopshost.Virtualization returned error: not implemented yet
I0424 15:22:53.240143 61953 out.go:177] 😄 [crio] minikube v1.30.1 on Darwin 13.3.1 (arm64)
I0424 15:22:53.248410 61953 notify.go:220] Checking for updates...
I0424 15:22:53.248699 61953 config.go:182] Loaded profile config "crio": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.26.3
I0424 15:22:53.248730 61953 driver.go:375] Setting default libvirt URI to qemu:///system
I0424 15:22:53.307953 61953 docker.go:121] docker version: linux-20.10.23:Docker Desktop 4.17.0 (99724)
I0424 15:22:53.308077 61953 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0424 15:22:53.461916 61953 info.go:266] docker info: {ID:KV3W:LHMO:QTSZ:SYRI:VSLA:AYFB:C3RZ:QG42:XMJN:CEW2:RK72:62WS Containers:5 ContainersRunning:5 ContainersPaused:0 ContainersStopped:0 Images:173 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:92 OomKillDisable:false NGoroutines:84 SystemTime:2023-04-24 13:22:53.355783506 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:5 KernelVersion:5.15.49-linuxkit OperatingSystem:Docker Desktop OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:12544057344 GenericResources: DockerRootDir:/var/lib/docker HTTPProxy:http.docker.internal:3128 HTTPSProxy:http.docker.internal:3128 NoProxy:hubproxy.docker.internal Name:docker-desktop Labels:[] ExperimentalBuild:false ServerVersion:20.10.23 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:2456e983eb9e37e47538f59ea18f2043c9a73640 Expected:2456e983eb9e37e47538f59ea18f2043c9a73640} RuncCommit:{ID:v1.1.4-0-g5fd4c4d Expected:v1.1.4-0-g5fd4c4d} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=default name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/Users/jedmeier/.docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-buildx] ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.10.3] map[Name:compose Path:/Users/jedmeier/.docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-compose] ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.15.1] map[Name:dev Path:/Users/jedmeier/.docker/cli-plugins/docker-dev SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-dev] ShortDescription:Docker Dev Environments Vendor:Docker Inc. Version:v0.1.0] map[Name:extension Path:/Users/jedmeier/.docker/cli-plugins/docker-extension SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-extension] ShortDescription:Manages Docker extensions Vendor:Docker Inc. Version:v0.2.18] map[Name:sbom Path:/Users/jedmeier/.docker/cli-plugins/docker-sbom SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-sbom] ShortDescription:View the packaged-based Software Bill Of Materials (SBOM) for an image URL:https://github.com/docker/sbom-cli-plugin Vendor:Anchore Inc. Version:0.6.0] map[Name:scan Path:/Users/jedmeier/.docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-scan] ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.25.0] map[Name:scout Path:/Users/jedmeier/.docker/cli-plugins/docker-scout SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-scout] ShortDescription:Command line tool for Docker Scout Vendor:Docker Inc. Version:v0.6.0]] Warnings:}}
I0424 15:22:53.467050 61953 out.go:177] ✨ Using the docker driver based on existing profile
I0424 15:22:53.475056 61953 start.go:295] selected driver: docker
I0424 15:22:53.475062 61953 start.go:870] validating driver "docker" against &{Name:crio KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.39@sha256:bf2d9f1e9d837d8deea073611d2605405b6be904647d97ebd9b12045ddfe1106 Memory:11914 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.3 ClusterName:crio Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.26.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:}
I0424 15:22:53.475120 61953 start.go:881] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:}
I0424 15:22:53.475593 61953 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0424 15:22:53.625692 61953 info.go:266] docker info: {ID:KV3W:LHMO:QTSZ:SYRI:VSLA:AYFB:C3RZ:QG42:XMJN:CEW2:RK72:62WS Containers:5 ContainersRunning:5 ContainersPaused:0 ContainersStopped:0 Images:173 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:92 OomKillDisable:false NGoroutines:84 SystemTime:2023-04-24 13:22:53.524298381 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:5 KernelVersion:5.15.49-linuxkit OperatingSystem:Docker Desktop OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:12544057344 GenericResources: DockerRootDir:/var/lib/docker HTTPProxy:http.docker.internal:3128 HTTPSProxy:http.docker.internal:3128 NoProxy:hubproxy.docker.internal Name:docker-desktop Labels:[] ExperimentalBuild:false ServerVersion:20.10.23 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:2456e983eb9e37e47538f59ea18f2043c9a73640 Expected:2456e983eb9e37e47538f59ea18f2043c9a73640} RuncCommit:{ID:v1.1.4-0-g5fd4c4d Expected:v1.1.4-0-g5fd4c4d} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=default name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/Users/jedmeier/.docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-buildx] ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.10.3] map[Name:compose Path:/Users/jedmeier/.docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-compose] ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.15.1] map[Name:dev Path:/Users/jedmeier/.docker/cli-plugins/docker-dev SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-dev] ShortDescription:Docker Dev Environments Vendor:Docker Inc. Version:v0.1.0] map[Name:extension Path:/Users/jedmeier/.docker/cli-plugins/docker-extension SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-extension] ShortDescription:Manages Docker extensions Vendor:Docker Inc. Version:v0.2.18] map[Name:sbom Path:/Users/jedmeier/.docker/cli-plugins/docker-sbom SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-sbom] ShortDescription:View the packaged-based Software Bill Of Materials (SBOM) for an image URL:https://github.com/docker/sbom-cli-plugin Vendor:Anchore Inc. Version:0.6.0] map[Name:scan Path:/Users/jedmeier/.docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-scan] ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.25.0] map[Name:scout Path:/Users/jedmeier/.docker/cli-plugins/docker-scout SchemaVersion:0.1.0 ShadowedPaths:[/usr/local/lib/docker/cli-plugins/docker-scout] ShortDescription:Command line tool for Docker Scout Vendor:Docker Inc. Version:v0.6.0]] Warnings:}}
I0424 15:22:53.625879 61953 cni.go:84] Creating CNI manager for ""
I0424 15:22:53.625886 61953 cni.go:142] "docker" driver + "crio" runtime found, recommending kindnet
I0424 15:22:53.625893 61953 start_flags.go:319] config: {Name:crio KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.39@sha256:bf2d9f1e9d837d8deea073611d2605405b6be904647d97ebd9b12045ddfe1106 Memory:11914 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.3 ClusterName:crio Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.26.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:}
I0424 15:22:53.630425 61953 out.go:177] 👍 Starting control plane node crio in cluster crio
I0424 15:22:53.638645 61953 cache.go:120] Beginning downloading kic base image for docker with crio
I0424 15:22:53.642734 61953 out.go:177] 🚜 Pulling base image ...
I0424 15:22:53.650536 61953 preload.go:132] Checking if preload exists for k8s version v1.26.3 and runtime crio
I0424 15:22:53.650536 61953 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.39@sha256:bf2d9f1e9d837d8deea073611d2605405b6be904647d97ebd9b12045ddfe1106 in local docker daemon
I0424 15:22:53.650581 61953 preload.go:148] Found local preload: /Users/jedmeier/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.3-cri-o-overlay-arm64.tar.lz4
I0424 15:22:53.650593 61953 cache.go:57] Caching tarball of preloaded images
I0424 15:22:53.651436 61953 preload.go:174] Found /Users/jedmeier/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.3-cri-o-overlay-arm64.tar.lz4 in cache, skipping download
I0424 15:22:53.651485 61953 cache.go:60] Finished verifying existence of preloaded tar for v1.26.3 on crio
I0424 15:22:53.651636 61953 profile.go:148] Saving config to /Users/jedmeier/.minikube/profiles/crio/config.json ...
I0424 15:22:53.716598 61953 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.39@sha256:bf2d9f1e9d837d8deea073611d2605405b6be904647d97ebd9b12045ddfe1106 in local docker daemon, skipping pull
I0424 15:22:53.716617 61953 cache.go:143] gcr.io/k8s-minikube/kicbase:v0.0.39@sha256:bf2d9f1e9d837d8deea073611d2605405b6be904647d97ebd9b12045ddfe1106 exists in daemon, skipping load
I0424 15:22:53.716631 61953 cache.go:193] Successfully downloaded all kic artifacts
I0424 15:22:53.716656 61953 start.go:364] acquiring machines lock for crio: {Name:mk51f3c110a26e66a32732e5b03ea154cb68d660 Clock:{} Delay:500ms Timeout:10m0s Cancel:}
I0424 15:22:53.718483 61953 start.go:368] acquired machines lock for "crio" in 1.795083ms
I0424 15:22:53.718512 61953 start.go:96] Skipping create...Using existing machine configuration
I0424 15:22:53.718520 61953 fix.go:55] fixHost starting: 
I0424 15:22:53.718872 61953 cli_runner.go:164] Run: docker container inspect crio --format={{.State.Status}}
I0424 15:22:53.769784 61953 fix.go:103] recreateIfNeeded on crio: state=Running err=
W0424 15:22:53.769810 61953 fix.go:129] unexpected machine state, will restart: 
I0424 15:22:53.780242 61953 out.go:177] 🏃 Updating the running docker "crio" container ...
I0424 15:22:53.783644 61953 machine.go:88] provisioning docker machine ...
I0424 15:22:53.783662 61953 ubuntu.go:169] provisioning hostname "crio"
I0424 15:22:53.783765 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:53.846269 61953 main.go:141] libmachine: Using SSH client type: native
I0424 15:22:53.846620 61953 main.go:141] libmachine: &{{{ 0 [] [] []} docker [0x10050da20] 0x100510400 [] 0s} 127.0.0.1 58871 }
I0424 15:22:53.846626 61953 main.go:141] libmachine: About to run SSH command:
sudo hostname crio && echo "crio" | sudo tee /etc/hostname
I0424 15:22:53.990212 61953 main.go:141] libmachine: SSH cmd err, output: : crio
I0424 15:22:53.990325 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:54.051031 61953 main.go:141] libmachine: Using SSH client type: native
I0424 15:22:54.051307 61953 main.go:141] libmachine: &{{{ 0 [] [] []} docker [0x10050da20] 0x100510400 [] 0s} 127.0.0.1 58871 }
I0424 15:22:54.051326 61953 main.go:141] libmachine: About to run SSH command:

		if ! grep -xq '.*\scrio' /etc/hosts; then
			if grep -xq '127.0.1.1\s.*' /etc/hosts; then
				sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 crio/g' /etc/hosts;
			else
				echo '127.0.1.1 crio' | sudo tee -a /etc/hosts;
			fi
		fi
I0424 15:22:54.185202 61953 main.go:141] libmachine: SSH cmd err, output: : 
I0424 15:22:54.185226 61953 ubuntu.go:175] set auth options {CertDir:/Users/jedmeier/.minikube CaCertPath:/Users/jedmeier/.minikube/certs/ca.pem CaPrivateKeyPath:/Users/jedmeier/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/Users/jedmeier/.minikube/machines/server.pem ServerKeyPath:/Users/jedmeier/.minikube/machines/server-key.pem ClientKeyPath:/Users/jedmeier/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/Users/jedmeier/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/Users/jedmeier/.minikube}
I0424 15:22:54.185251 61953 ubuntu.go:177] setting up certificates
I0424 15:22:54.185267 61953 provision.go:83] configureAuth start
I0424 15:22:54.185376 61953 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio
I0424 15:22:54.240182 61953 provision.go:138] copyHostCerts
I0424 15:22:54.241289 61953 exec_runner.go:144] found /Users/jedmeier/.minikube/key.pem, removing ...
I0424 15:22:54.241301 61953 exec_runner.go:207] rm: /Users/jedmeier/.minikube/key.pem
I0424 15:22:54.241457 61953 exec_runner.go:151] cp: /Users/jedmeier/.minikube/certs/key.pem --> /Users/jedmeier/.minikube/key.pem (1679 bytes)
I0424 15:22:54.241978 61953 exec_runner.go:144] found /Users/jedmeier/.minikube/ca.pem, removing ...
I0424 15:22:54.241980 61953 exec_runner.go:207] rm: /Users/jedmeier/.minikube/ca.pem
I0424 15:22:54.242042 61953 exec_runner.go:151] cp: /Users/jedmeier/.minikube/certs/ca.pem --> /Users/jedmeier/.minikube/ca.pem (1082 bytes)
I0424 15:22:54.242445 61953 exec_runner.go:144] found /Users/jedmeier/.minikube/cert.pem, removing ...
I0424 15:22:54.242447 61953 exec_runner.go:207] rm: /Users/jedmeier/.minikube/cert.pem
I0424 15:22:54.242552 61953 exec_runner.go:151] cp: /Users/jedmeier/.minikube/certs/cert.pem --> /Users/jedmeier/.minikube/cert.pem (1127 bytes)
I0424 15:22:54.242802 61953 provision.go:112] generating server cert: /Users/jedmeier/.minikube/machines/server.pem ca-key=/Users/jedmeier/.minikube/certs/ca.pem private-key=/Users/jedmeier/.minikube/certs/ca-key.pem org=jedmeier.crio san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube crio]
I0424 15:22:54.461637 61953 provision.go:172] copyRemoteCerts
I0424 15:22:54.461989 61953 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0424 15:22:54.462059 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:54.514948 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:22:54.608615 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0424 15:22:54.626862 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/machines/server.pem --> /etc/docker/server.pem (1196 bytes)
I0424 15:22:54.646109 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0424 15:22:54.664255 61953 provision.go:86] duration metric: configureAuth took 478.981625ms
I0424 15:22:54.664272 61953 ubuntu.go:193] setting minikube options for container-runtime
I0424 15:22:54.664419 61953 config.go:182] Loaded profile config "crio": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.26.3
I0424 15:22:54.664538 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:54.723387 61953 main.go:141] libmachine: Using SSH client type: native
I0424 15:22:54.723667 61953 main.go:141] libmachine: &{{{ 0 [] [] []} docker [0x10050da20] 0x100510400 [] 0s} 127.0.0.1 58871 }
I0424 15:22:54.723677 61953 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %!s(MISSING) "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
I0424 15:22:54.951424 61953 main.go:141] libmachine: SSH cmd err, output: : 
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
I0424 15:22:54.951442 61953 machine.go:91] provisioned docker machine in 1.167784459s
I0424 15:22:54.951455 61953 start.go:300] post-start starting for "crio" (driver="docker")
I0424 15:22:54.951467 61953 start.go:328] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0424 15:22:54.952075 61953 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0424 15:22:54.952447 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:55.007409 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:22:55.101546 61953 ssh_runner.go:195] Run: cat /etc/os-release
I0424 15:22:55.105848 61953 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0424 15:22:55.105863 61953 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0424 15:22:55.105867 61953 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0424 15:22:55.105870 61953 info.go:137] Remote host: Ubuntu 20.04.5 LTS
I0424 15:22:55.105874 61953 filesync.go:126] Scanning /Users/jedmeier/.minikube/addons for local assets ...
I0424 15:22:55.105992 61953 filesync.go:126] Scanning /Users/jedmeier/.minikube/files for local assets ...
I0424 15:22:55.106050 61953 start.go:303] post-start completed in 154.589375ms
I0424 15:22:55.106348 61953 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0424 15:22:55.106412 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:55.164116 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:22:55.255710 61953 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0424 15:22:55.260967 61953 fix.go:57] fixHost completed within 1.542444083s
I0424 15:22:55.260976 61953 start.go:83] releasing machines lock for "crio", held for 1.542472208s
I0424 15:22:55.261085 61953 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" crio
I0424 15:22:55.315129 61953 ssh_runner.go:195] Run: cat /version.json
I0424 15:22:55.315204 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:55.316000 61953 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0424 15:22:55.316265 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:22:55.369463 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:22:55.370551 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:22:55.457232 61953 ssh_runner.go:195] Run: systemctl --version
I0424 15:22:55.526810 61953 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
I0424 15:22:55.707707 61953 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0424 15:22:55.714201 61953 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0424 15:22:55.728856 61953 cni.go:220] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
I0424 15:22:55.729259 61953 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0424 15:22:55.744700 61953 cni.go:261] disabled [/etc/cni/net.d/87-podman.conflist] bridge cni config(s)
I0424 15:22:55.744713 61953 start.go:481] detecting cgroup driver to use...
I0424 15:22:55.744744 61953 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0424 15:22:55.744998 61953 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0424 15:23:03.917022 61953 ssh_runner.go:235] Completed: sudo systemctl stop -f containerd: (8.171971458s)
I0424 15:23:03.917362 61953 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0424 15:23:03.927772 61953 docker.go:193] disabling cri-docker service (if available) ...
I0424 15:23:03.928007 61953 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0424 15:23:03.945082 61953 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0424 15:23:03.955868 61953 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0424 15:23:04.026221 61953 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0424 15:23:04.092929 61953 docker.go:209] disabling docker service ...
I0424 15:23:04.093556 61953 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0424 15:23:04.104834 61953 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0424 15:23:04.115017 61953 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0424 15:23:04.179736 61953 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0424 15:23:04.245229 61953 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0424 15:23:04.255546 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I0424 15:23:04.270041 61953 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.9" pause image...
I0424 15:23:04.270281 61953 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf"
I0424 15:23:04.279782 61953 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
I0424 15:23:04.280048 61953 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
I0424 15:23:04.289342 61953 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
I0424 15:23:04.298107 61953 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
I0424 15:23:04.307137 61953 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0424 15:23:04.315294 61953 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0424 15:23:04.321983 61953 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0424 15:23:04.329549 61953 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0424 15:23:04.390016 61953 ssh_runner.go:195] Run: sudo systemctl restart crio
I0424 15:23:04.482439 61953 start.go:528] Will wait 60s for socket path /var/run/crio/crio.sock
I0424 15:23:04.483216 61953 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
I0424 15:23:04.487381 61953 start.go:549] Will wait 60s for crictl version
I0424 15:23:04.487652 61953 ssh_runner.go:195] Run: which crictl
I0424 15:23:04.491463 61953 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0424 15:23:04.518606 61953 start.go:565] Version:  0.1.0
RuntimeName:  cri-o
RuntimeVersion:  1.24.4
RuntimeApiVersion:  v1alpha2
I0424 15:23:04.522490 61953 ssh_runner.go:195] Run: crio --version
I0424 15:23:04.554711 61953 ssh_runner.go:195] Run: crio --version
I0424 15:23:04.600729 61953 out.go:177] 🎁 Preparing Kubernetes v1.26.3 on CRI-O 1.24.4 ...
I0424 15:23:04.611464 61953 cli_runner.go:164] Run: docker exec -t crio dig +short host.docker.internal
I0424 15:23:04.723421 61953 network.go:96] got host ip for mount in container by digging dns: 192.168.65.2
I0424 15:23:04.724013 61953 ssh_runner.go:195] Run: grep 192.168.65.2 host.minikube.internal$ /etc/hosts
I0424 15:23:04.728554 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" crio
I0424 15:23:04.777506 61953 preload.go:132] Checking if preload exists for k8s version v1.26.3 and runtime crio
I0424 15:23:04.777619 61953 ssh_runner.go:195] Run: sudo crictl images --output json
I0424 15:23:04.803350 61953 crio.go:497] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.26.3". assuming images are not preloaded.
I0424 15:23:04.803476 61953 ssh_runner.go:195] Run: which lz4
I0424 15:23:04.807427 61953 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /preloaded.tar.lz4
I0424 15:23:04.811018 61953 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%!s(MISSING) %!y(MISSING)" /preloaded.tar.lz4: Process exited with status 1
stdout:

stderr:
stat: cannot stat '/preloaded.tar.lz4': No such file or directory
I0424 15:23:04.811035 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.3-cri-o-overlay-arm64.tar.lz4 --> /preloaded.tar.lz4 (377011982 bytes)
I0424 15:23:08.023360 61953 crio.go:449] Took 3.216018 seconds to copy over tarball
I0424 15:23:08.023937 61953 ssh_runner.go:195] Run: sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4
I0424 15:23:09.733187 61953 ssh_runner.go:235] Completed: sudo tar -I lz4 -C /var -xf /preloaded.tar.lz4: (1.709207125s)
I0424 15:23:09.733224 61953 crio.go:456] Took 1.709830 seconds to extract the tarball
I0424 15:23:09.733239 61953 ssh_runner.go:146] rm: /preloaded.tar.lz4
I0424 15:23:09.844781 61953 ssh_runner.go:195] Run: sudo crictl images --output json
I0424 15:23:09.875786 61953 crio.go:501] all images are preloaded for cri-o runtime.
I0424 15:23:09.875858 61953 cache_images.go:84] Images are preloaded, skipping loading
I0424 15:23:09.876372 61953 ssh_runner.go:195] Run: crio config
I0424 15:23:09.913997 61953 cni.go:84] Creating CNI manager for ""
I0424 15:23:09.914008 61953 cni.go:142] "docker" driver + "crio" runtime found, recommending kindnet
I0424 15:23:09.914027 61953 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0424 15:23:09.914049 61953 kubeadm.go:172] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.26.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:crio NodeName:crio DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m]}
I0424 15:23:09.914282 61953 kubeadm.go:177] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
  name: "crio"
  kubeletExtraArgs:
    node-ip: 192.168.49.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.26.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%!"(MISSING)
  nodefs.inodesFree: "0%!"(MISSING)
  imagefs.available: "0%!"(MISSING)
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I0424 15:23:09.914421 61953 kubeadm.go:968] kubelet [Unit]
Wants=crio.service

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.26.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/crio/crio.sock --enforce-node-allocatable= --hostname-override=crio --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2

[Install]
 config: {KubernetesVersion:v1.26.3 ClusterName:crio Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
I0424 15:23:09.914661 61953 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.26.3
I0424 15:23:09.922454 61953 binaries.go:44] Found k8s binaries, skipping transfer
I0424 15:23:09.922639 61953 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0424 15:23:09.930026 61953 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (414 bytes)
I0424 15:23:09.943157 61953 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0424 15:23:09.956128 61953 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2078 bytes)
I0424 15:23:09.970055 61953 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0424 15:23:09.974251 61953 certs.go:56] Setting up /Users/jedmeier/.minikube/profiles/crio for IP: 192.168.49.2
I0424 15:23:09.974271 61953 certs.go:186] acquiring lock for shared ca certs: {Name:mk9ee36038ec1e3347adbb40fa510a79e79ccea8 Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0424 15:23:09.975910 61953 certs.go:195] skipping minikubeCA CA generation: /Users/jedmeier/.minikube/ca.key
I0424 15:23:09.976287 61953 certs.go:195] skipping proxyClientCA CA generation: /Users/jedmeier/.minikube/proxy-client-ca.key
I0424 15:23:09.976476 61953 certs.go:311] skipping minikube-user signed cert generation: /Users/jedmeier/.minikube/profiles/crio/client.key
I0424 15:23:09.976630 61953 certs.go:311] skipping minikube signed cert generation: /Users/jedmeier/.minikube/profiles/crio/apiserver.key.dd3b5fb2
I0424 15:23:09.977122 61953 certs.go:311] skipping aggregator signed cert generation: /Users/jedmeier/.minikube/profiles/crio/proxy-client.key
I0424 15:23:09.977645 61953 certs.go:401] found cert: /Users/jedmeier/.minikube/certs/Users/jedmeier/.minikube/certs/ca-key.pem (1675 bytes)
I0424 15:23:09.977700 61953 certs.go:401] found cert: /Users/jedmeier/.minikube/certs/Users/jedmeier/.minikube/certs/ca.pem (1082 bytes)
I0424 15:23:09.977725 61953 certs.go:401] found cert: /Users/jedmeier/.minikube/certs/Users/jedmeier/.minikube/certs/cert.pem (1127 bytes)
I0424 15:23:09.977749 61953 certs.go:401] found cert: /Users/jedmeier/.minikube/certs/Users/jedmeier/.minikube/certs/key.pem (1679 bytes)
I0424 15:23:09.978099 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/profiles/crio/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I0424 15:23:09.996119 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/profiles/crio/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0424 15:23:10.013798 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/profiles/crio/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0424 15:23:10.031704 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/profiles/crio/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0424 15:23:10.050491 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0424 15:23:10.071066 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0424 15:23:10.090480 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0424 15:23:10.109314 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0424 15:23:10.127347 61953 ssh_runner.go:362] scp /Users/jedmeier/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0424 15:23:10.145453 61953 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (740 bytes)
I0424 15:23:10.159961 61953 ssh_runner.go:195] Run: openssl version
I0424 15:23:10.165541 61953 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0424 15:23:10.176579 61953 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0424 15:23:10.180933 61953 certs.go:444] hashing: -rw-r--r-- 1 root root 1111 Jun 17 2021 /usr/share/ca-certificates/minikubeCA.pem
I0424 15:23:10.181097 61953 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0424 15:23:10.186993 61953 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0424 15:23:10.194589 61953 kubeadm.go:401] StartCluster: {Name:crio KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.39@sha256:bf2d9f1e9d837d8deea073611d2605405b6be904647d97ebd9b12045ddfe1106 Memory:11914 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.26.3 ClusterName:crio Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.26.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP:}
I0424 15:23:10.194680 61953 cri.go:52] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
I0424 15:23:10.194860 61953 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0424 15:23:10.221182 61953 cri.go:87] found id: ""
I0424 15:23:10.221457 61953 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0424 15:23:10.229147 61953 kubeadm.go:416] found existing configuration files, will attempt cluster restart
I0424 15:23:10.229159 61953 kubeadm.go:633] restartCluster start
I0424 15:23:10.229407 61953 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0424 15:23:10.237257 61953 kubeadm.go:127] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:

stderr:
I0424 15:23:10.237429 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" crio
I0424 15:23:10.291465 61953 kubeconfig.go:92] found "crio" server: "https://127.0.0.1:58875"
I0424 15:23:10.293351 61953 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0424 15:23:10.301110 61953 kubeadm.go:599] needs reconfigure: configs differ:
-- stdout --
--- /var/tmp/minikube/kubeadm.yaml	2023-04-24 13:01:54.532654007 +0000
+++ /var/tmp/minikube/kubeadm.yaml.new	2023-04-24 13:23:09.967376000 +0000
@@ -11,7 +11,7 @@
       - signing
       - authentication
 nodeRegistration:
-  criSocket: /var/run/cri-dockerd.sock
+  criSocket: /var/run/crio/crio.sock
   name: "crio"
   kubeletExtraArgs:
     node-ip: 192.168.49.2

-- /stdout --
I0424 15:23:10.301113 61953 kubeadm.go:1120] stopping kube-system containers ...
I0424 15:23:10.301118 61953 cri.go:52] listing CRI containers in root : {State:all Name: Namespaces:[kube-system]}
I0424 15:23:10.301174 61953 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0424 15:23:10.327564 61953 cri.go:87] found id: ""
I0424 15:23:10.327659 61953 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I0424 15:23:10.400057 61953 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0424 15:23:10.407861 61953 kubeadm.go:155] found existing configuration files:
-rw------- 1 root root 5643 Apr 24 13:01 /etc/kubernetes/admin.conf
-rw------- 1 root root 5656 Apr 24 13:01 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 1955 Apr 24 13:02 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5600 Apr 24 13:01 /etc/kubernetes/scheduler.conf
I0424 15:23:10.407963 61953 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0424 15:23:10.415186 61953 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0424 15:23:10.422831 61953 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0424 15:23:10.430248 61953 kubeadm.go:166] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:

stderr:
I0424 15:23:10.430438 61953 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0424 15:23:10.437664 61953 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0424 15:23:10.445185 61953 kubeadm.go:166] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:

stderr:
I0424 15:23:10.445377 61953 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0424 15:23:10.452798 61953 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0424 15:23:10.461304 61953 kubeadm.go:710] reconfiguring cluster from /var/tmp/minikube/kubeadm.yaml
I0424 15:23:10.461314 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.3:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I0424 15:23:10.511078 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.3:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I0424 15:23:11.122906 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.3:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I0424 15:23:11.363727 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.3:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I0424 15:23:11.415950 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.3:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I0424 15:23:11.517939 61953 api_server.go:51] waiting for apiserver process to appear ...
I0424 15:23:11.518175 61953 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0424 15:23:12.029045 61953 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0424 15:23:12.529426 61953 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0424 15:23:12.540337 61953 api_server.go:71] duration metric: took 1.022400542s to wait for apiserver process to appear ...
I0424 15:23:12.540350 61953 api_server.go:87] waiting for apiserver healthz status ...
I0424 15:23:12.540380 61953 api_server.go:252] Checking apiserver healthz at https://127.0.0.1:58875/healthz ...
I0424 15:23:15.059479 61953 api_server.go:278] https://127.0.0.1:58875/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W0424 15:23:15.059496 61953 api_server.go:102] status: https://127.0.0.1:58875/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I0424 15:23:15.560582 61953 api_server.go:252] Checking apiserver healthz at https://127.0.0.1:58875/healthz ...
I0424 15:23:15.565925 61953 api_server.go:278] https://127.0.0.1:58875/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0424 15:23:15.565944 61953 api_server.go:102] status: https://127.0.0.1:58875/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0424 15:23:16.060151 61953 api_server.go:252] Checking apiserver healthz at https://127.0.0.1:58875/healthz ...
I0424 15:23:16.066115 61953 api_server.go:278] https://127.0.0.1:58875/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0424 15:23:16.066132 61953 api_server.go:102] status: https://127.0.0.1:58875/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok [+]poststarthook/aggregator-reload-proxy-client-cert ok [+]poststarthook/start-kube-aggregator-informers ok [+]poststarthook/apiservice-registration-controller ok [+]poststarthook/apiservice-status-available-controller ok [+]poststarthook/kube-apiserver-autoregistration ok [+]autoregister-completion ok [+]poststarthook/apiservice-openapi-controller ok [+]poststarthook/apiservice-openapiv3-controller ok healthz check failed I0424 15:23:16.560087 61953 api_server.go:252] Checking apiserver healthz at https://127.0.0.1:58875/healthz ... I0424 15:23:16.565839 61953 api_server.go:278] https://127.0.0.1:58875/healthz returned 200: ok I0424 15:23:16.577816 61953 api_server.go:140] control plane version: v1.26.3 I0424 15:23:16.577828 61953 api_server.go:130] duration metric: took 4.037458834s to wait for apiserver health ... I0424 15:23:16.577835 61953 cni.go:84] Creating CNI manager for "" I0424 15:23:16.577840 61953 cni.go:142] "docker" driver + "crio" runtime found, recommending kindnet I0424 15:23:16.584102 61953 out.go:177] 🔗 Configuring CNI (Container Networking Interface) ... I0424 15:23:16.590346 61953 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap I0424 15:23:16.595591 61953 cni.go:181] applying CNI manifest using /var/lib/minikube/binaries/v1.26.3/kubectl ... I0424 15:23:16.595601 61953 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2428 bytes) I0424 15:23:16.611146 61953 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.26.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml I0424 15:23:17.101026 61953 system_pods.go:43] waiting for kube-system pods to appear ... I0424 15:23:17.108941 61953 system_pods.go:59] 7 kube-system pods found I0424 15:23:17.108956 61953 system_pods.go:61] "coredns-787d4945fb-4dbl5" [f4859ec9-79ee-4f2f-ab23-281ddb8f1d2a] Running I0424 15:23:17.108961 61953 system_pods.go:61] "etcd-crio" [148d0efd-a3a8-447a-86c6-c6fa3ff3a0dd] Running I0424 15:23:17.108966 61953 system_pods.go:61] "kube-apiserver-crio" [881affe7-d9c5-4505-9779-a33e3dc25aca] Running I0424 15:23:17.108978 61953 system_pods.go:61] "kube-controller-manager-crio" [fce7ed2c-2a31-4012-adbf-b266302e8ba8] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager]) I0424 15:23:17.108985 61953 system_pods.go:61] "kube-proxy-hpn6m" [a5ecd065-a654-4381-b77f-19c7bdc6100b] Running I0424 15:23:17.108989 61953 system_pods.go:61] "kube-scheduler-crio" [4125110c-635b-4fc7-8a2f-0b646362cf54] Running I0424 15:23:17.108993 61953 system_pods.go:61] "storage-provisioner" [1f1bd8e9-0f5f-479c-8acf-9d71eb80eb20] Running I0424 15:23:17.108998 61953 system_pods.go:74] duration metric: took 7.962375ms to wait for pod list to return data ... I0424 15:23:17.109003 61953 node_conditions.go:102] verifying NodePressure condition ... I0424 15:23:17.111735 61953 node_conditions.go:122] node storage ephemeral capacity is 123727180Ki I0424 15:23:17.111747 61953 node_conditions.go:123] node cpu capacity is 8 I0424 15:23:17.111757 61953 node_conditions.go:105] duration metric: took 2.750583ms to run NodePressure ... 
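[Editor's illustration] The api_server.go lines above show minikube polling the apiserver's /healthz endpoint until the RBAC and priority-class bootstrap hooks finish: first a 403 (anonymous user), then 500s while poststarthooks run, then 200 "ok". As a minimal sketch only (this is not minikube's actual code; port 58875 is simply the host port Docker mapped in this run, and skipping TLS verification stands in for loading the cluster CA from the kubeconfig), the wait amounts to a loop like:

    package main

    import (
        "crypto/tls"
        "fmt"
        "io"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{
            // Assumption: a real client would trust the cluster CA instead.
            Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
            Timeout:   5 * time.Second,
        }
        for {
            resp, err := client.Get("https://127.0.0.1:58875/healthz")
            if err == nil {
                body, _ := io.ReadAll(resp.Body)
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    fmt.Printf("healthz returned 200: %s\n", body)
                    return // apiserver is healthy
                }
                // 403/500 while bootstrap poststarthooks finish, as in the log above.
                fmt.Printf("healthz returned %d, retrying\n", resp.StatusCode)
            }
            time.Sleep(500 * time.Millisecond)
        }
    }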
I0424 15:23:17.111770 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.26.3:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I0424 15:23:17.236698 61953 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0424 15:23:17.244823 61953 ops.go:34] apiserver oom_adj: -16
I0424 15:23:17.244835 61953 kubeadm.go:637] restartCluster took 7.01564275s
I0424 15:23:17.244846 61953 kubeadm.go:403] StartCluster complete in 7.050235916s
I0424 15:23:17.244869 61953 settings.go:142] acquiring lock: {Name:mk8530f7c7875286fed27d4acaa34a28bbd9add5 Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0424 15:23:17.245761 61953 settings.go:150] Updating kubeconfig: /Users/jedmeier/.kube/config
I0424 15:23:17.250616 61953 lock.go:35] WriteFile acquiring /Users/jedmeier/.kube/config: {Name:mk2952b8aabe51ece99b1ca9db588cbc27be5854 Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0424 15:23:17.251729 61953 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.26.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0424 15:23:17.251755 61953 addons.go:496] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false]
I0424 15:23:17.251800 61953 addons.go:66] Setting storage-provisioner=true in profile "crio"
I0424 15:23:17.251807 61953 addons.go:228] Setting addon storage-provisioner=true in "crio"
W0424 15:23:17.251809 61953 addons.go:237] addon storage-provisioner should already be in state true
I0424 15:23:17.251824 61953 addons.go:66] Setting default-storageclass=true in profile "crio"
I0424 15:23:17.251842 61953 host.go:66] Checking if "crio" exists ...
I0424 15:23:17.251844 61953 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "crio"
I0424 15:23:17.251864 61953 config.go:182] Loaded profile config "crio": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.26.3
I0424 15:23:17.252169 61953 cli_runner.go:164] Run: docker container inspect crio --format={{.State.Status}}
I0424 15:23:17.252175 61953 cli_runner.go:164] Run: docker container inspect crio --format={{.State.Status}}
I0424 15:23:17.256577 61953 kapi.go:248] "coredns" deployment in "kube-system" namespace and "crio" context rescaled to 1 replicas
I0424 15:23:17.256593 61953 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.26.3 ContainerRuntime:crio ControlPlane:true Worker:true}
I0424 15:23:17.267059 61953 out.go:177] 🔎 Verifying Kubernetes components...
I0424 15:23:17.271592 61953 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0424 15:23:17.310916 61953 out.go:177] ▪ Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0424 15:23:17.311548 61953 addons.go:228] Setting addon default-storageclass=true in "crio"
W0424 15:23:17.314986 61953 addons.go:237] addon default-storageclass should already be in state true
I0424 15:23:17.314002 61953 start.go:889] CoreDNS already contains "host.minikube.internal" host record, skipping...
I0424 15:23:17.314087 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" crio
I0424 15:23:17.314999 61953 host.go:66] Checking if "crio" exists ...
I0424 15:23:17.315022 61953 addons.go:420] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0424 15:23:17.315027 61953 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0424 15:23:17.315090 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:23:17.315992 61953 cli_runner.go:164] Run: docker container inspect crio --format={{.State.Status}}
I0424 15:23:17.370508 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:23:17.372101 61953 api_server.go:51] waiting for apiserver process to appear ...
I0424 15:23:17.372218 61953 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0424 15:23:17.373084 61953 addons.go:420] installing /etc/kubernetes/addons/storageclass.yaml
I0424 15:23:17.373088 61953 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0424 15:23:17.373162 61953 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" crio
I0424 15:23:17.382488 61953 api_server.go:71] duration metric: took 125.879458ms to wait for apiserver process to appear ...
I0424 15:23:17.382506 61953 api_server.go:87] waiting for apiserver healthz status ...
I0424 15:23:17.382517 61953 api_server.go:252] Checking apiserver healthz at https://127.0.0.1:58875/healthz ...
I0424 15:23:17.387639 61953 api_server.go:278] https://127.0.0.1:58875/healthz returned 200:
ok
I0424 15:23:17.388429 61953 api_server.go:140] control plane version: v1.26.3
I0424 15:23:17.388432 61953 api_server.go:130] duration metric: took 5.924417ms to wait for apiserver health ...
I0424 15:23:17.388435 61953 system_pods.go:43] waiting for kube-system pods to appear ...
I0424 15:23:17.392355 61953 system_pods.go:59] 7 kube-system pods found
I0424 15:23:17.392361 61953 system_pods.go:61] "coredns-787d4945fb-4dbl5" [f4859ec9-79ee-4f2f-ab23-281ddb8f1d2a] Running
I0424 15:23:17.392363 61953 system_pods.go:61] "etcd-crio" [148d0efd-a3a8-447a-86c6-c6fa3ff3a0dd] Running
I0424 15:23:17.392365 61953 system_pods.go:61] "kube-apiserver-crio" [881affe7-d9c5-4505-9779-a33e3dc25aca] Running
I0424 15:23:17.392370 61953 system_pods.go:61] "kube-controller-manager-crio" [fce7ed2c-2a31-4012-adbf-b266302e8ba8] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0424 15:23:17.392374 61953 system_pods.go:61] "kube-proxy-hpn6m" [a5ecd065-a654-4381-b77f-19c7bdc6100b] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0424 15:23:17.392377 61953 system_pods.go:61] "kube-scheduler-crio" [4125110c-635b-4fc7-8a2f-0b646362cf54] Running
I0424 15:23:17.392379 61953 system_pods.go:61] "storage-provisioner" [1f1bd8e9-0f5f-479c-8acf-9d71eb80eb20] Running
I0424 15:23:17.392380 61953 system_pods.go:74] duration metric: took 3.943417ms to wait for pod list to return data ...
I0424 15:23:17.392384 61953 kubeadm.go:578] duration metric: took 135.781ms to wait for : map[apiserver:true system_pods:true] ...
I0424 15:23:17.392390 61953 node_conditions.go:102] verifying NodePressure condition ...
I0424 15:23:17.393985 61953 node_conditions.go:122] node storage ephemeral capacity is 123727180Ki
I0424 15:23:17.393989 61953 node_conditions.go:123] node cpu capacity is 8
I0424 15:23:17.393994 61953 node_conditions.go:105] duration metric: took 1.602417ms to run NodePressure ...
I0424 15:23:17.393998 61953 start.go:228] waiting for startup goroutines ...
I0424 15:23:17.421694 61953 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:58871 SSHKeyPath:/Users/jedmeier/.minikube/machines/crio/id_rsa Username:docker}
I0424 15:23:17.469117 61953 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.26.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0424 15:23:17.520721 61953 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.26.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0424 15:23:17.745863 61953 out.go:177] 🌟 Enabled addons: storage-provisioner, default-storageclass
I0424 15:23:17.758044 61953 addons.go:499] enable addons completed in 506.269084ms: enabled=[storage-provisioner default-storageclass]
I0424 15:23:17.758082 61953 start.go:233] waiting for cluster config update ...
I0424 15:23:17.758095 61953 start.go:242] writing updated cluster config ...
I0424 15:23:17.759710 61953 ssh_runner.go:195] Run: rm -f paused
I0424 15:23:17.907812 61953 start.go:568] kubectl: 1.26.3, cluster: 1.26.3 (minor skew: 0)
I0424 15:23:17.913292 61953 out.go:177] 🏄 Done! kubectl is now configured to use "crio" cluster and "default" namespace by default

*
* ==> CRI-O <==
*
-- Logs begin at Mon 2023-04-24 13:01:49 UTC, end at Mon 2023-04-24 13:25:51 UTC. --
Apr 24 13:24:47 crio crio[16910]: time="2023-04-24 13:24:47.639774628Z" level=info msg="Created container 6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9: kube-system/kube-scheduler-crio/kube-scheduler" id=bc407b07-9058-46ae-9df7-945ea13c297d name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:24:47 crio crio[16910]: time="2023-04-24 13:24:47.640184670Z" level=info msg="Starting container: 6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9" id=7850e6f9-0659-4c6e-97b9-8747e1f350f1 name=/runtime.v1.RuntimeService/StartContainer
Apr 24 13:24:47 crio crio[16910]: time="2023-04-24 13:24:47.652697337Z" level=info msg="Started container" PID=18363 containerID=6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9 description=kube-system/kube-scheduler-crio/kube-scheduler id=7850e6f9-0659-4c6e-97b9-8747e1f350f1 name=/runtime.v1.RuntimeService/StartContainer sandboxID=6512b0cc5c6c03de8ebf82c82062860a2e6778a604e7aa91e80afc6c8da82a2e
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.043877962Z" level=info msg="Removing container: ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766" id=b0d77016-0b74-4ba8-a00b-d473cddfc3b3 name=/runtime.v1.RuntimeService/RemoveContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.060231545Z" level=info msg="Removed container ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766: kube-system/kube-scheduler-crio/kube-scheduler" id=b0d77016-0b74-4ba8-a00b-d473cddfc3b3 name=/runtime.v1.RuntimeService/RemoveContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.528170670Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=bf00b0cc-9d9e-4c40-9a7f-4187ebb25f9f name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.528332462Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=bf00b0cc-9d9e-4c40-9a7f-4187ebb25f9f name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.528943379Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=883d6193-b546-4481-9353-fa9a669843e9 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.529050629Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=883d6193-b546-4481-9353-fa9a669843e9 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.529543462Z" level=info msg="Creating container: kube-system/kube-proxy-hpn6m/kube-proxy" id=a3e84c3f-2bbb-437a-81f6-47f0c730ceb5 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.529613712Z" level=warning msg="Allowed annotations are specified for workload []"
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.601812712Z" level=error msg="Container creation error: time=\"2023-04-24T13:24:48Z\" level=error msg=\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\"\n" id=a3e84c3f-2bbb-437a-81f6-47f0c730ceb5 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.613549629Z" level=info msg="createCtr: deleting container ID 02cda1af315f2fb000063bb344e3bedfecf938f15e9b8a8e0fed968f32a10eb0 from idIndex" id=a3e84c3f-2bbb-437a-81f6-47f0c730ceb5 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.613584670Z" level=info msg="createCtr: deleting container ID 02cda1af315f2fb000063bb344e3bedfecf938f15e9b8a8e0fed968f32a10eb0 from idIndex" id=a3e84c3f-2bbb-437a-81f6-47f0c730ceb5 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.613599379Z" level=info msg="createCtr: deleting container ID 02cda1af315f2fb000063bb344e3bedfecf938f15e9b8a8e0fed968f32a10eb0 from idIndex" id=a3e84c3f-2bbb-437a-81f6-47f0c730ceb5 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:24:48 crio crio[16910]: time="2023-04-24 13:24:48.617183587Z" level=info msg="createCtr: deleting container ID 02cda1af315f2fb000063bb344e3bedfecf938f15e9b8a8e0fed968f32a10eb0 from idIndex" id=a3e84c3f-2bbb-437a-81f6-47f0c730ceb5 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.529281593Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=911c088a-65a0-4893-bc17-313b7f543120 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.529729552Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=911c088a-65a0-4893-bc17-313b7f543120 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.536909010Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=b1011181-2840-4f91-8219-2b5d857a55d0 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.537160927Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=b1011181-2840-4f91-8219-2b5d857a55d0 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.538090802Z" level=info msg="Creating container: kube-system/kube-proxy-hpn6m/kube-proxy" id=fb704269-39a4-4954-9733-e19f2f5de099 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.538695802Z" level=warning msg="Allowed annotations are specified for workload []"
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.632379843Z" level=error msg="Container creation error: time=\"2023-04-24T13:25:01Z\" level=error msg=\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\"\n" id=fb704269-39a4-4954-9733-e19f2f5de099 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.645083135Z" level=info msg="createCtr: deleting container ID b64e3feba9885179b5c9e08c41a81528208d48258883edef1dc1dbe82219ccae from idIndex" id=fb704269-39a4-4954-9733-e19f2f5de099 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.645120802Z" level=info msg="createCtr: deleting container ID b64e3feba9885179b5c9e08c41a81528208d48258883edef1dc1dbe82219ccae from idIndex" id=fb704269-39a4-4954-9733-e19f2f5de099 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.645131635Z" level=info msg="createCtr: deleting container ID b64e3feba9885179b5c9e08c41a81528208d48258883edef1dc1dbe82219ccae from idIndex" id=fb704269-39a4-4954-9733-e19f2f5de099 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:01 crio crio[16910]: time="2023-04-24 13:25:01.649024593Z" level=info msg="createCtr: deleting container ID b64e3feba9885179b5c9e08c41a81528208d48258883edef1dc1dbe82219ccae from idIndex" id=fb704269-39a4-4954-9733-e19f2f5de099 name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.534340669Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=29c38594-6516-4c66-a548-d994bdd5842d name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.537389044Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=29c38594-6516-4c66-a548-d994bdd5842d name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.542272461Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=e1427303-3582-4af6-b653-5a103866dce6 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.542509919Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=e1427303-3582-4af6-b653-5a103866dce6 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.543471294Z" level=info msg="Creating container: kube-system/kube-proxy-hpn6m/kube-proxy" id=43f8c4f0-896e-4467-9724-1dddd9d9dffe name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.543611961Z" level=warning msg="Allowed annotations are specified for workload []"
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.621446127Z" level=error msg="Container creation error: time=\"2023-04-24T13:25:15Z\" level=error msg=\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\"\n" id=43f8c4f0-896e-4467-9724-1dddd9d9dffe name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.633140002Z" level=info msg="createCtr: deleting container ID aaab2789d8d9b6815150c26baad365a7fb28fc9e8e3c90c2184352f02fffeb87 from idIndex" id=43f8c4f0-896e-4467-9724-1dddd9d9dffe name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.633182752Z" level=info msg="createCtr: deleting container ID aaab2789d8d9b6815150c26baad365a7fb28fc9e8e3c90c2184352f02fffeb87 from idIndex" id=43f8c4f0-896e-4467-9724-1dddd9d9dffe name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.633193169Z" level=info msg="createCtr: deleting container ID aaab2789d8d9b6815150c26baad365a7fb28fc9e8e3c90c2184352f02fffeb87 from idIndex" id=43f8c4f0-896e-4467-9724-1dddd9d9dffe name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:15 crio crio[16910]: time="2023-04-24 13:25:15.637200752Z" level=info msg="createCtr: deleting container ID aaab2789d8d9b6815150c26baad365a7fb28fc9e8e3c90c2184352f02fffeb87 from idIndex" id=43f8c4f0-896e-4467-9724-1dddd9d9dffe name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.528702551Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=79a15d84-5ff7-4f5d-8d1c-84af37a50f5c name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.528932051Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=79a15d84-5ff7-4f5d-8d1c-84af37a50f5c name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.529820801Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=82e286b6-ef6c-4bad-ae53-979634ed7cf7 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.529960509Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=82e286b6-ef6c-4bad-ae53-979634ed7cf7 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.530599926Z" level=info msg="Creating container: kube-system/kube-proxy-hpn6m/kube-proxy" id=f6c55e12-fd2b-4806-bfc3-69c60454c23b name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.530673759Z" level=warning msg="Allowed annotations are specified for workload []"
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.604043551Z" level=error msg="Container creation error: time=\"2023-04-24T13:25:30Z\" level=error msg=\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\"\n" id=f6c55e12-fd2b-4806-bfc3-69c60454c23b name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.615203343Z" level=info msg="createCtr: deleting container ID 71d137e73b40cb11b0233bbc368547fd8e5dfd6898e48093d4524d655ca347b5 from idIndex" id=f6c55e12-fd2b-4806-bfc3-69c60454c23b name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.615236801Z" level=info msg="createCtr: deleting container ID 71d137e73b40cb11b0233bbc368547fd8e5dfd6898e48093d4524d655ca347b5 from idIndex" id=f6c55e12-fd2b-4806-bfc3-69c60454c23b name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.615259093Z" level=info msg="createCtr: deleting container ID 71d137e73b40cb11b0233bbc368547fd8e5dfd6898e48093d4524d655ca347b5 from idIndex" id=f6c55e12-fd2b-4806-bfc3-69c60454c23b name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:30 crio crio[16910]: time="2023-04-24 13:25:30.619121968Z" level=info msg="createCtr: deleting container ID 71d137e73b40cb11b0233bbc368547fd8e5dfd6898e48093d4524d655ca347b5 from idIndex" id=f6c55e12-fd2b-4806-bfc3-69c60454c23b name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.530191794Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=4faaf521-7bc4-4467-982a-3d50dd374311 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.530653502Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=4faaf521-7bc4-4467-982a-3d50dd374311 name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.541586752Z" level=info msg="Checking image status: registry.k8s.io/kube-proxy:v1.26.3" id=c71d61a3-2d9c-412a-88a8-d1231d609dbe name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.541945211Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:c859f97be11acc1b39835c42508c82f74a8352edc1d93fc07e3f605bb1c74a24,RepoTags:[registry.k8s.io/kube-proxy:v1.26.3],RepoDigests:[registry.k8s.io/kube-proxy@sha256:0814fd02ea64e0f5ab6e0313fc28652a0e50ea2353456b44ab18c654fb508e51 registry.k8s.io/kube-proxy@sha256:d89b6c6a8ecc920753df713b268b0d226f795135c4a0ecc5ce61660e623dd6da],Size_:63446245,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=c71d61a3-2d9c-412a-88a8-d1231d609dbe name=/runtime.v1.ImageService/ImageStatus
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.544059086Z" level=info msg="Creating container: kube-system/kube-proxy-hpn6m/kube-proxy" id=c532604a-3581-459b-913f-fde30b27fecf name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.544193002Z" level=warning msg="Allowed annotations are specified for workload []"
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.617643752Z" level=error msg="Container creation error: time=\"2023-04-24T13:25:45Z\" level=error msg=\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\"\n" id=c532604a-3581-459b-913f-fde30b27fecf name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.628921169Z" level=info msg="createCtr: deleting container ID 84205bacc64a9211d6da659a793f886b15085dfae03dde37190fc72913a614a0 from idIndex" id=c532604a-3581-459b-913f-fde30b27fecf name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.628956127Z" level=info msg="createCtr: deleting container ID 84205bacc64a9211d6da659a793f886b15085dfae03dde37190fc72913a614a0 from idIndex" id=c532604a-3581-459b-913f-fde30b27fecf name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.628966211Z" level=info msg="createCtr: deleting container ID 84205bacc64a9211d6da659a793f886b15085dfae03dde37190fc72913a614a0 from idIndex" id=c532604a-3581-459b-913f-fde30b27fecf name=/runtime.v1.RuntimeService/CreateContainer
Apr 24 13:25:45 crio crio[16910]: time="2023-04-24 13:25:45.632626669Z" level=info msg="createCtr: deleting container ID 84205bacc64a9211d6da659a793f886b15085dfae03dde37190fc72913a614a0 from idIndex" id=c532604a-3581-459b-913f-fde30b27fecf name=/runtime.v1.RuntimeService/CreateContainer

*
* ==> container status <==
*
CONTAINER      IMAGE                                                                                               CREATED             STATE    NAME                     ATTEMPT  POD ID
6b722dcfa1c69  fa167119f9a55e258bd7fae3b27525c5f0a6a41cbb1992dc8f300b4936cc8876                                    About a minute ago  Exited   kube-scheduler           6        6512b0cc5c6c0
6628603a7abd5  docker.io/kindest/kindnetd@sha256:5149f27d2a55574f79d4f1535ca03c4afaada8e9ba6c3c699788ab0362d9f7ae  2 minutes ago       Running  kindnet-cni              0        c3a0f8b77c343
90df1a9cedd24  ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6                                    2 minutes ago       Running  storage-provisioner      3        edb009bfd4e59
91390189a0f33  b19406328e70dd2f6a36d6dbe4e867b0684ced2fdeb2f02ecb54ead39ec0bac0                                    2 minutes ago       Running  coredns                  1        b2b94597a6107
5300966a45f99  ef245802824036d4a23ba6f8b3f04c055416f9dc73a54d546b1f98ad16f6b8cb                                    2 minutes ago       Running  etcd                     1        23110a15640aa
1b703e6f5f478  3f1ae10c5c85dc611809282b774bb6c8637bc02b40a202e1f110575b2a2df5a2                                    2 minutes ago       Running  kube-apiserver           1        48525a4ded730
5b74edba9b8d1  3b6ac91ff8d39cc54735bbc7a3beaf777c6213ac5edad185c281145102ce479b                                    2 minutes ago       Running  kube-controller-manager  2        f2aeced59d6c3

*
* ==> coredns [91390189a0f33cbcd3b5a4e99e61a293d7966a0c9276aa79da08de12030b600e] <==
*
.:53
[INFO] plugin/reload: Running configuration SHA512 = 8846d9ca81164c00fa03e78dfcf1a6846552cc49335bc010218794b8cfaf537759aa4b596e7dc20c0f618e8eb07603c0139662b99dfa3de45b176fbe7fb57ce1
CoreDNS-1.9.3
linux/arm64, go1.18.2, 45b0a11
[INFO] 127.0.0.1:55383 - 49925 "HINFO IN 5339597389294269863.2615050634524003416. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.0474715s
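[Editor's illustration] The repeating CRI-O failure above ("apply caps: operation not permitted", surfaced by runc's container_linux.go:380) is why kube-proxy never starts and why kube-scheduler shows as Exited with attempt 6 in the container status table. The same kernel-level failure can be reproduced outside the runtime with a short Go sketch (illustrative only, not how CRI-O/runc apply capabilities internally; CAP_NET_ADMIN and /bin/true are arbitrary choices): asking the kernel to raise an ambient capability that the parent process does not itself hold makes the child's start fail with EPERM.

    //go:build linux

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
    )

    // CAP_NET_ADMIN is 12 in linux/capability.h.
    const capNetAdmin = 12

    func main() {
        cmd := exec.Command("/bin/true")
        cmd.SysProcAttr = &syscall.SysProcAttr{
            // Request CAP_NET_ADMIN as an ambient capability for the child.
            // Run as an unprivileged user (without the capability in the
            // permitted set) and the start fails with EPERM, e.g.
            // "fork/exec /bin/true: operation not permitted".
            AmbientCaps: []uintptr{capNetAdmin},
        }
        if err := cmd.Run(); err != nil {
            fmt.Println("apply caps failed:", err)
        }
    }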
*
* ==> describe nodes <==
*
Name:               crio
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=arm64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=arm64
                    kubernetes.io/hostname=crio
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=08896fd1dc362c097c925146c4a0d0dac715ace0
                    minikube.k8s.io/name=crio
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2023_04_24T15_02_02_0700
                    minikube.k8s.io/version=v1.30.1
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Mon, 24 Apr 2023 13:01:59 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  crio
  AcquireTime:     <unset>
  RenewTime:       Mon, 24 Apr 2023 13:25:48 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Mon, 24 Apr 2023 13:23:45 +0000   Mon, 24 Apr 2023 13:01:58 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Mon, 24 Apr 2023 13:23:45 +0000   Mon, 24 Apr 2023 13:01:58 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Mon, 24 Apr 2023 13:23:45 +0000   Mon, 24 Apr 2023 13:01:58 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Mon, 24 Apr 2023 13:23:45 +0000   Mon, 24 Apr 2023 13:02:00 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.49.2
  Hostname:    crio
Capacity:
  cpu:                8
  ephemeral-storage:  123727180Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             12250056Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  123727180Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  hugepages-32Mi:     0
  hugepages-64Ki:     0
  memory:             12250056Ki
  pods:               110
System Info:
  Machine ID:                 61419f744ec9452499a59356fc030992
  System UUID:                61419f744ec9452499a59356fc030992
  Boot ID:                    26265435-95b0-4704-9761-a0c60154e284
  Kernel Version:             5.15.49-linuxkit
  OS Image:                   Ubuntu 20.04.5 LTS
  Operating System:           linux
  Architecture:               arm64
  Container Runtime Version:  cri-o://1.24.4
  Kubelet Version:            v1.26.3
  Kube-Proxy Version:         v1.26.3
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (8 in total)
  Namespace    Name                          CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                          ------------  ----------  ---------------  -------------  ---
  kube-system  coredns-787d4945fb-4dbl5      100m (1%)     0 (0%)      70Mi (0%)        170Mi (1%)     23m
  kube-system  etcd-crio                     100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         23m
  kube-system  kindnet-sjwj6                 100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      2m24s
  kube-system  kube-apiserver-crio           250m (3%)     0 (0%)      0 (0%)           0 (0%)         23m
  kube-system  kube-controller-manager-crio  200m (2%)     0 (0%)      0 (0%)           0 (0%)         23m
  kube-system  kube-proxy-hpn6m              0 (0%)        0 (0%)      0 (0%)           0 (0%)         23m
  kube-system  kube-scheduler-crio           100m (1%)     0 (0%)      0 (0%)           0 (0%)         23m
  kube-system  storage-provisioner           0 (0%)        0 (0%)      0 (0%)           0 (0%)         23m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                850m (10%)  100m (1%)
  memory             220Mi (1%)  220Mi (1%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
  hugepages-32Mi     0 (0%)      0 (0%)
  hugepages-64Ki     0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                    From             Message
  ----    ------                   ----                   ----             -------
  Normal  Starting                 23m                    kube-proxy
  Normal  NodeHasSufficientPID     23m (x4 over 23m)      kubelet          Node crio status is now: NodeHasSufficientPID
  Normal  NodeHasSufficientMemory  23m (x5 over 23m)      kubelet          Node crio status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    23m (x4 over 23m)      kubelet          Node crio status is now: NodeHasNoDiskPressure
  Normal  NodeAllocatableEnforced  23m                    kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 23m                    kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  23m                    kubelet          Node crio status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    23m                    kubelet          Node crio status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     23m                    kubelet          Node crio status is now: NodeHasSufficientPID
  Normal  RegisteredNode           23m                    node-controller  Node crio event: Registered Node crio in Controller
  Normal  Starting                 2m40s                  kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  2m40s (x8 over 2m40s)  kubelet          Node crio status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    2m40s (x8 over 2m40s)  kubelet          Node crio status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     2m40s (x8 over 2m40s)  kubelet          Node crio status is now: NodeHasSufficientPID
  Normal  RegisteredNode           2m24s                  node-controller  Node crio event: Registered Node crio in Controller

*
* ==> dmesg <==
*

*
* ==> etcd [5300966a45f99d6637ea3ea9f0e2790d0dc37fb049c0cd1359780aaa4b01b5a0] <==
*
{"level":"warn","ts":"2023-04-24T13:23:12.324Z","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCD_UNSUPPORTED_ARCH=arm64"}
{"level":"info","ts":"2023-04-24T13:23:12.324Z","caller":"etcdmain/etcd.go:73","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.49.2:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--experimental-initial-corrupt-check=true","--experimental-watch-progress-notify-interval=5s","--initial-advertise-peer-urls=https://192.168.49.2:2380","--initial-cluster=crio=https://192.168.49.2:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.49.2:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.49.2:2380","--name=crio","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--proxy-refresh-interval=70000","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt"]}
{"level":"info","ts":"2023-04-24T13:23:12.324Z","caller":"etcdmain/etcd.go:116","msg":"server has been already initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
{"level":"info","ts":"2023-04-24T13:23:12.325Z","caller":"embed/etcd.go:124","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2023-04-24T13:23:12.325Z","caller":"embed/etcd.go:484","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2023-04-24T13:23:12.325Z","caller":"embed/etcd.go:132","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"]}
{"level":"info","ts":"2023-04-24T13:23:12.325Z","caller":"embed/etcd.go:306","msg":"starting an etcd server","etcd-version":"3.5.6","git-sha":"cecbe35ce","go-version":"go1.17.13","go-os":"linux","go-arch":"arm64","max-cpu-set":8,"max-cpu-available":8,"member-initialized":true,"name":"crio","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"initial-corrupt-check":true,"corrupt-check-time-interval":"0s","compact-check-time-enabled":false,"compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","downgrade-check-interval":"5s"}
{"level":"info","ts":"2023-04-24T13:23:12.326Z","caller":"etcdserver/backend.go:81","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"1.007208ms"}
{"level":"info","ts":"2023-04-24T13:23:12.330Z","caller":"etcdserver/server.go:530","msg":"No snapshot found. Recovering WAL from scratch!"}
{"level":"info","ts":"2023-04-24T13:23:12.401Z","caller":"etcdserver/raft.go:529","msg":"restarting local member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","commit-index":1639}
{"level":"info","ts":"2023-04-24T13:23:12.401Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=()"}
{"level":"info","ts":"2023-04-24T13:23:12.401Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became follower at term 2"}
{"level":"info","ts":"2023-04-24T13:23:12.401Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"newRaft aec36adc501070cc [peers: [], term: 2, commit: 1639, applied: 0, lastindex: 1639, lastterm: 2]"}
{"level":"warn","ts":"2023-04-24T13:23:12.402Z","caller":"auth/store.go:1234","msg":"simple token is not cryptographically signed"}
{"level":"info","ts":"2023-04-24T13:23:12.402Z","caller":"mvcc/kvstore.go:323","msg":"restored last compact revision","meta-bucket-name":"meta","meta-bucket-name-key":"finishedCompactRev","restored-compact-revision":1088}
{"level":"info","ts":"2023-04-24T13:23:12.403Z","caller":"mvcc/kvstore.go:393","msg":"kvstore restored","current-rev":1374}
{"level":"info","ts":"2023-04-24T13:23:12.404Z","caller":"etcdserver/quota.go:94","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
{"level":"info","ts":"2023-04-24T13:23:12.404Z","caller":"etcdserver/corrupt.go:95","msg":"starting initial corruption check","local-member-id":"aec36adc501070cc","timeout":"7s"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"etcdserver/corrupt.go:165","msg":"initial corruption checking passed; no corruption","local-member-id":"aec36adc501070cc"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"etcdserver/server.go:854","msg":"starting etcd server","local-member-id":"aec36adc501070cc","local-server-version":"3.5.6","cluster-version":"to_be_decided"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"etcdserver/server.go:754","msg":"starting initial election tick advance","election-ticks":10}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2023-04-24T13:23:12.405Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2023-04-24T13:23:12.407Z","caller":"embed/etcd.go:687","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2023-04-24T13:23:12.407Z","caller":"embed/etcd.go:275","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2023-04-24T13:23:12.407Z","caller":"embed/etcd.go:586","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2023-04-24T13:23:12.407Z","caller":"embed/etcd.go:558","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2023-04-24T13:23:12.407Z","caller":"embed/etcd.go:762","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2023-04-24T13:23:14.303Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 2"}
{"level":"info","ts":"2023-04-24T13:23:14.303Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 2"}
{"level":"info","ts":"2023-04-24T13:23:14.303Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2023-04-24T13:23:14.304Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 3"}
{"level":"info","ts":"2023-04-24T13:23:14.304Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 3"}
{"level":"info","ts":"2023-04-24T13:23:14.304Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 3"}
{"level":"info","ts":"2023-04-24T13:23:14.304Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 3"}
{"level":"info","ts":"2023-04-24T13:23:14.308Z","caller":"etcdserver/server.go:2054","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:crio ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2023-04-24T13:23:14.308Z","caller":"embed/serve.go:100","msg":"ready to serve client requests"}
{"level":"info","ts":"2023-04-24T13:23:14.309Z","caller":"embed/serve.go:100","msg":"ready to serve client requests"}
{"level":"info","ts":"2023-04-24T13:23:14.313Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"127.0.0.1:2379"}
{"level":"info","ts":"2023-04-24T13:23:14.314Z","caller":"embed/serve.go:198","msg":"serving client traffic securely","address":"192.168.49.2:2379"}
{"level":"info","ts":"2023-04-24T13:23:14.322Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2023-04-24T13:23:14.322Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}

*
* ==> kernel <==
*
13:25:51 up 6 days, 9:54, 0 users, load average: 0.33, 0.69, 0.80
Linux crio 5.15.49-linuxkit #1 SMP PREEMPT Tue Sep 13 07:51:32 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 20.04.5 LTS"

*
* ==> kindnet [6628603a7abd59582cb59eaf1183264013d8fc2e74cdfdbe5caf51371f4609c7] <==
*
I0424 13:23:34.403143 1 main.go:102] connected to apiserver: https://10.96.0.1:443
I0424 13:23:34.403166 1 main.go:107] hostIP = 192.168.49.2 podIP = 192.168.49.2
I0424 13:23:34.403328 1 main.go:116] setting mtu 1500 for CNI
I0424 13:23:34.403340 1 main.go:146] kindnetd IP family: "ipv4"
I0424 13:23:34.403349 1 main.go:150] noMask IPv4 subnets: [10.244.0.0/16]
I0424 13:23:34.998222 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:23:34.999462 1 main.go:227] handling current node
I0424 13:23:45.029785 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:23:45.029885 1 main.go:227] handling current node
I0424 13:23:55.042989 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:23:55.043014 1 main.go:227] handling current node
I0424 13:24:05.056971 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:24:05.057068 1 main.go:227] handling current node
I0424 13:24:15.080028 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:24:15.080132 1 main.go:227] handling current node
I0424 13:24:25.089873 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:24:25.089954 1 main.go:227] handling current node
I0424 13:24:35.297938 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:24:35.298072 1 main.go:227] handling current node
I0424 13:24:45.310613 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:24:45.310705 1 main.go:227] handling current node
I0424 13:24:55.320824 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:24:55.320848 1 main.go:227] handling current node
I0424 13:25:05.330146 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:25:05.330188 1 main.go:227] handling current node
I0424 13:25:15.334511 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:25:15.334533 1 main.go:227] handling current node
I0424 13:25:25.396227 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:25:25.396364 1 main.go:227] handling current node
I0424 13:25:35.497088 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:25:35.497194 1 main.go:227] handling current node
I0424 13:25:45.504810 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0424 13:25:45.504854 1 main.go:227] handling current node

*
* ==> kube-apiserver [1b703e6f5f478afcd551e298991648d85ebc5dee6a7e2622dc6e265a0681a294] <==
*
W0424 13:23:14.675521 1 genericapiserver.go:660] Skipping API flowcontrol.apiserver.k8s.io/v1alpha1 because it has no resources.
W0424 13:23:14.677400 1 genericapiserver.go:660] Skipping API apps/v1beta2 because it has no resources.
W0424 13:23:14.677412 1 genericapiserver.go:660] Skipping API apps/v1beta1 because it has no resources.
W0424 13:23:14.678343 1 genericapiserver.go:660] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources.
W0424 13:23:14.678354 1 genericapiserver.go:660] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources.
W0424 13:23:14.679098 1 genericapiserver.go:660] Skipping API events.k8s.io/v1beta1 because it has no resources.
W0424 13:23:14.688671 1 genericapiserver.go:660] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources.
I0424 13:23:15.047107 1 dynamic_cafile_content.go:157] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I0424 13:23:15.047130 1 secure_serving.go:210] Serving securely on [::]:8443
I0424 13:23:15.047169 1 dynamic_serving_content.go:132] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key"
I0424 13:23:15.047239 1 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I0424 13:23:15.047251 1 available_controller.go:494] Starting AvailableConditionController
I0424 13:23:15.047254 1 cache.go:32] Waiting for caches to sync for AvailableConditionController controller
I0424 13:23:15.047264 1 controller.go:80] Starting OpenAPI V3 AggregationController
I0424 13:23:15.047318 1 autoregister_controller.go:141] Starting autoregister controller
I0424 13:23:15.047330 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0424 13:23:15.047392 1 crdregistration_controller.go:111] Starting crd-autoregister controller
I0424 13:23:15.047399 1 gc_controller.go:78] Starting apiserver lease garbage collector
I0424 13:23:15.047445 1 dynamic_serving_content.go:132] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key"
I0424 13:23:15.047537 1 apf_controller.go:361] Starting API Priority and Fairness config controller
I0424 13:23:15.047723 1 apiservice_controller.go:97] Starting APIServiceRegistrationController
I0424 13:23:15.047753 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller
I0424 13:23:15.047774 1 controller.go:83] Starting OpenAPI AggregationController
I0424 13:23:15.047887 1 controller.go:121] Starting legacy_token_tracking_controller
I0424 13:23:15.047902 1 shared_informer.go:273] Waiting for caches to sync for configmaps
I0424 13:23:15.048027 1 customresource_discovery_controller.go:288] Starting DiscoveryController
I0424 13:23:15.047400 1 shared_informer.go:273] Waiting for caches to sync for crd-autoregister
I0424 13:23:15.047539 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
I0424 13:23:15.047748 1 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I0424 13:23:15.051040 1 controller.go:85] Starting OpenAPI controller
I0424 13:23:15.051076 1 controller.go:85] Starting OpenAPI V3 controller
I0424 13:23:15.051084 1 naming_controller.go:291] Starting NamingConditionController
I0424 13:23:15.051095 1 establishing_controller.go:76] Starting EstablishingController
I0424 13:23:15.051102 1 nonstructuralschema_controller.go:192] Starting NonStructuralSchemaConditionController
I0424 13:23:15.051110 1 apiapproval_controller.go:186] Starting KubernetesAPIApprovalPolicyConformantConditionController
I0424 13:23:15.051120 1 crd_finalizer.go:266] Starting CRDFinalizer
I0424 13:23:15.047734 1 cluster_authentication_trust_controller.go:440] Starting cluster_authentication_trust_controller controller
I0424 13:23:15.051129 1 shared_informer.go:273] Waiting for caches to sync for cluster_authentication_trust_controller
I0424 13:23:15.047757 1 dynamic_cafile_content.go:157] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I0424 13:23:15.100906 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
E0424 13:23:15.102252 1 controller.go:159] Error removing old endpoints from kubernetes service: no master IPs were listed in storage, refusing to erase all endpoints for the kubernetes service
I0424 13:23:15.133737 1 shared_informer.go:280] Caches are synced for node_authorizer
I0424 13:23:15.147656 1 cache.go:39] Caches are synced for AvailableConditionController controller
I0424 13:23:15.147707 1 apf_controller.go:366] Running API Priority and Fairness config worker
I0424 13:23:15.147712 1 apf_controller.go:369] Running API Priority and Fairness periodic rebalancing process
I0424 13:23:15.147745 1 cache.go:39] Caches are synced for autoregister controller
I0424 13:23:15.147792 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0424 13:23:15.148067 1 shared_informer.go:280] Caches are synced for configmaps
I0424 13:23:15.148078 1 shared_informer.go:280] Caches are synced for crd-autoregister
I0424 13:23:15.151445 1 shared_informer.go:280] Caches are synced for cluster_authentication_trust_controller
I0424 13:23:15.914404 1 controller.go:132] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue).
I0424 13:23:16.059152 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0424 13:23:17.053055 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I0424 13:23:17.057231 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
I0424 13:23:17.204368 1 controller.go:615] quota admission added evaluator for: deployments.apps
I0424 13:23:17.225931 1 controller.go:615] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0424 13:23:17.228568 1 controller.go:615] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0424 13:23:17.636503 1 controller.go:615] quota admission added evaluator for: endpoints
I0424 13:23:27.974152 1 controller.go:615] quota admission added evaluator for: controllerrevisions.apps
I0424 13:23:28.075508 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
*
* ==> kube-controller-manager [5b74edba9b8d15692fe700636e674e43e497878ef307ed6690d6dbb1260fef56] <==
*
I0424 13:23:27.571663 1 deployment_controller.go:154] "Starting controller" controller="deployment"
I0424 13:23:27.571672 1 shared_informer.go:273] Waiting for caches to sync for deployment
I0424 13:23:27.622475 1 controllermanager.go:622] Started "statefulset"
I0424 13:23:27.622577 1 stateful_set.go:152] Starting stateful set controller
I0424 13:23:27.622587 1 shared_informer.go:273] Waiting for caches to sync for stateful set
I0424 13:23:27.675508 1 controllermanager.go:622] Started "pv-protection"
I0424 13:23:27.675759 1 pv_protection_controller.go:75] Starting PV protection controller
I0424 13:23:27.675779 1 shared_informer.go:273] Waiting for caches to sync for PV protection
I0424 13:23:27.681070 1 shared_informer.go:273] Waiting for caches to sync for resource quota
W0424 13:23:27.685182 1 actual_state_of_world.go:541] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="crio" does not exist
I0424 13:23:27.691080 1 shared_informer.go:273] Waiting for caches to sync for garbage collector
I0424 13:23:27.709066 1 shared_informer.go:280] Caches are synced for endpoint_slice_mirroring
I0424 13:23:27.716928 1 shared_informer.go:280] Caches are synced for service account
I0424 13:23:27.722965 1 shared_informer.go:280] Caches are synced for TTL
I0424 13:23:27.725696 1 shared_informer.go:280] Caches are synced for TTL after finished
I0424 13:23:27.745090 1 shared_informer.go:280] Caches are synced for node
I0424 13:23:27.745292 1 range_allocator.go:167] Sending events to api server.
I0424 13:23:27.745358 1 range_allocator.go:171] Starting range CIDR allocator
I0424 13:23:27.745363 1 shared_informer.go:273] Waiting for caches to sync for cidrallocator
I0424 13:23:27.745373 1 shared_informer.go:280] Caches are synced for cidrallocator
I0424 13:23:27.747714 1 shared_informer.go:280] Caches are synced for ephemeral
I0424 13:23:27.751862 1 shared_informer.go:280] Caches are synced for HPA
I0424 13:23:27.753856 1 shared_informer.go:280] Caches are synced for taint
I0424 13:23:27.754023 1 taint_manager.go:206] "Starting NoExecuteTaintManager"
I0424 13:23:27.754088 1 taint_manager.go:211] "Sending events to api server"
I0424 13:23:27.754101 1 node_lifecycle_controller.go:1438] Initializing eviction metric for zone:
W0424 13:23:27.754196 1 node_lifecycle_controller.go:1053] Missing timestamp for Node crio. Assuming now as a timestamp.
I0424 13:23:27.754237 1 node_lifecycle_controller.go:1254] Controller detected that zone is now in state Normal.
I0424 13:23:27.754479 1 event.go:294] "Event occurred" object="crio" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node crio event: Registered Node crio in Controller"
I0424 13:23:27.756365 1 shared_informer.go:280] Caches are synced for expand
I0424 13:23:27.758058 1 shared_informer.go:280] Caches are synced for endpoint
I0424 13:23:27.758188 1 shared_informer.go:280] Caches are synced for ReplicationController
I0424 13:23:27.762001 1 shared_informer.go:280] Caches are synced for certificate-csrapproving
I0424 13:23:27.763387 1 shared_informer.go:280] Caches are synced for daemon sets
I0424 13:23:27.764676 1 shared_informer.go:280] Caches are synced for PVC protection
I0424 13:23:27.768463 1 shared_informer.go:280] Caches are synced for ClusterRoleAggregator
I0424 13:23:27.768592 1 shared_informer.go:280] Caches are synced for cronjob
I0424 13:23:27.772194 1 shared_informer.go:280] Caches are synced for deployment
I0424 13:23:27.774560 1 shared_informer.go:280] Caches are synced for namespace
I0424 13:23:27.775573 1 shared_informer.go:280] Caches are synced for certificate-csrsigning-kubelet-serving
I0424 13:23:27.775601 1 shared_informer.go:280] Caches are synced for certificate-csrsigning-kubelet-client
I0424 13:23:27.776976 1 shared_informer.go:280] Caches are synced for certificate-csrsigning-legacy-unknown
I0424 13:23:27.777007 1 shared_informer.go:280] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0424 13:23:27.777011 1 shared_informer.go:280] Caches are synced for PV protection
I0424 13:23:27.808541 1 shared_informer.go:280] Caches are synced for endpoint_slice
I0424 13:23:27.809966 1 shared_informer.go:280] Caches are synced for ReplicaSet
I0424 13:23:27.813052 1 shared_informer.go:280] Caches are synced for persistent volume
I0424 13:23:27.816354 1 shared_informer.go:280] Caches are synced for GC
I0424 13:23:27.817796 1 shared_informer.go:280] Caches are synced for job
I0424 13:23:27.822853 1 shared_informer.go:280] Caches are synced for stateful set
I0424 13:23:27.852771 1 shared_informer.go:280] Caches are synced for bootstrap_signer
I0424 13:23:27.854806 1 shared_informer.go:280] Caches are synced for attach detach
I0424 13:23:27.916194 1 shared_informer.go:280] Caches are synced for crt configmap
I0424 13:23:27.944747 1 shared_informer.go:280] Caches are synced for resource quota
I0424 13:23:27.966619 1 shared_informer.go:280] Caches are synced for disruption
I0424 13:23:27.979382 1 event.go:294] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-sjwj6"
I0424 13:23:27.981139 1 shared_informer.go:280] Caches are synced for resource quota
I0424 13:23:28.291623 1 shared_informer.go:280] Caches are synced for garbage collector
I0424 13:23:28.361644 1 shared_informer.go:280] Caches are synced for garbage collector
I0424 13:23:28.361667 1 garbagecollector.go:163] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
*
* ==> kube-scheduler [6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9] <==
*
I0424 13:24:47.934798 1 serving.go:348] Generated self-signed cert in-memory
E0424 13:24:47.935430 1 run.go:74] "command failed" err="failed to create listener: failed to listen on 127.0.0.1:10259: listen tcp 127.0.0.1:10259: bind: address already in use"
*
* ==> kubelet <==
*
-- Logs begin at Mon 2023-04-24 13:01:49 UTC, end at Mon 2023-04-24 13:25:51 UTC. --
Apr 24 13:24:08 crio kubelet[17225]: E0424 13:24:08.626590 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:24:08Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b
Apr 24 13:24:08 crio kubelet[17225]: I0424 13:24:08.810549 17225 scope.go:115] "RemoveContainer" containerID="ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766"
Apr 24 13:24:08 crio kubelet[17225]: E0424 13:24:08.810774 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:24:23 crio kubelet[17225]: I0424 13:24:23.527962 17225 scope.go:115] "RemoveContainer" containerID="ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766"
Apr 24 13:24:23 crio kubelet[17225]: E0424 13:24:23.528162 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:24:24 crio kubelet[17225]: E0424 13:24:24.626221 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=<
Apr 24 13:24:24 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:24:24Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:24:24 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660"
Apr 24 13:24:24 crio kubelet[17225]: E0424 13:24:24.626322 17225 kuberuntime_manager.go:872] container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:24:24Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
&Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:24:24Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted" Apr 24 13:24:24 crio kubelet[17225]: E0424 13:24:24.626346 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:24:24Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b Apr 24 13:24:36 crio kubelet[17225]: I0424 13:24:36.534202 17225 scope.go:115] "RemoveContainer" containerID="ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766" Apr 24 13:24:36 crio kubelet[17225]: E0424 13:24:36.535418 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8 Apr 24 13:24:37 crio kubelet[17225]: E0424 13:24:37.620910 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=< Apr 24 13:24:37 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:24:37Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted" Apr 24 13:24:37 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660" Apr 24 13:24:37 crio kubelet[17225]: E0424 13:24:37.621023 17225 kuberuntime_manager.go:872] container 
&Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:24:37Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted" Apr 24 13:24:37 crio kubelet[17225]: E0424 13:24:37.621058 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:24:37Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b Apr 24 13:24:47 crio kubelet[17225]: I0424 13:24:47.529465 17225 scope.go:115] "RemoveContainer" containerID="ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766" Apr 24 13:24:48 crio kubelet[17225]: I0424 13:24:48.043107 17225 scope.go:115] "RemoveContainer" containerID="ed691447569b816fe72dc0c4962ce9b0059fa3e2cd3e60206f1c8c2c89e05766" Apr 24 13:24:48 crio kubelet[17225]: I0424 13:24:48.043219 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9" Apr 24 13:24:48 crio kubelet[17225]: E0424 13:24:48.043395 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8 Apr 24 13:24:48 crio kubelet[17225]: E0424 13:24:48.617385 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=< Apr 24 13:24:48 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:24:48Z" level=error msg="container_linux.go:380: 
Apr 24 13:24:48 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660"
Apr 24 13:24:48 crio kubelet[17225]: E0424 13:24:48.617483 17225 kuberuntime_manager.go:872] container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:24:48Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:24:48 crio kubelet[17225]: E0424 13:24:48.617507 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:24:48Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b
Apr 24 13:24:49 crio kubelet[17225]: I0424 13:24:49.051962 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:24:49 crio kubelet[17225]: E0424 13:24:49.055310 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:24:53 crio kubelet[17225]: I0424 13:24:53.320469 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:24:53 crio kubelet[17225]: E0424 13:24:53.321380 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:24:54 crio kubelet[17225]: I0424 13:24:54.104474 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:24:54 crio kubelet[17225]: E0424 13:24:54.105886 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:25:01 crio kubelet[17225]: E0424 13:25:01.649208 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=<
Apr 24 13:25:01 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:25:01Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:01 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660"
Apr 24 13:25:01 crio kubelet[17225]: E0424 13:25:01.649282 17225 kuberuntime_manager.go:872] container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:25:01Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:01 crio kubelet[17225]: E0424 13:25:01.649303 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:25:01Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b
Apr 24 13:25:05 crio kubelet[17225]: I0424 13:25:05.529261 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:25:05 crio kubelet[17225]: E0424 13:25:05.530477 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:25:15 crio kubelet[17225]: E0424 13:25:15.637388 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=<
Apr 24 13:25:15 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:25:15Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:15 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660"
Apr 24 13:25:15 crio kubelet[17225]: E0424 13:25:15.637485 17225 kuberuntime_manager.go:872] container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:25:15Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:15 crio kubelet[17225]: E0424 13:25:15.637511 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:25:15Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b
Apr 24 13:25:18 crio kubelet[17225]: I0424 13:25:18.530520 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:25:18 crio kubelet[17225]: E0424 13:25:18.531922 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:25:30 crio kubelet[17225]: E0424 13:25:30.619398 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=<
Apr 24 13:25:30 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:25:30Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:30 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660"
Apr 24 13:25:30 crio kubelet[17225]: E0424 13:25:30.619491 17225 kuberuntime_manager.go:872] container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:25:30Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:30 crio kubelet[17225]: E0424 13:25:30.619515 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:25:30Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b
Apr 24 13:25:33 crio kubelet[17225]: I0424 13:25:33.529058 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:25:33 crio kubelet[17225]: E0424 13:25:33.530257 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
Apr 24 13:25:45 crio kubelet[17225]: E0424 13:25:45.632872 17225 remote_runtime.go:302] "CreateContainer in sandbox from runtime service failed" err=<
Apr 24 13:25:45 crio kubelet[17225]: rpc error: code = Unknown desc = container create failed: time="2023-04-24T13:25:45Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:45 crio kubelet[17225]: > podSandboxID="a9227eea34d580b1872c36b6c0d1d854e545124f0fe247281c7a4d9a0dfc4660"
Apr 24 13:25:45 crio kubelet[17225]: E0424 13:25:45.632964 17225 kuberuntime_manager.go:872] container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.26.3,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-7ltxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod kube-proxy-hpn6m_kube-system(a5ecd065-a654-4381-b77f-19c7bdc6100b): CreateContainerError: container create failed: time="2023-04-24T13:25:45Z" level=error msg="container_linux.go:380: starting container process caused: apply caps: operation not permitted"
Apr 24 13:25:45 crio kubelet[17225]: E0424 13:25:45.632989 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerError: \"container create failed: time=\\\"2023-04-24T13:25:45Z\\\" level=error msg=\\\"container_linux.go:380: starting container process caused: apply caps: operation not permitted\\\"\\n\"" pod="kube-system/kube-proxy-hpn6m" podUID=a5ecd065-a654-4381-b77f-19c7bdc6100b
Apr 24 13:25:46 crio kubelet[17225]: I0424 13:25:46.528874 17225 scope.go:115] "RemoveContainer" containerID="6b722dcfa1c69725929c7961209705599748f05015dc5cd68a25eebc386d40a9"
Apr 24 13:25:46 crio kubelet[17225]: E0424 13:25:46.529526 17225 pod_workers.go:965] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-scheduler pod=kube-scheduler-crio_kube-system(72bd6c45b35d341b4109d5220964f1a8)\"" pod="kube-system/kube-scheduler-crio" podUID=72bd6c45b35d341b4109d5220964f1a8
*
* ==> storage-provisioner [90df1a9cedd247a53368e4136b249d384e0431eaa23911dcbf843e12eeac6211] <==
*
I0424 13:23:17.110572 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0424 13:23:17.116381 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0424 13:23:17.116469 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...