From d8cf0f4697b62ecbad4ef2753a51e7fa47e228d2 Mon Sep 17 00:00:00 2001 From: bobz965 Date: Mon, 23 Oct 2023 16:57:01 +0800 Subject: [PATCH 1/3] update ovn nats Signed-off-by: bobz965 --- docs/advance/ovn-eip-fip-snat.en.md | 126 ++++++++++++++++++++++++++- docs/advance/ovn-eip-fip-snat.md | 130 ++++++++++++++++++++++++++-- 2 files changed, 244 insertions(+), 12 deletions(-) diff --git a/docs/advance/ovn-eip-fip-snat.en.md b/docs/advance/ovn-eip-fip-snat.en.md index 0354e07b6..b1e2a3f96 100644 --- a/docs/advance/ovn-eip-fip-snat.en.md +++ b/docs/advance/ovn-eip-fip-snat.en.md @@ -1,22 +1,31 @@ # Support OVN EIP,FIP and SNAT ``` mermaid + graph LR +pod-->subnet-->vpc-->lrp--bind-->gw-chassis-->snat-->lsp-->external-subnet +lrp-.-peer-.-lsp -pod-->vpc1-subnet-->vpc1-->snat-->lrp-->external-subnet-->gw-node-external-nic ``` The pod access the public network based on the snat +Pod uses a centralized gateway based on Fip, and the path is similar. + ``` mermaid + graph LR -pod-->vpc1-subnet-->vpc1-->fip-->lrp-->external-subnet-->local-node-external-nic +pod-->subnet-->vpc-->lrp--bind-->local-chassis-->snat-->lsp-->external-subnet + + +lrp-.-peer-.-lsp + ``` -The pod access the public network based on the fip +Pod is based on the general flow of distributed gateway FIP (dnat_and_snat) to exit the public network. Finally, POD can exit the public network based on the public network NIC of the local node. The CRD supported by this function is basically the same as the iptable nat gw public network solution. @@ -60,6 +69,7 @@ The neutron ovn mode also has a certain static file configuration designation th ``` bash # provider-network, vlan, subnet # cat 01-provider-network.yaml + apiVersion: kubeovn.io/v1 kind: ProviderNetwork metadata: @@ -68,6 +78,7 @@ spec: defaultInterface: vlan # cat 02-vlan.yaml + apiVersion: kubeovn.io/v1 kind: Vlan metadata: @@ -77,6 +88,7 @@ spec: provider: external204 # cat 03-vlan-subnet.yaml + apiVersion: kubeovn.io/v1 kind: Subnet metadata: @@ -88,12 +100,15 @@ spec: vlan: vlan204 excludeIps: - 10.5.204.1..10.5.204.100 + ``` ### 1.2 Default vpc enable eip_snat ``` bash + # Enable the default vpc and the above underlay public provider subnet interconnection + cat 00-centralized-external-gw-no-ip.yaml apiVersion: v1 kind: ConfigMap @@ -106,6 +121,7 @@ data: type: "centralized" external-gw-nic: "vlan" external-gw-addr: "10.5.204.254/24" + ``` This feature currently supports the ability to create lrp type ovn eip resources without specifying the lrp ip and mac, which is already supported for automatic acquisition. @@ -116,12 +132,14 @@ Of course, you can also manually create the lrp type ovn eip in advance. ``` bash # cat 00-ns.yml + apiVersion: v1 kind: Namespace metadata: name: vpc1 # cat 01-vpc-ecmp-enable-external-bfd.yml + kind: Vpc apiVersion: kubeovn.io/v1 metadata: @@ -160,6 +178,7 @@ After the above template is applied, you should see the following resources exis ```bash # k ko nbctl show vpc1 + router 87ad06fd-71d5-4ff8-a1f0-54fa3bba1a7f (vpc1) port vpc1-vpc1-subnet1 mac: "00:00:00:ED:8E:C7" @@ -176,6 +195,7 @@ router 87ad06fd-71d5-4ff8-a1f0-54fa3bba1a7f (vpc1) ``` bash # k ko nbctl lr-route-list vpc1 + IPv4 Routes Route Table
: 0.0.0.0/0 10.5.204.254 dst-ip @@ -186,11 +206,12 @@ Route Table
: This function is designed and used in the same way as iptables-eip, ovn-eip currently has three types -- nat: indicates ovn dnat, fip, and snat. These nat types are recorded in status +- nat: indicates ovn dnat, fip, and snat. - lrp: indicates the resource used to connect a vpc to the public network - lsp: In the ovn BFD-based ecmp static route scenario, an ovs internal port is provided on the gateway node as the next hop of the ecmp route ``` bash + --- kind: OvnEip apiVersion: kubeovn.io/v1 @@ -232,6 +253,19 @@ metadata: spec: ovnEip: eip-static ipName: vpc-1-busybox01.vpc1 # the name of the ip crd, which is unique + +-- +# Alternatively, you can specify a vpc or Intranet ip address + +kind: OvnFip +apiVersion: kubeovn.io/v1 +metadata: + name: eip-static +spec: + ovnEip: eip-static + vpc: vpc1 + v4Ip: 192.168.0.2 + ``` ``` bash @@ -255,9 +289,11 @@ rtt min/avg/max/mdev = 0.368/0.734/1.210/0.352 ms [root@pc-node-1 03-cust-vpc]# # pod <--> node ping is working + ``` ``` bash + # The key resources that this public ip can pass include the following ovn nb resources # k ko nbctl show vpc1 @@ -280,8 +316,10 @@ router 87ad06fd-71d5-4ff8-a1f0-54fa3bba1a7f (vpc1) In order to facilitate the use of some vip scenarios, such as inside kubevirt VM, keepalived use vip, kube-vip use vip, etc. the vip need public network access. ``` bash + # First create vip, eip, then bind eip to vip # cat vip.yaml + apiVersion: kubeovn.io/v1 kind: Vip metadata: @@ -290,6 +328,7 @@ spec: subnet: vpc1-subnet1 # cat 04-fip.yaml + --- kind: OvnEip apiVersion: kubeovn.io/v1 @@ -308,6 +347,20 @@ spec: ovnEip: eip-for-vip ipType: vip # By default fip is for pod ip, here you need to specify the docking to vip resources ipName: test-fip-vip + +--- +# Alternatively, you can specify a vpc or Intranet ip address + +kind: OvnFip +apiVersion: kubeovn.io/v1 +metadata: + name: eip-for-vip +spec: + ovnEip: eip-for-vip + ipType: vip # By default fip is for pod ip, here you need to specify the docking to vip resources + vpc: vpc1 + v4Ip: 192.168.0.3 + ``` ``` bash @@ -362,7 +415,9 @@ tcpdump: listening on eth0, link-type EN10MB (Ethernet), capture size 262144 byt This feature is designed and used in much the same way as iptables-snat ```bash + # cat 03-subnet-snat.yaml + --- kind: OvnEip apiVersion: kubeovn.io/v1 @@ -380,6 +435,19 @@ metadata: spec: ovnEip: snat-for-subnet-in-vpc vpcSubnet: vpc1-subnet1 # eip corresponds to the entire network segment + +--- +# Alternatively, you can specify a vpc and subnet cidr on an Intranet + +kind: OvnSnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: snat-for-subnet-in-vpc +spec: + ovnEip: snat-for-subnet-in-vpc + vpc: vpc1 + v4IpCidr: 192.168.0.0/24 # vpc subnet cidr or ip address + ``` ### 3.2 ovn-snat corresponds to a pod IP @@ -406,6 +474,18 @@ spec: ovnEip: snat-for-pod-vpc-ip ipName: vpc-1-busybox02.vpc1 # eip corresponds to a single pod ip +--- +# Alternatively, you can specify a vpc or Intranet ip address + +kind: OvnSnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: snat-for-subnet-in-vpc +spec: + ovnEip: snat-for-subnet-in-vpc + vpc: vpc1 + v4IpCidr: 192.168.0.4 + ``` After the above resources are created, you can see the following resources that the snat public network feature depends on. 
@@ -496,6 +576,7 @@ rtt min/avg/max/mdev = 22.126/22.518/22.741/0.278 ms ### 4.1 ovn-dnat binds a DNAT to a pod ```yaml + kind: OvnEip apiVersion: kubeovn.io/v1 metadata: @@ -514,6 +595,22 @@ spec: protocol: tcp internalPort: "22" externalPort: "22" + +--- +# Alternatively, you can specify a vpc or Intranet ip address + +kind: OvnDnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: eip-dnat +spec: + ovnEip: eip-dnat + protocol: tcp + internalPort: "22" + externalPort: "22" + vpc: vpc1 + v4Ip: 192.168.0.3 + ``` The configuration of OvnDnatRule is similar to that of IptablesDnatRule. @@ -532,6 +629,7 @@ eip-dnat eip-dnat tcp 10.5.49.4 192.168.0. ### 4.2 ovn-dnat binds a DNAT to a VIP ```yaml + kind: OvnDnatRule apiVersion: kubeovn.io/v1 metadata: @@ -543,6 +641,25 @@ spec: protocol: tcp internalPort: "22" externalPort: "22" + + +--- +# Alternatively, you can specify a vpc or Intranet ip address + +kind: OvnDnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: eip-dnat +spec: + ipType: vip # By default, Dnat is oriented towards pod IPs. Here, it is necessary to specify that it is connected to VIP resources + ovnEip: eip-dnat + ipName: test-dnat-vip + protocol: tcp + internalPort: "22" + externalPort: "22" + vpc: vpc1 + v4Ip: 192.168.0.4 + ``` The configuration of OvnDnatRule is similar to that of IptablesDnatRule. @@ -559,4 +676,5 @@ eip-dnat 10.5.49.4 00:00:00:4D:CE:49 dnat true # kubectl get odnat eip-dnat NAME EIP PROTOCOL V4EIP V4IP INTERNALPORT EXTERNALPORT IPNAME READY eip-dnat eip-dnat tcp 10.5.49.4 192.168.0.4 22 22 test-dnat-vip true + ``` diff --git a/docs/advance/ovn-eip-fip-snat.md b/docs/advance/ovn-eip-fip-snat.md index a628c1ab6..3afb8b373 100644 --- a/docs/advance/ovn-eip-fip-snat.md +++ b/docs/advance/ovn-eip-fip-snat.md @@ -1,22 +1,30 @@ # OVN EIP FIP SNAT DNAT 支持 ``` mermaid + graph LR +pod-->subnet-->vpc-->lrp--bind-->gw-chassis-->snat-->lsp-->external-subnet +lrp-.-peer-.-lsp -pod-->vpc1-subnet-->vpc1-->snat-->lrp-->external-subnet-->gw-node-external-nic ``` Pod 基于 SNAT 出公网的大致流程,最后是经过网关节点的公网网卡。 +Pod 基于 Fip 使用集中式网关,路径也类似。 ``` mermaid + graph LR -pod-->vpc1-subnet-->vpc1-->fip-->lrp-->external-subnet-->local-node-external-nic +pod-->subnet-->vpc-->lrp--bind-->local-chassis-->snat-->lsp-->external-subnet + + +lrp-.-peer-.-lsp + ``` -Pod 基于 FIP 出公网的大致流程,最后可以基于本地节点的公网网卡出公网。 +Pod 基于分布式网关 FIP (dnat_and_snat) 出公网的大致流程,最后可以基于本地节点的公网网卡出公网。 该功能所支持的 CRD 在使用上将和 iptable nat gw 公网方案保持基本一致。 @@ -58,13 +66,16 @@ Pod 基于 FIP 出公网的大致流程,最后可以基于本地节点的公 ``` bash # 准备 provider-network, vlan, subnet # cat 01-provider-network.yaml + apiVersion: kubeovn.io/v1 kind: ProviderNetwork metadata: name: external204 spec: defaultInterface: vlan + # cat 02-vlan.yaml + apiVersion: kubeovn.io/v1 kind: Vlan metadata: @@ -72,7 +83,9 @@ metadata: spec: id: 204 provider: external204 + # cat 03-vlan-subnet.yaml + apiVersion: kubeovn.io/v1 kind: Subnet metadata: @@ -90,7 +103,8 @@ spec: ``` bash # 启用默认 vpc 和上述 underlay 公网 provider subnet 互联 -cat 00-centralized-external-gw-no-ip.yaml +# cat 00-centralized-external-gw-no-ip.yaml + apiVersion: v1 kind: ConfigMap metadata: @@ -104,21 +118,23 @@ data: external-gw-addr: "10.5.204.254/24" # underlay 物理网关的 ip ``` -目前该功能已支持可以不指定 lrp ip 和 mac,已支持自动获取,创建 lrp 类型的 ovn eip 资源。 +目前该功能已支持可以不指定 logical router port (lrp) ip 和 mac,已支持从 underlay 公网中自动分配,创建 lrp 类型的 ovn eip 资源。 -如果指定了,则相当于指定 ip 创建 lrp 类型的 ovn-eip。 +如果指定了,则相当于以指定 ip 的方式创建了一个 lrp 类型的 ovn-eip。 当然也可以提前手动创建 lrp 类型的 ovn eip。 ### 1.3 自定义 vpc 启用 eip snat fip 功能 ``` bash # cat 00-ns.yml + apiVersion: v1 kind: Namespace metadata: 
name: vpc1 # cat 01-vpc-ecmp-enable-external-bfd.yml + kind: Vpc apiVersion: kubeovn.io/v1 metadata: @@ -130,6 +146,7 @@ spec: # vpc 启用 enableExternal 会自动创建 lrp 关联到上述指定的公网 # cat 02-subnet.yml + apiVersion: kubeovn.io/v1 kind: Subnet metadata: @@ -158,6 +175,7 @@ spec: ```bash # k ko nbctl show vpc1 + router 87ad06fd-71d5-4ff8-a1f0-54fa3bba1a7f (vpc1) port vpc1-vpc1-subnet1 mac: "00:00:00:ED:8E:C7" @@ -174,6 +192,7 @@ router 87ad06fd-71d5-4ff8-a1f0-54fa3bba1a7f (vpc1) ``` bash # k ko nbctl lr-route-list vpc1 + IPv4 Routes Route Table
: 0.0.0.0/0 10.5.204.254 dst-ip @@ -184,8 +203,8 @@ Route Table
: 该功能和 iptables-eip 设计和使用方式基本一致,ovn-eip 目前有三种 type -- nat: 用于 ovn dnat,fip, snat, 这些 nat 类型会记录在 status 中 -- lrp: Resources connected to the public network from a vpc can be used by nat +- nat: 是指 ovn dnat,fip, snat 这三种 nat 资源类型 +- lrp: 软路由基于该端口和 underlay 公网互联,该 lrp 端口的 ip 可以被其他 dnat snat 复用 - lsp: 用于 ovn 基于 bfd 的 ecmp 静态路由场景,在网关节点上提供一个 ovs internal port 作为 ecmp 路由的下一跳 ``` bash @@ -230,6 +249,19 @@ metadata: spec: ovnEip: eip-static ipName: vpc-1-busybox01.vpc1 # 注意这里是 ip crd 的名字,具有唯一性 + +-- +# 或者通过传统指定 vpc 以及 内网 ip 的方式 + +kind: OvnFip +apiVersion: kubeovn.io/v1 +metadata: + name: eip-static +spec: + ovnEip: eip-static + vpc: vpc1 + v4Ip: 192.168.0.2 + ``` ``` bash @@ -281,6 +313,7 @@ router 87ad06fd-71d5-4ff8-a1f0-54fa3bba1a7f (vpc1) ``` bash # 先创建 vip,eip,再将 eip 绑定到 vip # cat vip.yaml + apiVersion: kubeovn.io/v1 kind: Vip metadata: @@ -289,6 +322,7 @@ spec: subnet: vpc1-subnet1 # cat 04-fip.yaml + --- kind: OvnEip apiVersion: kubeovn.io/v1 @@ -307,6 +341,20 @@ spec: ovnEip: eip-for-vip ipType: vip # 默认情况下 fip 是面向 pod ip 的,这里需要标注指定对接到 vip 资源 ipName: test-fip-vip + +--- +# 或者通过传统指定 vpc 以及 内网 ip 的方式 + +kind: OvnFip +apiVersion: kubeovn.io/v1 +metadata: + name: eip-for-vip +spec: + ovnEip: eip-for-vip + ipType: vip # 默认情况下 fip 是面向 pod ip 的,这里需要标注指定对接到 vip 资源 + vpc: vpc1 + v4Ip: 192.168.0.3 + ``` ``` bash @@ -362,6 +410,7 @@ tcpdump: listening on eth0, link-type EN10MB (Ethernet), capture size 262144 byt ```bash # cat 03-subnet-snat.yaml + --- kind: OvnEip apiVersion: kubeovn.io/v1 @@ -379,6 +428,19 @@ metadata: spec: ovnEip: snat-for-subnet-in-vpc vpcSubnet: vpc1-subnet1 # eip 对应整个网段 + +--- +# 或者通过传统指定 vpc 以及 内网 subnet cidr 的方式 + +kind: OvnSnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: snat-for-subnet-in-vpc +spec: + ovnEip: snat-for-subnet-in-vpc + vpc: vpc1 + v4IpCidr: 192.168.0.0/24 # 该字段可以是 cidr 也可以是 ip + ``` ### 3.2 ovn-snat 对应到一个 pod ip @@ -387,6 +449,7 @@ spec: ```bash # cat 03-pod-snat.yaml + --- kind: OvnEip apiVersion: kubeovn.io/v1 @@ -405,6 +468,18 @@ spec: ovnEip: snat-for-pod-vpc-ip ipName: vpc-1-busybox02.vpc1 # eip 对应单个 pod ip +--- +# 或者通过传统指定 vpc 以及 内网 ip 的方式 + +kind: OvnSnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: snat-for-subnet-in-vpc +spec: + ovnEip: snat-for-subnet-in-vpc + vpc: vpc1 + v4IpCidr: 192.168.0.4 + ``` 以上资源创建后,可以看到 snat 公网功能依赖的如下资源。 @@ -495,12 +570,14 @@ rtt min/avg/max/mdev = 22.126/22.518/22.741/0.278 ms ### 4.1 ovn-dnat 为 pod 绑定一个 dnat ```yaml + kind: OvnEip apiVersion: kubeovn.io/v1 metadata: name: eip-static spec: externalSubnet: underlay + --- kind: OvnDnatRule apiVersion: kubeovn.io/v1 @@ -512,6 +589,23 @@ spec: protocol: tcp internalPort: "22" externalPort: "22" + + +--- +# 或者通过传统指定 vpc 以及 内网 ip 的方式 + +kind: OvnDnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: eip-dnat +spec: + ovnEip: eip-dnat + protocol: tcp + internalPort: "22" + externalPort: "22" + vpc: vpc1 + v4Ip: 192.168.0.3 + ``` OvnDnatRule 的配置与 IptablesDnatRule 类似 @@ -530,6 +624,22 @@ eip-dnat eip-dnat tcp 10.5.49.4 192.168.0. 
### 4.2 ovn-dnat 为 vip 绑定一个 dnat ```yaml + +kind: OvnDnatRule +apiVersion: kubeovn.io/v1 +metadata: + name: eip-dnat +spec: + ipType: vip # 默认情况下 dnat 是面向 pod ip 的,这里需要标注指定对接到 vip 资源 + ovnEip: eip-dnat + ipName: test-dnat-vip + protocol: tcp + internalPort: "22" + externalPort: "22" + +--- +# 或者通过传统指定 vpc 以及 内网 ip 的方式 + kind: OvnDnatRule apiVersion: kubeovn.io/v1 metadata: @@ -541,6 +651,9 @@ spec: protocol: tcp internalPort: "22" externalPort: "22" + vpc: vpc1 + v4Ip: 192.168.0.4 + ``` OvnDnatRule 的配置与 IptablesDnatRule 类似 @@ -557,4 +670,5 @@ eip-dnat 10.5.49.4 00:00:00:4D:CE:49 dnat true # kubectl get odnat eip-dnat NAME EIP PROTOCOL V4EIP V4IP INTERNALPORT EXTERNALPORT IPNAME READY eip-dnat eip-dnat tcp 10.5.49.4 192.168.0.4 22 22 test-dnat-vip true + ``` From 558c0d5ceeb0c56dde6922d0f1a5862978e7402f Mon Sep 17 00:00:00 2001 From: bobz965 Date: Mon, 23 Oct 2023 17:18:18 +0800 Subject: [PATCH 2/3] fix lint Signed-off-by: bobz965 --- docs/advance/vpc-internal-lb.en.md | 102 ++++++++++++--------- docs/advance/vpc-internal-lb.md | 137 +++++++++++++++-------------- 2 files changed, 127 insertions(+), 112 deletions(-) diff --git a/docs/advance/vpc-internal-lb.en.md b/docs/advance/vpc-internal-lb.en.md index 21b5a5560..2fe996441 100755 --- a/docs/advance/vpc-internal-lb.en.md +++ b/docs/advance/vpc-internal-lb.en.md @@ -12,26 +12,28 @@ To address the above issues, Kube OVN introduced the `SwitchLBRule` CRD in 1.11, ## Automatically Generate Load Balancing Rules by `Selector` - Load balancing rules can be generated by `selector` automatic association with `pod` configuration through `label`. - - example of `SwitchLBRule` is as follows: - - ```yaml - apiVersion: kubeovn.io/v1 - kind: SwitchLBRule - metadata: - name: cjh-slr-nginx - spec: - vip: 1.1.1.1 - sessionAffinity: ClientIP - namespace: default - selector: - - app:nginx - ports: - - name: dns - port: 8888 - targetPort: 80 - protocol: TCP +Load balancing rules can be generated by `selector` automatic association with `pod` configuration through `label`. + +example of `SwitchLBRule` is as follows: + +```yaml + +apiVersion: kubeovn.io/v1 +kind: SwitchLBRule +metadata: + name: cjh-slr-nginx +spec: + vip: 1.1.1.1 + sessionAffinity: ClientIP + namespace: default + selector: + - app:nginx + ports: + - name: dns + port: 8888 + targetPort: 80 + protocol: TCP + ``` - usage of `selector`, `sessionAffinity`, and `port` is the same as Kubernetes Service. @@ -44,29 +46,29 @@ To address the above issues, Kube OVN introduced the `SwitchLBRule` CRD in 1.11, ## Manually Defined Load Balancing Rules by `Endpoints` - Load balancing rules can be customized configured by `endpoints`, to support scenarios where load balancing rules cannot be automatically generated through `selector`. For example, the load balancing backend is `vm` created by `kubevirt`. - - example of `SwitchLBRule` is as follows: - - ```yaml - apiVersion: kubeovn.io/v1 - kind: SwitchLBRule - metadata: - name: cjh-slr-nginx - spec: - vip: 1.1.1.1 - sessionAffinity: ClientIP - namespace: default - endpoints: - - 192.168.0.101 - - 192.168.0.102 - - 192.168.0.103 - ports: - - name: dns - port: 8888 - targetPort: 80 - protocol: TCP - ``` +Load balancing rules can be customized configured by `endpoints`, to support scenarios where load balancing rules cannot be automatically generated through `selector`. For example, the load balancing backend is `vm` created by `kubevirt`. 
+ +example of `SwitchLBRule` is as follows: + +```yaml +apiVersion: kubeovn.io/v1 +kind: SwitchLBRule +metadata: + name: cjh-slr-nginx +spec: + vip: 1.1.1.1 + sessionAffinity: ClientIP + namespace: default + endpoints: + - 192.168.0.101 + - 192.168.0.102 + - 192.168.0.103 + ports: + - name: dns + port: 8888 + targetPort: 80 + protocol: TCP +``` - usage of `sessionAffinity`, and `port` is the same as Kubernetes Service. @@ -76,7 +78,7 @@ To address the above issues, Kube OVN introduced the `SwitchLBRule` CRD in 1.11, - `endpoints`:load balancing backend IP list. - > **attention:**If both `selector` and `endpoints` are configured, the `selector` configuration will be automatically ignored. +> **attention:**If both `selector` and `endpoints` are configured, the `selector` configuration will be automatically ignored. ## Health Check @@ -93,11 +95,14 @@ Add a health check to `SwitchLBRule` based on the health check of the `ovn` load ### Create `SwitchLBRule` ```bash + root@server:~# kubectl get po -o wide -n vulpecula NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-78d9578975-f4qn4 1/1 Running 3 4d16h 10.16.0.4 worker nginx-78d9578975-t8tm5 1/1 Running 3 4d16h 10.16.0.6 worker + # create slr + root@server:~# cat << END > slr.yaml apiVersion: kubeovn.io/v1 kind: SwitchLBRule @@ -120,20 +125,25 @@ root@server:~# kubectl apply -f slr.yaml root@server:~# kubectl get slr NAME VIP PORT(S) SERVICE AGE vulpecula-nginx 1.1.1.1 8888/TCP default/slr-vulpecula-nginx 3d21h + ``` The `vip` with the same name of the `subnet` has been created. ```bash + # vip for check + root@server:~# kubectl get vip NAME NS V4IP MAC V6IP PMAC SUBNET READY TYPE vulpecula-subnet 10.16.0.2 00:00:00:39:95:C1 vulpecula-subnet true + ``` Query the `Load_Balancer_Health_Check` and `Service_Monitor` by commands. ```bash + root@server:~# kubectl ko nbctl list Load_Balancer _uuid : 3cbb6d43-44aa-4028-962f-30d2dba9f0b8 external_ids : {} @@ -209,6 +219,7 @@ Commercial support is available at Update the service endpoints of the load balancer by deleting the `pod`. ```bash + kubectl delete po nginx-78d9578975-f4qn4 kubectl get po -o wide -n vulpecula NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES @@ -219,6 +230,7 @@ nginx-78d9578975-t8tm5 1/1 Running 3 4d16h 10.16.0.6 worker Query the `Load_Balancer_Health_Check` and `Service_Monitor` by commands, the results have undergone corresponding changes. ```bash + root@server:~# kubectl ko nbctl list Load_Balancer _uuid : 3cbb6d43-44aa-4028-962f-30d2dba9f0b8 external_ids : {} @@ -263,6 +275,7 @@ status : online Delete `SwitchLBRule` and confirm the resource status, `Load_Balancer_Health_Check` adn `Service_Monitor` has been deleted, and the corresponding `vip` has also been deleted. 
```bash + root@server:~# kubectl delete -f slr.yaml switchlbrule.kubeovn.io "vulpecula-nginx" deleted root@server:~# kubectl get vip @@ -271,4 +284,5 @@ root@server:~# kubectl ko sbctl list Service_Monitor root@server:~# root@server:~# kubectl ko nbctl list Load_Balancer_Health_Check root@server:~# + ``` diff --git a/docs/advance/vpc-internal-lb.md b/docs/advance/vpc-internal-lb.md index 0e30b6734..6ed12ac3c 100755 --- a/docs/advance/vpc-internal-lb.md +++ b/docs/advance/vpc-internal-lb.md @@ -12,27 +12,29 @@ Kubernetes 提供的 Service 可以用作集群内的负载均衡, 但是在 ## `Selector` 自动生成负载均衡规则 - 通过 `selector` 可以通过 `label` 自动关联 `pod` 配置生成负载均衡规则。 - - `SwitchLBRule` 样例如下: - - ```yaml - apiVersion: kubeovn.io/v1 - kind: SwitchLBRule - metadata: - name: cjh-slr-nginx - spec: - vip: 1.1.1.1 - sessionAffinity: ClientIP - namespace: default - selector: - - app:nginx - ports: - - name: dns - port: 8888 - targetPort: 80 - protocol: TCP - ``` +通过 `selector` 可以通过 `label` 自动关联 `pod` 配置生成负载均衡规则。 + +`SwitchLBRule` 样例如下: + +```yaml + +apiVersion: kubeovn.io/v1 +kind: SwitchLBRule +metadata: + name: cjh-slr-nginx +spec: + vip: 1.1.1.1 + sessionAffinity: ClientIP + namespace: default + selector: + - app:nginx + ports: + - name: dns + port: 8888 + targetPort: 80 + protocol: TCP + +``` - `selector`, `sessionAffinity` 和 `port` 使用方式同 Kubernetes Service。 @@ -40,43 +42,42 @@ Kubernetes 提供的 Service 可以用作集群内的负载均衡, 但是在 - `namespace`:`selector` 所选择 Pod 所在命名空间。 - Kube-OVN 会根据 `SwitchLBRule` 定义选择的 Pod 得出 Pod 所在 VPC 并设置对应的 L2 LB。 +Kube-OVN 会根据 `SwitchLBRule` 定义选择的 Pod 得出 Pod 所在 VPC 并设置对应的 L2 LB。 ## `Endpoints` 自定义负载均衡规则 - 通过 `endpoints` 可以自定义负载均衡规则,用以支持无法通过 `selector` 自动生成负载均衡规则的场景,比如负载均衡后端是 `kubevirt` 创建的 `vm` 。 - - `SwitchLBRule` 样例如下: - - ```yaml - apiVersion: kubeovn.io/v1 - kind: SwitchLBRule - metadata: - name: cjh-slr-nginx - spec: - vip: 1.1.1.1 - sessionAffinity: ClientIP - namespace: default - endpoints: - - 192.168.0.101 - - 192.168.0.102 - - 192.168.0.103 - ports: - - name: dns - port: 8888 - targetPort: 80 - protocol: TCP - ``` +通过 `endpoints` 可以自定义负载均衡规则,用以支持无法通过 `selector` 自动生成负载均衡规则的场景,比如负载均衡后端是 `kubevirt` 创建的 `vm` 。 -- `sessionAffinity` 和 `port` 使用方式同 Kubernetes Service。 +`SwitchLBRule` 样例如下: -- `vip`:自定义负载均衡的 IP 地址。 +```yaml -- `namespace`:`selector` 所选择 Pod 所在命名空间。 +apiVersion: kubeovn.io/v1 +kind: SwitchLBRule +metadata: + name: cjh-slr-nginx +spec: + vip: 1.1.1.1 + sessionAffinity: ClientIP + namespace: default + endpoints: + - 192.168.0.101 + - 192.168.0.102 + - 192.168.0.103 + ports: + - name: dns + port: 8888 + targetPort: 80 + protocol: TCP + +``` +- `sessionAffinity` 和 `port` 使用方式同 Kubernetes Service。 +- `vip`:自定义负载均衡的 IP 地址。 +- `namespace`:`selector` 所选择 Pod 所在命名空间。 - `endpoints`:负载均衡后端 IP 列表。 - - > **注:**如果同时配置了 `selector` 和 `endpoints`,会自动忽略`selector`配置。 + +如果同时配置了 `selector` 和 `endpoints`, 会自动忽略 `selector` 配置。 ## 健康检查 @@ -87,16 +88,18 @@ Kubernetes 提供的 Service 可以用作集群内的负载均衡, 但是在 根据 `ovn` 负载均衡器的运行状况检查,对 `SwitchLBRule` 添加健康检查。在创建 `SwitchLBRule` 的同时,从对应的 `VPC` 和 `subnet` 中获取一个可复用的 `vip` 作为检测端点,并添加对应的 `ip_port_mappings` 和 `load_balancer_health_check` 到对应的负载均衡器上。 -> - 检测端点 `vip` 会自动在对应的 `subnet` 中判断是否存在,并且与 `subnet` 同名,如果不存在则会自动创建,并且在所有关联的 `SwitchLBRule` 被删除后自动被删除。 -> - 暂时只支持通过 `Selector` 自动生成的负载均衡规则 +- 检测端点 `vip` 会自动在对应的 `subnet` 中判断是否存在,并且与 `subnet` 同名,如果不存在则会自动创建,并且在所有关联的 `SwitchLBRule` 被删除后自动被删除。 +- 暂时只支持通过 `Selector` 自动生成的负载均衡规则 ### 创建负载均衡规则 ```bash + root@server:~# kubectl get po -o wide -n vulpecula NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-78d9578975-f4qn4 1/1 Running 3 4d16h 
10.16.0.4 worker nginx-78d9578975-t8tm5 1/1 Running 3 4d16h 10.16.0.6 worker + # 创建 slr root@server:~# cat << END > slr.yaml apiVersion: kubeovn.io/v1 @@ -120,20 +123,25 @@ root@server:~# kubectl apply -f slr.yaml root@server:~# kubectl get slr NAME VIP PORT(S) SERVICE AGE vulpecula-nginx 1.1.1.1 8888/TCP default/slr-vulpecula-nginx 3d21h + ``` 可以看到与 `subnet` 同名的 `vip` 已经被创建。 ```bash + # 查看检测端点 vip + root@server:~# kubectl get vip NAME NS V4IP MAC V6IP PMAC SUBNET READY TYPE vulpecula-subnet 10.16.0.2 00:00:00:39:95:C1 vulpecula-subnet true + ``` 通过命令可以查询到对应的 `Load_Balancer_Health_Check` 和 `Service_Monitor`。 ```bash + root@server:~# kubectl ko nbctl list Load_Balancer _uuid : 3cbb6d43-44aa-4028-962f-30d2dba9f0b8 external_ids : {} @@ -173,35 +181,22 @@ protocol : tcp src_ip : "10.16.0.2" src_mac : "c6:d4:b8:08:54:e7" status : online + ``` 此时通过负载均衡 `vip` 可以成功得到服务响应。 ```bash + root@server:~# kubectl exec -it -n vulpecula nginx-78d9578975-t8tm5 -- curl 1.1.1.1:8888 Welcome to nginx! - - - -

Welcome to nginx!
If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.

For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.

Thank you for using nginx.
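
# 通过负载均衡 vip 返回了 nginx 欢迎页,说明流量已正确转发到后端 pod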
+ ``` ### 更新负载均衡服务终端 @@ -209,16 +204,19 @@ Commercial support is available at 通过删除 `pod` 更新负载均衡器的服务终端。 ```bash + kubectl delete po nginx-78d9578975-f4qn4 kubectl get po -o wide -n vulpecula NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-78d9578975-lxmvh 1/1 Running 0 31s 10.16.0.8 worker nginx-78d9578975-t8tm5 1/1 Running 3 4d16h 10.16.0.6 worker + ``` 通过命令可以查询到对应的 `Load_Balancer_Health_Check` 和 `Service_Monitor` 已经发生了响应的变化。 ```bash + root@server:~# kubectl ko nbctl list Load_Balancer _uuid : 3cbb6d43-44aa-4028-962f-30d2dba9f0b8 external_ids : {} @@ -258,11 +256,13 @@ protocol : tcp src_ip : "10.16.0.2" src_mac : "c6:d4:b8:08:54:e7" status : online + ``` 删除 `SwitchLBRule`,并确认资源状态,可以看到 `Load_Balancer_Health_Check` 和 `Service_Monitor` 都已经被删除,并且对应的 `vip` 也被删除。 ```bash + root@server:~# kubectl delete -f slr.yaml switchlbrule.kubeovn.io "vulpecula-nginx" deleted root@server:~# kubectl get vip @@ -271,4 +271,5 @@ root@server:~# kubectl ko sbctl list Service_Monitor root@server:~# root@server:~# kubectl ko nbctl list Load_Balancer_Health_Check root@server:~# + ``` From d4f394a2f4c68fb1b26760e3fd3143595f44d173 Mon Sep 17 00:00:00 2001 From: bobz965 Date: Mon, 23 Oct 2023 17:47:13 +0800 Subject: [PATCH 3/3] rename Signed-off-by: bobz965 --- .../{vpc-dns.en.md => vpc-internal-dns.en.md} | 2 +- .../{vpc-dns.md => vpc-internal-dns.md} | 2 +- docs/advance/vpc-internal-lb.en.md | 30 +++++-------------- docs/advance/vpc-internal-lb.md | 0 docs/reference/feature-stage.md | 3 +- docs/reference/kube-ovn-api.en.md | 4 +-- docs/reference/kube-ovn-api.md | 2 +- mkdocs.yml | 4 +-- 8 files changed, 15 insertions(+), 32 deletions(-) rename docs/advance/{vpc-dns.en.md => vpc-internal-dns.en.md} (99%) rename docs/advance/{vpc-dns.md => vpc-internal-dns.md} (99%) mode change 100755 => 100644 docs/advance/vpc-internal-lb.en.md mode change 100755 => 100644 docs/advance/vpc-internal-lb.md diff --git a/docs/advance/vpc-dns.en.md b/docs/advance/vpc-internal-dns.en.md similarity index 99% rename from docs/advance/vpc-dns.en.md rename to docs/advance/vpc-internal-dns.en.md index 1ca3d1f08..8fdc3bff6 100644 --- a/docs/advance/vpc-dns.en.md +++ b/docs/advance/vpc-internal-dns.en.md @@ -1,4 +1,4 @@ -# Custom VPC DNS +# Custom VPC Internal DNS Due to the isolation of the user-defined VPC and the default VPC network, the coredns deployed in the default VPC cannot be accessed from within the custom VPC. If you wish to use the intra-cluster domain name resolution capability provided by Kubernetes within your custom VPC, you can refer to this document and utilize the vpc-dns CRD to do so. diff --git a/docs/advance/vpc-dns.md b/docs/advance/vpc-internal-dns.md similarity index 99% rename from docs/advance/vpc-dns.md rename to docs/advance/vpc-internal-dns.md index 70cae4fb0..7ca926844 100644 --- a/docs/advance/vpc-dns.md +++ b/docs/advance/vpc-internal-dns.md @@ -1,4 +1,4 @@ -# 自定义 VPC DNS +# 自定义 VPC 内部 DNS 由于用户自定义 VPC 和 默认 VPC 网络相互隔离,自定 VPC 内无法访问到部署在默认 VPC 内的 coredns。 如果用户希望在自定义 VPC 内使用 Kubernetes 提供的集群内域名解析能力,可以参考本文档,利用 `vpc-dns` CRD 来实现。 diff --git a/docs/advance/vpc-internal-lb.en.md b/docs/advance/vpc-internal-lb.en.md old mode 100755 new mode 100644 index 2fe996441..d71d3413b --- a/docs/advance/vpc-internal-lb.en.md +++ b/docs/advance/vpc-internal-lb.en.md @@ -37,12 +37,10 @@ spec: ``` - usage of `selector`, `sessionAffinity`, and `port` is the same as Kubernetes Service. - - `vip`:customize load balancing IP address. - - `namespace`:namespace of the `pod` selected by `selector`. 
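
For reference, a minimal backend Deployment that the sample selector above (`app:nginx` in the `default` namespace) would match could look like the sketch below; the name, image, and replica count are only illustrative.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx                  # illustrative name
  namespace: default           # must match the namespace in the SwitchLBRule
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx             # picked up by the SwitchLBRule selector "app:nginx"
    spec:
      containers:
      - name: nginx
        image: docker.io/library/nginx:alpine
        ports:
        - containerPort: 80    # the targetPort referenced by the SwitchLBRule
```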
- Kube OVN will determine the VPC of the selected `pod` based on the `SwitchLBRule` definition and set the corresponding L2 LB. +Kube OVN will determine the VPC of the selected `pod` based on the `SwitchLBRule` definition and set the corresponding L2 LB. ## Manually Defined Load Balancing Rules by `Endpoints` @@ -70,15 +68,13 @@ spec: protocol: TCP ``` -- usage of `sessionAffinity`, and `port` is the same as Kubernetes Service. +usage of `sessionAffinity`, and `port` is the same as Kubernetes Service. - `vip`:customize load balancing IP address. - - `namespace`:namespace of the `pod` selected by `selector`. - - `endpoints`:load balancing backend IP list. -> **attention:**If both `selector` and `endpoints` are configured, the `selector` configuration will be automatically ignored. +If both `selector` and `endpoints` are configured, the `selector` configuration will be automatically ignored. ## Health Check @@ -121,6 +117,7 @@ spec: targetPort: 80 protocol: TCP END + root@server:~# kubectl apply -f slr.yaml root@server:~# kubectl get slr NAME VIP PORT(S) SERVICE AGE @@ -183,6 +180,7 @@ protocol : tcp src_ip : "10.16.0.2" src_mac : "c6:d4:b8:08:54:e7" status : online + ``` At this point, the service response can be successfully obtained through load balancer `vip`. @@ -193,22 +191,6 @@ root@server:~# kubectl exec -it -n vulpecula nginx-78d9578975-t8tm5 -- curl 1.1. Welcome to nginx! - - - -

Welcome to nginx!
If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.

For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.

Thank you for using nginx.
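
# The nginx welcome page returned through the load balancer vip confirms traffic reaches the backend pods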
@@ -225,6 +207,7 @@ kubectl get po -o wide -n vulpecula NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-78d9578975-lxmvh 1/1 Running 0 31s 10.16.0.8 worker nginx-78d9578975-t8tm5 1/1 Running 3 4d16h 10.16.0.6 worker + ``` Query the `Load_Balancer_Health_Check` and `Service_Monitor` by commands, the results have undergone corresponding changes. @@ -270,6 +253,7 @@ protocol : tcp src_ip : "10.16.0.2" src_mac : "c6:d4:b8:08:54:e7" status : online + ``` Delete `SwitchLBRule` and confirm the resource status, `Load_Balancer_Health_Check` adn `Service_Monitor` has been deleted, and the corresponding `vip` has also been deleted. diff --git a/docs/advance/vpc-internal-lb.md b/docs/advance/vpc-internal-lb.md old mode 100755 new mode 100644 diff --git a/docs/reference/feature-stage.md b/docs/reference/feature-stage.md index f12b4deef..8f6bae3fa 100644 --- a/docs/reference/feature-stage.md +++ b/docs/reference/feature-stage.md @@ -75,6 +75,5 @@ | [StatefulSet 固定 IP](../guide/static-ip-mac.md) | true | GA | 1.8 | | | [VM 固定 IP](../guide/static-ip-mac.md) | false | Beta | 1.9 | | | [默认 VPC Load Balancer 类型 Service](../guide/loadbalancer-service.md) | false | Alpha | 1.11 | | -| [自定义 VPC 内部负载均衡](../guide/loadbalancer-service.md) | false | Alpha | 1.11 | | -| [自定义 VPC DNS](../guide/loadbalancer-service.md) | false | Alpha | 1.11 | | +| [自定义 VPC 内部 DNS](../advance/vpc-internal-dns.md) | false | Alpha | 1.11 | | | [Underlay 和 Overlay 互通](../start/underlay.md) | false | Alpha | 1.11 | | diff --git a/docs/reference/kube-ovn-api.en.md b/docs/reference/kube-ovn-api.en.md index 2c63bc127..4c2e309d8 100644 --- a/docs/reference/kube-ovn-api.en.md +++ b/docs/reference/kube-ovn-api.en.md @@ -414,7 +414,7 @@ The meaning of the above tolerance fields can be found in the official Kubernete | conditions | []VpcDnsCondition | VpcDns status change information, refer to the beginning of the document for the definition of Condition | | active | Bool | Whether VpcDns is in use | -For detailed documentation on the use of VpcDns, see [Customizing VPC DNS](../advance/vpc-dns.md). +For detailed documentation on the use of VpcDns, see [Customizing VPC DNS](../advance/vpc-internal-dns.md). ### SwitchLBRule @@ -436,7 +436,7 @@ For detailed documentation on the use of VpcDns, see [Customizing VPC DNS](../ad | sessionAffinity | String | Standard Kubernetes service sessionAffinity value | | ports | []SlrPort | List of SwitchLBRule ports | -For detailed configuration information of SwitchLBRule, you can refer to [Customizing VPC Internal Load Balancing](../advance/vpc-internal-lb.md). +For detailed configuration information of SwitchLBRule, you can refer to [Customizing VPC Internal Load Balancing health check](../advance/vpc-internal-lb.md). 
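
As a quick orientation, a `SwitchLBRule` that fills in the spec fields listed above might look like the following sketch; the name, VIP, and ports are illustrative, and the linked document contains the complete examples.

```yaml
apiVersion: kubeovn.io/v1
kind: SwitchLBRule
metadata:
  name: demo-slr                # illustrative name
spec:
  vip: 1.1.1.1                  # VIP carried by the rule
  namespace: default            # namespace of the backend pods
  sessionAffinity: ClientIP     # same semantics as a Kubernetes Service
  selector:
    - app:nginx                 # label used to pick backend pods
  ports:
    - name: http                # SlrPort.name
      port: 8888                # SlrPort.port, exposed on the VIP
      targetPort: 80            # SlrPort.targetPort on the backend pods
      protocol: TCP             # SlrPort.protocol
```
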
##### SlrPort diff --git a/docs/reference/kube-ovn-api.md b/docs/reference/kube-ovn-api.md index 8ad1224cc..5115d5e56 100644 --- a/docs/reference/kube-ovn-api.md +++ b/docs/reference/kube-ovn-api.md @@ -414,7 +414,7 @@ | conditions | []VpcDnsCondition | VpcDns 状态变化信息,具体字段参考文档开头 Condition 定义 | | active | Bool | VpcDns 是否正在使用 | -VpcDns 的详细使用文档,可以参考 [自定义 VPC DNS](../advance/vpc-dns.md)。 +VpcDns 的详细使用文档,可以参考 [自定义 VPC 内部 DNS](../advance/vpc-internal-dns.md)。 ### SwitchLBRule diff --git a/mkdocs.yml b/mkdocs.yml index e12086860..e0e7cc5eb 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -53,7 +53,7 @@ nav: - DHCP 设置: 'advance/dhcp.md' - VPC 互联: 'advance/vpc-peering.md' - 自定义 VPC 内部负载均衡: 'advance/vpc-internal-lb.md' - - 自定义 VPC DNS: 'advance/vpc-dns.md' + - 自定义 VPC 内部 DNS: 'advance/vpc-internal-dns.md' - 外部网关设置: 'advance/external-gateway.md' - VIP 预留设置: 'advance/vip.md' - Mellanox 网卡 Offload 支持: 'advance/offload-mellanox.md' @@ -204,7 +204,7 @@ plugins: Iptables 规则: Iptables Rules kube-ovn-pinger 参数描述: kube-ovn-pinger args description 自定义 VPC 内部负载均衡: VPC Internal Load Balancer - 自定义 VPC DNS: VPC DNS + 自定义 VPC 内部 DNS: VPC Internal DNS Kube-OVN 接口规范: Kube-OVN API Reference SecurityGroup 使用: SecurityGroup Usage OVN IPsec 支持: OVN IPsec Support