From 16cb2440675c5e3c4575b4d11961a23dd97cf998 Mon Sep 17 00:00:00 2001
From: Dan Winship
Date: Fri, 9 Jun 2017 16:19:10 -0400
Subject: [PATCH] Use GC rather than refcounting for VNID policy rules

The refcounting scheme required every codepath that added a pod or
service to balance RefVNID() with a later UnrefVNID(); a missed or
doubled unref either leaked table 80 rules or deleted them out from
under running pods. Replace the refcounts with a simple "in use" flag
plus garbage collection: RefVNID()/UnrefVNID() become
EnsureVNIDRules()/RemoveVNIDRules(), and a periodic syncVNIDRules()
pass uses the new ovsController.FindUnusedVNIDs() to find VNIDs that
still have table 80 policy rules but no table 60/70 service/pod
rules, and deletes the stale rules.
---
 pkg/sdn/plugin/multitenant.go        |  77 +++++-------
 pkg/sdn/plugin/networkpolicy.go      |  41 ++++------
 pkg/sdn/plugin/node.go               |  19 +++--
 pkg/sdn/plugin/ovscontroller.go      |  62 ++++++++++++++
 pkg/sdn/plugin/ovscontroller_test.go | 118 +++++++++++++++++++++++++++
 pkg/sdn/plugin/pod_linux.go          |   5 +-
 pkg/sdn/plugin/singletenant.go       |   4 +-
 7 files changed, 231 insertions(+), 95 deletions(-)

diff --git a/pkg/sdn/plugin/multitenant.go b/pkg/sdn/plugin/multitenant.go
index 0820f9831f9f..9e0fc922b78d 100644
--- a/pkg/sdn/plugin/multitenant.go
+++ b/pkg/sdn/plugin/multitenant.go
@@ -15,13 +15,13 @@ type multiTenantPlugin struct {
 	node  *OsdnNode
 	vnids *nodeVNIDMap
 
-	vnidRefsLock sync.Mutex
-	vnidRefs     map[uint32]int
+	vnidInUseLock sync.Mutex
+	vnidInUse     map[uint32]bool
 }
 
 func NewMultiTenantPlugin() osdnPolicy {
 	return &multiTenantPlugin{
-		vnidRefs: make(map[uint32]int),
+		vnidInUse: make(map[uint32]bool),
 	}
 }
 
@@ -66,14 +66,10 @@ func (mp *multiTenantPlugin) updatePodNetwork(namespace string, oldNetID, netID
 	}
 
 	if oldNetID != netID {
-		movedVNIDRefs := 0
-
 		// Update OF rules for the existing/old pods in the namespace
 		for _, pod := range pods {
 			err = mp.node.UpdatePod(pod)
-			if err == nil {
-				movedVNIDRefs++
-			} else {
+			if err != nil {
 				glog.Errorf("Could not update pod %q in namespace %q: %v", pod.Name, namespace, err)
 			}
 		}
@@ -86,12 +82,10 @@
 			mp.node.DeleteServiceRules(&svc)
 			mp.node.AddServiceRules(&svc, netID)
-			movedVNIDRefs++
 		}
 
-		if movedVNIDRefs > 0 {
-			mp.moveVNIDRefs(movedVNIDRefs, oldNetID, netID)
-		}
+		mp.EnsureVNIDRules(netID)
+		mp.RemoveVNIDRules(oldNetID)
 
 		// Update namespace references in egress firewall rules
 		mp.node.UpdateEgressNetworkPolicyVNID(namespace, oldNetID, netID)
 
@@ -126,18 +120,19 @@ func (mp *multiTenantPlugin) GetMulticastEnabled(vnid uint32) bool {
 	return mp.vnids.GetMulticastEnabled(vnid)
 }
 
-func (mp *multiTenantPlugin) RefVNID(vnid uint32) {
+func (mp *multiTenantPlugin) EnsureVNIDRules(vnid uint32) {
 	if vnid == 0 {
 		return
 	}
 
-	mp.vnidRefsLock.Lock()
-	defer mp.vnidRefsLock.Unlock()
-	mp.vnidRefs[vnid] += 1
-	if mp.vnidRefs[vnid] > 1 {
+	mp.vnidInUseLock.Lock()
+	defer mp.vnidInUseLock.Unlock()
+	if mp.vnidInUse[vnid] {
 		return
 	}
-	glog.V(5).Infof("RefVNID %d adding rule", vnid)
+	mp.vnidInUse[vnid] = true
+
+	glog.V(5).Infof("EnsureVNIDRules %d - adding rules", vnid)
 
 	otx := mp.node.oc.NewTransaction()
 	otx.AddFlow("table=80, priority=100, reg0=%d, reg1=%d, actions=output:NXM_NX_REG2[]", vnid, vnid)
@@ -146,52 +141,20 @@
 	}
 }
 
-func (mp *multiTenantPlugin) UnrefVNID(vnid uint32) {
+func (mp *multiTenantPlugin) RemoveVNIDRules(vnid uint32) {
 	if vnid == 0 {
 		return
 	}
 
-	mp.vnidRefsLock.Lock()
-	defer mp.vnidRefsLock.Unlock()
-	if mp.vnidRefs[vnid] == 0 {
-		glog.Warningf("refcounting error on vnid %d", vnid)
-		return
-	}
-	mp.vnidRefs[vnid] -= 1
-	if mp.vnidRefs[vnid] > 0 {
-		return
-	}
-	glog.V(5).Infof("UnrefVNID %d removing rule", vnid)
+	mp.vnidInUseLock.Lock()
+	defer mp.vnidInUseLock.Unlock()
+	mp.vnidInUse[vnid] = false
+
+	glog.V(5).Infof("RemoveVNIDRules %d", vnid)
 
 	otx := mp.node.oc.NewTransaction()
-	otx.DeleteFlows("table=80, reg0=%d, reg1=%d", vnid, vnid)
+	otx.DeleteFlows("table=80, reg1=%d", vnid)
 	if err := otx.EndTransaction(); err != nil {
glog.Errorf("Error deleting OVS flow for VNID: %v", err) } } - -func (mp *multiTenantPlugin) moveVNIDRefs(num int, oldVNID, newVNID uint32) { - glog.V(5).Infof("moveVNIDRefs %d -> %d", oldVNID, newVNID) - - mp.vnidRefsLock.Lock() - defer mp.vnidRefsLock.Unlock() - - otx := mp.node.oc.NewTransaction() - if mp.vnidRefs[oldVNID] <= num { - otx.DeleteFlows("table=80, reg0=%d, reg1=%d", oldVNID, oldVNID) - } - if mp.vnidRefs[newVNID] == 0 { - otx.AddFlow("table=80, priority=100, reg0=%d, reg1=%d, actions=output:NXM_NX_REG2[]", newVNID, newVNID) - } - err := otx.EndTransaction() - if err != nil { - glog.Errorf("Error modifying OVS flows for VNID: %v", err) - } - - mp.vnidRefs[oldVNID] -= num - if mp.vnidRefs[oldVNID] < 0 { - glog.Warningf("refcounting error on vnid %d", oldVNID) - mp.vnidRefs[oldVNID] = 0 - } - mp.vnidRefs[newVNID] += num -} diff --git a/pkg/sdn/plugin/networkpolicy.go b/pkg/sdn/plugin/networkpolicy.go index 891d6a5322c0..0785946bbd70 100644 --- a/pkg/sdn/plugin/networkpolicy.go +++ b/pkg/sdn/plugin/networkpolicy.go @@ -47,7 +47,6 @@ type npNamespace struct { name string vnid uint32 isolated bool - refs int inUse bool policies map[ktypes.UID]*npPolicy @@ -121,7 +120,7 @@ func (np *networkPolicyPlugin) initNamespaces() error { name: ns.Name, vnid: vnid, isolated: namespaceIsIsolated(&ns), - refs: 0, + inUse: false, policies: make(map[ktypes.UID]*npPolicy), } } @@ -164,7 +163,7 @@ func (np *networkPolicyPlugin) AddNetNamespace(netns *osapi.NetNamespace) { name: netns.NetName, vnid: netns.NetID, isolated: isolated, - refs: 0, + inUse: false, policies: make(map[ktypes.UID]*npPolicy), } } @@ -197,15 +196,10 @@ func (np *networkPolicyPlugin) GetMulticastEnabled(vnid uint32) bool { } func (np *networkPolicyPlugin) syncNamespace(npns *npNamespace) { - inUse := npns.refs > 0 - if !inUse && !npns.inUse { - return - } - glog.V(5).Infof("syncNamespace %d", npns.vnid) otx := np.node.oc.NewTransaction() otx.DeleteFlows("table=80, reg1=%d", npns.vnid) - if inUse { + if npns.inUse { if npns.isolated { for _, npp := range npns.policies { for _, flow := range npp.flows { @@ -219,23 +213,22 @@ func (np *networkPolicyPlugin) syncNamespace(npns *npNamespace) { if err := otx.EndTransaction(); err != nil { glog.Errorf("Error syncing OVS flows for VNID: %v", err) } - npns.inUse = inUse } -func (np *networkPolicyPlugin) RefVNID(vnid uint32) { +func (np *networkPolicyPlugin) EnsureVNIDRules(vnid uint32) { np.lock.Lock() defer np.lock.Unlock() npns, exists := np.namespaces[vnid] - if !exists { + if !exists || npns.inUse { return } - npns.refs += 1 + npns.inUse = true np.syncNamespace(npns) } -func (np *networkPolicyPlugin) UnrefVNID(vnid uint32) { +func (np *networkPolicyPlugin) RemoveVNIDRules(vnid uint32) { np.lock.Lock() defer np.lock.Unlock() @@ -243,12 +236,8 @@ func (np *networkPolicyPlugin) UnrefVNID(vnid uint32) { if !exists { return } - if npns.refs == 0 { - glog.Warningf("refcounting error on vnid %d", vnid) - return - } - npns.refs -= 1 + npns.inUse = false np.syncNamespace(npns) } @@ -408,11 +397,15 @@ func (np *networkPolicyPlugin) watchNetworkPolicies() { switch delta.Type { case cache.Sync, cache.Added, cache.Updated: if changed := np.updateNetworkPolicy(npns, policy); changed { - np.syncNamespace(npns) + if npns.inUse { + np.syncNamespace(npns) + } } case cache.Deleted: delete(npns.policies, policy.UID) - np.syncNamespace(npns) + if npns.inUse { + np.syncNamespace(npns) + } } return nil @@ -522,7 +515,9 @@ func (np *networkPolicyPlugin) handleAddOrUpdateNamespace(obj, _ interface{}, ev 
np.kNamespaces[ns.Name] = *ns
 
 	if npns, exists := np.namespaces[vnid]; exists {
 		npns.isolated = namespaceIsIsolated(ns)
-		np.syncNamespace(npns)
+		if npns.inUse {
+			np.syncNamespace(npns)
+		}
 	}
 	// else the NetNamespace doesn't exist yet, but we will initialize
 	// npns.isolated from the kapi.Namespace when it's created
@@ -555,7 +550,7 @@ func (np *networkPolicyPlugin) refreshNetworkPolicies(watchResourceName Resource
 			}
 		}
 	}
-	if changed {
+	if changed && npns.inUse {
 		np.syncNamespace(npns)
 	}
 }
diff --git a/pkg/sdn/plugin/node.go b/pkg/sdn/plugin/node.go
index 5abca60202ff..9cc4e3b89599 100644
--- a/pkg/sdn/plugin/node.go
+++ b/pkg/sdn/plugin/node.go
@@ -45,8 +45,8 @@ type osdnPolicy interface {
 	GetNamespaces(vnid uint32) []string
 	GetMulticastEnabled(vnid uint32) bool
 
-	RefVNID(vnid uint32)
-	UnrefVNID(vnid uint32)
+	EnsureVNIDRules(vnid uint32)
+	RemoveVNIDRules(vnid uint32)
 }
 
 type OsdnNode struct {
@@ -284,11 +284,13 @@ func (node *OsdnNode) Start() error {
 				continue
 			}
 			if vnid, err := node.policy.GetVNID(p.Namespace); err == nil {
-				node.policy.RefVNID(vnid)
+				node.policy.EnsureVNIDRules(vnid)
 			}
 		}
 	}
 
+	go kwait.Forever(node.syncVNIDRules, 24*time.Hour)
+
 	log.V(5).Infof("openshift-sdn network plugin ready")
 	node.markPodNetworkReady()
 
@@ -387,18 +389,17 @@ func (node *OsdnNode) handleAddOrUpdateService(obj, oldObj interface{}, eventTyp
 	}
 
 	node.AddServiceRules(serv, netid)
-	if !exists {
-		node.policy.RefVNID(netid)
-	}
+	node.policy.EnsureVNIDRules(netid)
 }
 
 func (node *OsdnNode) handleDeleteService(obj interface{}) {
 	serv := obj.(*kapi.Service)
 	log.V(5).Infof("Watch %s event for Service %q", watch.Deleted, serv.Name)
 	node.DeleteServiceRules(serv)
+}
 
-	netid, err := node.policy.GetVNID(serv.Namespace)
-	if err == nil {
-		node.policy.UnrefVNID(netid)
+func (node *OsdnNode) syncVNIDRules() {
+	for _, vnid := range node.oc.FindUnusedVNIDs() {
+		node.policy.RemoveVNIDRules(uint32(vnid))
 	}
 }
diff --git a/pkg/sdn/plugin/ovscontroller.go b/pkg/sdn/plugin/ovscontroller.go
index 8cf5b22e21ef..67d9f877dd4e 100644
--- a/pkg/sdn/plugin/ovscontroller.go
+++ b/pkg/sdn/plugin/ovscontroller.go
@@ -13,6 +13,7 @@ import (
 	osapi "github.com/openshift/origin/pkg/sdn/api"
 	"github.com/openshift/origin/pkg/util/ovs"
 
+	"k8s.io/apimachinery/pkg/util/sets"
 	kapi "k8s.io/kubernetes/pkg/api"
 )
 
@@ -540,3 +541,64 @@ func (oc *ovsController) UpdateVXLANMulticastFlows(remoteIPs []string) error {
 
 	return otx.EndTransaction()
 }
+
+// FindUnusedVNIDs returns a list of VNIDs for which there are table 80 "check" rules,
+// but no table 60/70 "load" rules (meaning that there are no longer any pods or services
+// on this node with that VNID). There is no locking with respect to other ovsController
+// actions, but as long as the "add a pod" and "add a service" codepaths add the
+// pod/service-specific rules before they call policy.EnsureVNIDRules(), there is no
+// race condition.
+func (oc *ovsController) FindUnusedVNIDs() []int {
+	flows, err := oc.ovs.DumpFlows()
+	if err != nil {
+		glog.Errorf("FindUnusedVNIDs: could not DumpFlows: %v", err)
+		return nil
+	}
+
+	// inUseVNIDs is the set of VNIDs in use by pods or services on this node.
+	// policyVNIDs is the set of VNIDs that we have rules for delivering to.
+ inUseVNIDs := sets.NewInt() + policyVNIDs := sets.NewInt() + for _, flow := range flows { + parsed, err := ovs.ParseFlow(ovs.ParseForDump, flow) + if err != nil { + glog.Warningf("FindUnusedVNIDs: could not parse flow %q: %v", flow, err) + continue + } + + // A VNID is in use if there is a table 60 (services) or 70 (pods) flow that + // loads that VNID into reg1 for later comparison. + if parsed.Table == 60 || parsed.Table == 70 { + // Can't use FindAction here since there may be multiple "load"s + for _, action := range parsed.Actions { + if action.Name != "load" || strings.Index(action.Value, "REG1") == -1 { + continue + } + vnidEnd := strings.Index(action.Value, "->") + if vnidEnd == -1 { + continue + } + vnid, err := strconv.ParseInt(action.Value[:vnidEnd], 0, 32) + if err != nil { + glog.Warningf("FindUnusedVNIDs: could not parse VNID in 'load:%s': %v", action.Value, err) + continue + } + inUseVNIDs.Insert(int(vnid)) + break + } + } + + // A VNID is checked by policy if there is a table 80 rule comparing reg1 to it. + if field, exists := parsed.FindField("reg1"); exists { + vnid, err := strconv.ParseInt(field.Value, 0, 32) + if err != nil { + glog.Warningf("FindUnusedVNIDs: could not parse VNID in 'reg1=%s': %v", field.Value, err) + continue + } + policyVNIDs.Insert(int(vnid)) + } + + } + + return policyVNIDs.Difference(inUseVNIDs).UnsortedList() +} diff --git a/pkg/sdn/plugin/ovscontroller_test.go b/pkg/sdn/plugin/ovscontroller_test.go index 1590fd08b740..6990626c17eb 100644 --- a/pkg/sdn/plugin/ovscontroller_test.go +++ b/pkg/sdn/plugin/ovscontroller_test.go @@ -3,6 +3,7 @@ package plugin import ( "fmt" "reflect" + "sort" "strings" "testing" @@ -980,3 +981,120 @@ func TestAlreadySetUp(t *testing.T) { } } } + +func TestSyncVNIDRules(t *testing.T) { + testcases := []struct { + flows []string + unused []int + }{ + { + /* Both VNIDs have 1 pod and 1 service, so they stay */ + flows: []string{ + "table=60,priority=200,reg0=0 actions=output:2", + "table=60,priority=100,ip,nw_dst=172.30.0.1,nw_frag=later actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,ip,nw_dst=172.30.156.103,nw_frag=later actions=load:0xcb81e9->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,ip,nw_dst=172.30.76.192,nw_frag=later actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=443 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,udp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.156.103,tp_dst=5454 actions=load:0xcb81e9->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.76.192,tp_dst=5454 actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.76.192,tp_dst=5455 actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=0 actions=drop", + "table=70,priority=100,ip,nw_dst=10.129.0.2 actions=load:0x55fac->NXM_NX_REG1[],load:0x3->NXM_NX_REG2[],goto_table:80", + "table=70,priority=100,ip,nw_dst=10.129.0.3 actions=load:0xcb81e9->NXM_NX_REG1[],load:0x4->NXM_NX_REG2[],goto_table:80", + "table=70,priority=0 actions=drop", + 
"table=80,priority=300,ip,nw_src=10.129.0.1 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg0=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg1=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0x55fac,reg1=0x55fac actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0xcb81e9,reg1=0xcb81e9 actions=output:NXM_NX_REG2[]", + "table=80,priority=0 actions=drop", + }, + unused: []int{}, + }, + { + /* 0xcb81e9 has just a pod, 0x55fac has just a service, both stay */ + flows: []string{ + "table=60,priority=200,reg0=0 actions=output:2", + "table=60,priority=100,ip,nw_dst=172.30.0.1,nw_frag=later actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,ip,nw_dst=172.30.76.192,nw_frag=later actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=443 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,udp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.76.192,tp_dst=5454 actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.76.192,tp_dst=5455 actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=0 actions=drop", + "table=70,priority=100,ip,nw_dst=10.129.0.3 actions=load:0xcb81e9->NXM_NX_REG1[],load:0x4->NXM_NX_REG2[],goto_table:80", + "table=70,priority=0 actions=drop", + "table=80,priority=300,ip,nw_src=10.129.0.1 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg0=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg1=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0x55fac,reg1=0x55fac actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0xcb81e9,reg1=0xcb81e9 actions=output:NXM_NX_REG2[]", + "table=80,priority=0 actions=drop", + }, + unused: []int{}, + }, + { + /* 0xcb81e9 gets GCed, 0x55fac stays */ + flows: []string{ + "table=60,priority=200,reg0=0 actions=output:2", + "table=60,priority=100,ip,nw_dst=172.30.0.1,nw_frag=later actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,ip,nw_dst=172.30.76.192,nw_frag=later actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=443 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,udp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.76.192,tp_dst=5454 actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.76.192,tp_dst=5455 actions=load:0x55fac->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=0 actions=drop", + "table=70,priority=100,ip,nw_dst=10.129.0.2 actions=load:0x55fac->NXM_NX_REG1[],load:0x3->NXM_NX_REG2[],goto_table:80", + "table=70,priority=0 actions=drop", + "table=80,priority=300,ip,nw_src=10.129.0.1 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg0=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg1=0 
actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0x55fac,reg1=0x55fac actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0xcb81e9,reg1=0xcb81e9 actions=output:NXM_NX_REG2[]", + "table=80,priority=0 actions=drop", + }, + unused: []int{0xcb81e9}, + }, + { + /* Both get GCed */ + flows: []string{ + "table=60,priority=200,reg0=0 actions=output:2", + "table=60,priority=100,ip,nw_dst=172.30.0.1,nw_frag=later actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=443 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,udp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=100,tcp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80", + "table=60,priority=0 actions=drop", + "table=70,priority=0 actions=drop", + "table=80,priority=300,ip,nw_src=10.129.0.1 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg0=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=200,reg1=0 actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0x55fac,reg1=0x55fac actions=output:NXM_NX_REG2[]", + "table=80,priority=100,reg0=0xcb81e9,reg1=0xcb81e9 actions=output:NXM_NX_REG2[]", + "table=80,priority=0 actions=drop", + }, + unused: []int{0x55fac, 0xcb81e9}, + }, + } + + for i, tc := range testcases { + _, oc, _ := setup(t) + + otx := oc.NewTransaction() + for _, flow := range tc.flows { + otx.AddFlow(flow) + } + if err := otx.EndTransaction(); err != nil { + t.Fatalf("(%d) unexpected error from AddFlow: %v", i, err) + } + + unused := oc.FindUnusedVNIDs() + sort.Ints(unused) + if !reflect.DeepEqual(unused, tc.unused) { + t.Fatalf("(%d) wrong result, expected %v, got %v", i, tc.unused, unused) + } + } +} diff --git a/pkg/sdn/plugin/pod_linux.go b/pkg/sdn/plugin/pod_linux.go index 15beea38b41a..f9dd838d4d48 100644 --- a/pkg/sdn/plugin/pod_linux.go +++ b/pkg/sdn/plugin/pod_linux.go @@ -405,7 +405,7 @@ func (m *podManager) setup(req *cniserver.PodRequest) (*cnitypes.Result, *runnin return nil, nil, err } - m.policy.RefVNID(vnid) + m.policy.EnsureVNIDRules(vnid) success = true return ipamResult, &runningPod{podPortMapping: podPortMapping, vnid: vnid, ofport: ofport}, nil } @@ -445,9 +445,6 @@ func (m *podManager) teardown(req *cniserver.PodRequest) error { if err := m.ovs.TearDownPod(hostVethName, podIP); err != nil { errList = append(errList, err) } - if vnid, err := m.policy.GetVNID(req.PodNamespace); err == nil { - m.policy.UnrefVNID(vnid) - } } if err := m.ipamDel(req.ContainerId); err != nil { diff --git a/pkg/sdn/plugin/singletenant.go b/pkg/sdn/plugin/singletenant.go index 7393e390ca02..2a5354d60ddf 100644 --- a/pkg/sdn/plugin/singletenant.go +++ b/pkg/sdn/plugin/singletenant.go @@ -41,8 +41,8 @@ func (sp *singleTenantPlugin) GetMulticastEnabled(vnid uint32) bool { return false } -func (sp *singleTenantPlugin) RefVNID(vnid uint32) { +func (sp *singleTenantPlugin) EnsureVNIDRules(vnid uint32) { } -func (sp *singleTenantPlugin) UnrefVNID(vnid uint32) { +func (sp *singleTenantPlugin) RemoveVNIDRules(vnid uint32) { }
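
The GC computation in FindUnusedVNIDs() can also be read in isolation.
Below is a stdlib-only Go sketch of the same in-use/policy set
difference over raw dumped flows; flowTable() and the inline string
scanning are simplified stand-ins for pkg/util/ovs's ParseFlow() (they
are illustrative, not the real parser), and the sample flows are
borrowed from the test cases above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// flowTable extracts the table number from a dumped flow like
// "table=70,priority=100,...". It returns -1 if the flow can't be parsed.
func flowTable(flow string) int {
	if !strings.HasPrefix(flow, "table=") {
		return -1
	}
	rest := flow[len("table="):]
	end := strings.IndexAny(rest, ", ")
	if end < 0 {
		end = len(rest)
	}
	n, err := strconv.Atoi(rest[:end])
	if err != nil {
		return -1
	}
	return n
}

// findUnusedVNIDs mirrors ovsController.FindUnusedVNIDs: a VNID is in use
// if some table 60/70 flow loads it into REG1, and has policy rules if
// some table 80 flow matches reg1 against it. The unused VNIDs are the
// ones with policy rules but no remaining users. (VNID 0 is skipped here;
// the real code leaves that to RemoveVNIDRules' early return.)
func findUnusedVNIDs(flows []string) []int {
	inUse := map[int]bool{}
	policy := map[int]bool{}

	for _, flow := range flows {
		switch table := flowTable(flow); {
		case table == 60 || table == 70:
			// Scan every "load:<vnid>->NXM_NX_REG1[]" action.
			rest := flow
			for {
				i := strings.Index(rest, "load:")
				if i < 0 {
					break
				}
				rest = rest[i+len("load:"):]
				arrow := strings.Index(rest, "->")
				if arrow < 0 {
					break
				}
				if strings.HasPrefix(rest[arrow:], "->NXM_NX_REG1[]") {
					if vnid, err := strconv.ParseInt(rest[:arrow], 0, 32); err == nil {
						inUse[int(vnid)] = true
					}
				}
			}
		case table == 80:
			// Scan for a "reg1=<vnid>" match field.
			for _, field := range strings.FieldsFunc(flow, func(r rune) bool {
				return r == ',' || r == ' '
			}) {
				if strings.HasPrefix(field, "reg1=") {
					if vnid, err := strconv.ParseInt(field[len("reg1="):], 0, 32); err == nil {
						policy[int(vnid)] = true
					}
				}
			}
		}
	}

	var unused []int
	for vnid := range policy {
		if vnid != 0 && !inUse[vnid] {
			unused = append(unused, vnid)
		}
	}
	return unused
}

func main() {
	flows := []string{
		// One pod still using VNID 0x55fac...
		"table=70,priority=100,ip,nw_dst=10.129.0.2 actions=load:0x55fac->NXM_NX_REG1[],load:0x3->NXM_NX_REG2[],goto_table:80",
		// ...but policy rules exist for both 0x55fac and 0xcb81e9.
		"table=80,priority=100,reg0=0x55fac,reg1=0x55fac actions=output:NXM_NX_REG2[]",
		"table=80,priority=100,reg0=0xcb81e9,reg1=0xcb81e9 actions=output:NXM_NX_REG2[]",
	}
	fmt.Printf("%#x\n", findUnusedVNIDs(flows)) // [0xcb81e9]
}

Running it prints [0xcb81e9], matching the third test case above: the
VNID that still has a table 80 policy rule but no table 60/70 users is
the one the periodic syncVNIDRules() pass would garbage collect.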