From b78a6dfaa09a711ac3cbd5ceb012196825777067 Mon Sep 17 00:00:00 2001
From: "xulei.3"
Date: Thu, 15 Aug 2024 13:09:51 +0800
Subject: [PATCH] refactor(qrm): refactor the state module for enhanced
 extensibility

---
 .../cpueviction/cpu_eviciton_test.go          |   2 +-
 .../strategy/pressure_load_test.go            |   2 +-
 .../qrm-plugins/cpu/dynamicpolicy/policy.go   |   2 +-
 .../cpu/dynamicpolicy/policy_test.go          |   4 +-
 .../cpu/dynamicpolicy/state/state.go          |   2 +
 .../dynamicpolicy/state/state_checkpoint.go   |  22 +--
 .../cpu/dynamicpolicy/state/state_test.go     |  10 +-
 .../cpu/dynamicpolicy/state/util.go           | 130 +++++++++---------
 .../qrm-plugins/cpu/dynamicpolicy/util.go     |   3 +-
 .../qrm-plugins/cpu/nativepolicy/policy.go    |   2 +-
 .../cpu/nativepolicy/policy_test.go           |   3 +-
 .../qrm-plugins/cpu/nativepolicy/util/util.go |   3 +-
 12 files changed, 97 insertions(+), 88 deletions(-)

diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/cpu_eviciton_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/cpu_eviciton_test.go
index ae8559b321..2d3a11c7f4 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/cpu_eviciton_test.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/cpu_eviciton_test.go
@@ -53,7 +53,7 @@ func makeState(topo *machine.CPUTopology) (qrmstate.State, error) {
 	if err != nil {
 		return nil, fmt.Errorf("make tmp dir for checkpoint failed with error: %v", err)
 	}
-	return qrmstate.NewCheckpointState(tmpDir, "test", "test", topo, false)
+	return qrmstate.NewCheckpointState(tmpDir, "test", "test", topo, false, qrmstate.GenerateMachineStateFromPodEntries)
 }
 
 func TestNewCPUPressureEviction(t *testing.T) {
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go
index 6707d92c57..6b46500328 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go
@@ -104,7 +104,7 @@ func makeState(topo *machine.CPUTopology) (qrmstate.State, error) {
 	if err != nil {
 		return nil, fmt.Errorf("make tmp dir for checkpoint failed with error: %v", err)
 	}
-	return qrmstate.NewCheckpointState(tmpDir, "test", "test", topo, false)
+	return qrmstate.NewCheckpointState(tmpDir, "test", "test", topo, false, qrmstate.GenerateMachineStateFromPodEntries)
 }
 
 func TestNewCPUPressureLoadEviction(t *testing.T) {
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go
index 05f62be1be..5a7203e6a7 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go
@@ -144,7 +144,7 @@ func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration
 	}
 
 	stateImpl, stateErr := state.NewCheckpointState(conf.GenericQRMPluginConfiguration.StateFileDirectory, cpuPluginStateFileName,
-		cpuconsts.CPUResourcePluginPolicyNameDynamic, agentCtx.CPUTopology, conf.SkipCPUStateCorruption)
+		cpuconsts.CPUResourcePluginPolicyNameDynamic, agentCtx.CPUTopology, conf.SkipCPUStateCorruption, state.GenerateMachineStateFromPodEntries)
 	if stateErr != nil {
 		return false, agent.ComponentStub{}, fmt.Errorf("NewCheckpointState failed with error: %v", stateErr)
 	}
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go
index 1969df599f..12fce4cd25 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go
@@ -27,7 +27,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -53,6 +52,7 @@ import (
 	cgroupcm "github.com/kubewharf/katalyst-core/pkg/util/cgroup/common"
 	cgroupcmutils "github.com/kubewharf/katalyst-core/pkg/util/cgroup/manager"
 	"github.com/kubewharf/katalyst-core/pkg/util/machine"
+	"github.com/stretchr/testify/require"
 )
 
 const (
@@ -82,7 +82,7 @@ func getTestDynamicPolicyWithInitialization(topology *machine.CPUTopology, state
 }
 
 func getTestDynamicPolicyWithoutInitialization(topology *machine.CPUTopology, stateFileDirectory string) (*DynamicPolicy, error) {
-	stateImpl, err := state.NewCheckpointState(stateFileDirectory, cpuPluginStateFileName, cpuconsts.CPUResourcePluginPolicyNameDynamic, topology, false)
+	stateImpl, err := state.NewCheckpointState(stateFileDirectory, cpuPluginStateFileName, cpuconsts.CPUResourcePluginPolicyNameDynamic, topology, false, state.GenerateMachineStateFromPodEntries)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state.go
index c8576e3c1b..6493dde31b 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state.go
@@ -749,3 +749,5 @@ type State interface {
 type ReadonlyState interface {
 	reader
 }
+
+type GenerateMachineStateFromPodEntriesFunc func(topology *machine.CPUTopology, podEntries PodEntries) (NUMANodeMap, error)
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_checkpoint.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_checkpoint.go
index d189864780..f8a9ba75d7 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_checkpoint.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_checkpoint.go
@@ -40,13 +40,14 @@ type stateCheckpoint struct {
 	checkpointName    string
 	// when we add new properties to checkpoint,
 	// it will cause checkpoint corruption, and we should skip it
-	skipStateCorruption bool
+	skipStateCorruption                bool
+	GenerateMachineStateFromPodEntries GenerateMachineStateFromPodEntriesFunc
 }
 
 var _ State = &stateCheckpoint{}
 
 func NewCheckpointState(stateDir, checkpointName, policyName string,
-	topology *machine.CPUTopology, skipStateCorruption bool,
+	topology *machine.CPUTopology, skipStateCorruption bool, generateMachineStateFunc GenerateMachineStateFromPodEntriesFunc,
 ) (State, error) {
 	checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
 	if err != nil {
@@ -54,21 +55,22 @@ func NewCheckpointState(stateDir, checkpointName, policyName string,
 	}
 
 	sc := &stateCheckpoint{
-		cache:               NewCPUPluginState(topology),
-		policyName:          policyName,
-		checkpointManager:   checkpointManager,
-		checkpointName:      checkpointName,
-		skipStateCorruption: skipStateCorruption,
+		cache:                              NewCPUPluginState(topology),
+		policyName:                         policyName,
+		checkpointManager:                  checkpointManager,
+		checkpointName:                     checkpointName,
+		skipStateCorruption:                skipStateCorruption,
+		GenerateMachineStateFromPodEntries: generateMachineStateFunc,
 	}
 
-	if err := sc.restoreState(topology); err != nil {
+	if err := sc.RestoreState(topology); err != nil {
 		return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete "+
 			"the cpu plugin checkpoint file %q before restarting Kubelet", err, path.Join(stateDir, checkpointName))
 	}
 	return sc, nil
 }
 
-func (sc *stateCheckpoint) restoreState(topology *machine.CPUTopology) error {
+func (sc *stateCheckpoint) RestoreState(topology *machine.CPUTopology) error {
 	sc.Lock()
 	defer sc.Unlock()
 	var err error
@@ -94,7 +96,7 @@ func (sc *stateCheckpoint) restoreState(topology *machine.CPUTopology) error {
 		return fmt.Errorf("[cpu_plugin] configured policy %q differs from state checkpoint policy %q", sc.policyName, checkpoint.PolicyName)
 	}
 
-	generatedMachineState, err := GenerateMachineStateFromPodEntries(topology, checkpoint.PodEntries, sc.policyName)
+	generatedMachineState, err := sc.GenerateMachineStateFromPodEntries(topology, checkpoint.PodEntries)
 	if err != nil {
 		return fmt.Errorf("GenerateMachineStateFromPodEntries failed with error: %v", err)
 	}
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go
index 763b4a557d..95acb90ce9 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go
@@ -1449,7 +1449,7 @@ func TestNewCheckpointState(t *testing.T) {
 			require.NoError(t, cpm.CreateCheckpoint(cpuPluginStateFileName, checkpoint), "could not create testing checkpoint")
 		}
 
-		restoredState, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, cpuTopology, false)
+		restoredState, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, cpuTopology, false, GenerateMachineStateFromPodEntries)
 		if strings.TrimSpace(tc.expectedError) != "" {
 			require.Error(t, err)
 			require.Contains(t, err.Error(), "could not restore state from checkpoint:")
@@ -1457,7 +1457,7 @@
 
 			// test skip corruption
 			if strings.Contains(err.Error(), "checkpoint is corrupted") {
-				_, err = NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, cpuTopology, true)
+				_, err = NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, cpuTopology, true, GenerateMachineStateFromPodEntries)
 				require.Nil(t, err)
 			}
 		} else {
@@ -1945,7 +1945,7 @@
 			}
 			defer os.RemoveAll(testingDir)
 
-			state1, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, tc.cpuTopology, false)
+			state1, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, tc.cpuTopology, false, GenerateMachineStateFromPodEntries)
 			as.Nil(err)
 
 			state1.ClearState()
@@ -1953,7 +1953,7 @@
 			state1.SetMachineState(tc.machineState)
 			state1.SetPodEntries(tc.podEntries)
 
-			state2, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, tc.cpuTopology, false)
+			state2, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, tc.cpuTopology, false, GenerateMachineStateFromPodEntries)
 			as.Nil(err)
 			assertStateEqual(t, state2, state1)
 		})
@@ -2438,7 +2438,7 @@
 		t.Run(tc.description, func(t *testing.T) {
 			t.Parallel()
 
-			state, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, tc.cpuTopology, false)
+			state, err := NewCheckpointState(testingDir, cpuPluginStateFileName, policyName, tc.cpuTopology, false, GenerateMachineStateFromPodEntries)
 			as.Nil(err)
 
 			state.ClearState()
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go
index cd005dfd87..a6d4835bf2 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go
@@ -25,7 +25,6 @@ import (
 	"k8s.io/klog/v2"
 
 	apiconsts "github.com/kubewharf/katalyst-api/pkg/consts"
-	"github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/consts"
 	cpuconsts "github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/consts"
 	"github.com/kubewharf/katalyst-core/pkg/util/general"
 	"github.com/kubewharf/katalyst-core/pkg/util/machine"
@@ -187,68 +186,9 @@ func GetNonBindingSharedRequestedQuantityFromPodEntries(podEntries PodEntries, n
 	return CPUPreciseCeil(reqFloat64)
 }
 
-// GenerateMachineStateFromPodEntries returns NUMANodeMap for given resource based on
-// machine info and reserved resources along with existed pod entries
-func GenerateMachineStateFromPodEntries(topology *machine.CPUTopology, podEntries PodEntries, policyName string) (NUMANodeMap, error) {
-	if topology == nil {
-		return nil, fmt.Errorf("GenerateMachineStateFromPodEntries got nil topology")
-	}
-
-	machineState := make(NUMANodeMap)
-	for _, numaNode := range topology.CPUDetails.NUMANodes().ToSliceInt64() {
-		numaNodeState := &NUMANodeState{}
-		numaNodeAllCPUs := topology.CPUDetails.CPUsInNUMANodes(int(numaNode)).Clone()
-		allocatedCPUsInNumaNode := machine.NewCPUSet()
-
-		for podUID, containerEntries := range podEntries {
-			if containerEntries.IsPoolEntry() {
-				continue
-			}
-			for containerName, allocationInfo := range containerEntries {
-				if allocationInfo == nil {
-					general.Warningf("nil allocationInfo in podEntries")
-					continue
-				}
-
-				// the container hasn't cpuset assignment in the current NUMA node
-				if allocationInfo.OriginalTopologyAwareAssignments[int(numaNode)].Size() == 0 &&
-					allocationInfo.TopologyAwareAssignments[int(numaNode)].Size() == 0 {
-					continue
-				}
-
-				switch policyName {
-				case consts.CPUResourcePluginPolicyNameDynamic:
-					// only modify allocated and default properties in NUMA node state if the policy is dynamic and the entry indicates numa_binding.
-					// shared_cores with numa_binding also contributes to numaNodeState.AllocatedCPUSet,
-					// it's convenient that we can skip NUMA with AllocatedCPUSet > 0 when allocating CPUs for dedicated_cores with numa_binding.
-					if CheckNUMABinding(allocationInfo) {
-						allocatedCPUsInNumaNode = allocatedCPUsInNumaNode.Union(allocationInfo.OriginalTopologyAwareAssignments[int(numaNode)])
-					}
-				case consts.CPUResourcePluginPolicyNameNative:
-					// only modify allocated and default properties in NUMA node state if the policy is native and the QoS class is Guaranteed
-					if CheckDedicatedPool(allocationInfo) {
-						allocatedCPUsInNumaNode = allocatedCPUsInNumaNode.Union(allocationInfo.OriginalTopologyAwareAssignments[int(numaNode)])
-					}
-				}
-
-				topologyAwareAssignments, _ := machine.GetNumaAwareAssignments(topology, allocationInfo.AllocationResult.Intersection(numaNodeAllCPUs))
-				originalTopologyAwareAssignments, _ := machine.GetNumaAwareAssignments(topology, allocationInfo.OriginalAllocationResult.Intersection(numaNodeAllCPUs))
-
-				numaNodeAllocationInfo := allocationInfo.Clone()
-				numaNodeAllocationInfo.AllocationResult = allocationInfo.AllocationResult.Intersection(numaNodeAllCPUs)
-				numaNodeAllocationInfo.OriginalAllocationResult = allocationInfo.OriginalAllocationResult.Intersection(numaNodeAllCPUs)
-				numaNodeAllocationInfo.TopologyAwareAssignments = topologyAwareAssignments
-				numaNodeAllocationInfo.OriginalTopologyAwareAssignments = originalTopologyAwareAssignments
-
-				numaNodeState.SetAllocationInfo(podUID, containerName, numaNodeAllocationInfo)
-			}
-		}
-
-		numaNodeState.AllocatedCPUSet = allocatedCPUsInNumaNode.Clone()
-		numaNodeState.DefaultCPUSet = numaNodeAllCPUs.Difference(numaNodeState.AllocatedCPUSet)
-		machineState[int(numaNode)] = numaNodeState
-	}
-	return machineState, nil
+// GenerateMachineStateFromPodEntries generates the machine state for the dynamic policy
+func GenerateMachineStateFromPodEntries(topology *machine.CPUTopology, podEntries PodEntries) (NUMANodeMap, error) {
+	return GenerateMachineStateFromPodEntriesByPolicy(topology, podEntries, cpuconsts.CPUResourcePluginPolicyNameDynamic)
 }
 
 func IsIsolationPool(poolName string) bool {
@@ -490,3 +430,67 @@ func checkCPUSetMap(map1, map2 map[int]machine.CPUSet) bool {
 func CPUPreciseCeil(request float64) int {
 	return int(math.Ceil(float64(int(request*1000)) / 1000))
 }
+
+// GenerateMachineStateFromPodEntriesByPolicy returns NUMANodeMap for the given resource based on
+// machine info and reserved resources along with existing pod entries and the policy name
+func GenerateMachineStateFromPodEntriesByPolicy(topology *machine.CPUTopology, podEntries PodEntries, policyName string) (NUMANodeMap, error) {
+	if topology == nil {
+		return nil, fmt.Errorf("GenerateMachineStateFromPodEntriesByPolicy got nil topology")
+	}
+
+	machineState := make(NUMANodeMap)
+	for _, numaNode := range topology.CPUDetails.NUMANodes().ToSliceInt64() {
+		numaNodeState := &NUMANodeState{}
+		numaNodeAllCPUs := topology.CPUDetails.CPUsInNUMANodes(int(numaNode)).Clone()
+		allocatedCPUsInNumaNode := machine.NewCPUSet()
+
+		for podUID, containerEntries := range podEntries {
+			if containerEntries.IsPoolEntry() {
+				continue
+			}
+			for containerName, allocationInfo := range containerEntries {
+				if allocationInfo == nil {
+					general.Warningf("nil allocationInfo in podEntries")
+					continue
+				}
+
+				// the container doesn't have a cpuset assignment in the current NUMA node
+				if allocationInfo.OriginalTopologyAwareAssignments[int(numaNode)].Size() == 0 &&
+					allocationInfo.TopologyAwareAssignments[int(numaNode)].Size() == 0 {
+					continue
+				}
+
+				switch policyName {
+				case cpuconsts.CPUResourcePluginPolicyNameDynamic:
+					// only modify allocated and default properties in NUMA node state if the policy is dynamic and the entry indicates numa_binding.
+					// shared_cores with numa_binding also contributes to numaNodeState.AllocatedCPUSet,
+					// which makes it convenient to skip NUMA nodes with AllocatedCPUSet > 0 when allocating CPUs for dedicated_cores with numa_binding.
+					if CheckNUMABinding(allocationInfo) {
+						allocatedCPUsInNumaNode = allocatedCPUsInNumaNode.Union(allocationInfo.OriginalTopologyAwareAssignments[int(numaNode)])
+					}
+				case cpuconsts.CPUResourcePluginPolicyNameNative:
+					// only modify allocated and default properties in NUMA node state if the policy is native and the QoS class is Guaranteed
+					if CheckDedicatedPool(allocationInfo) {
+						allocatedCPUsInNumaNode = allocatedCPUsInNumaNode.Union(allocationInfo.OriginalTopologyAwareAssignments[int(numaNode)])
+					}
+				}
+
+				topologyAwareAssignments, _ := machine.GetNumaAwareAssignments(topology, allocationInfo.AllocationResult.Intersection(numaNodeAllCPUs))
+				originalTopologyAwareAssignments, _ := machine.GetNumaAwareAssignments(topology, allocationInfo.OriginalAllocationResult.Intersection(numaNodeAllCPUs))
+
+				numaNodeAllocationInfo := allocationInfo.Clone()
+				numaNodeAllocationInfo.AllocationResult = allocationInfo.AllocationResult.Intersection(numaNodeAllCPUs)
+				numaNodeAllocationInfo.OriginalAllocationResult = allocationInfo.OriginalAllocationResult.Intersection(numaNodeAllCPUs)
+				numaNodeAllocationInfo.TopologyAwareAssignments = topologyAwareAssignments
+				numaNodeAllocationInfo.OriginalTopologyAwareAssignments = originalTopologyAwareAssignments
+
+				numaNodeState.SetAllocationInfo(podUID, containerName, numaNodeAllocationInfo)
+			}
+		}
+
+		numaNodeState.AllocatedCPUSet = allocatedCPUsInNumaNode.Clone()
+		numaNodeState.DefaultCPUSet = numaNodeAllCPUs.Difference(numaNodeState.AllocatedCPUSet)
+		machineState[int(numaNode)] = numaNodeState
+	}
+	return machineState, nil
+}
diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/util.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/util.go
index c339c4a372..b2cc5354d4 100644
--- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/util.go
+++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/util.go
@@ -23,7 +23,6 @@ import (
 	pluginapi "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1"
 
 	apiconsts "github.com/kubewharf/katalyst-api/pkg/consts"
-	cpuconsts "github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/consts"
 	"github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state"
 	"github.com/kubewharf/katalyst-core/pkg/util/general"
 	"github.com/kubewharf/katalyst-core/pkg/util/machine"
@@ -38,7 +37,7 @@ func getProportionalSize(oldPoolSize, oldTotalSize, newTotalSize int, ceil bool)
 }
 
 func generateMachineStateFromPodEntries(topology *machine.CPUTopology, podEntries state.PodEntries) (state.NUMANodeMap, error) {
-	return state.GenerateMachineStateFromPodEntries(topology, podEntries, cpuconsts.CPUResourcePluginPolicyNameDynamic)
+	return state.GenerateMachineStateFromPodEntries(topology, podEntries)
 }
 
 // updateAllocationInfoByReq updates allocationInfo by latest req when admitting active pod,
diff --git a/pkg/agent/qrm-plugins/cpu/nativepolicy/policy.go b/pkg/agent/qrm-plugins/cpu/nativepolicy/policy.go
index 80c930ce0e..31b0fa80a4 100644
--- a/pkg/agent/qrm-plugins/cpu/nativepolicy/policy.go
+++ b/pkg/agent/qrm-plugins/cpu/nativepolicy/policy.go
@@ -109,7 +109,7 @@ func NewNativePolicy(agentCtx *agent.GenericContext, conf *config.Configuration,
 	general.Infof("new native policy")
 
 	stateImpl, stateErr := state.NewCheckpointState(conf.GenericQRMPluginConfiguration.StateFileDirectory, cpuPluginStateFileName,
-		cpuconsts.CPUResourcePluginPolicyNameNative, agentCtx.CPUTopology, conf.SkipCPUStateCorruption)
+		cpuconsts.CPUResourcePluginPolicyNameNative, agentCtx.CPUTopology, conf.SkipCPUStateCorruption, nativepolicyutil.GenerateMachineStateFromPodEntries)
 	if stateErr != nil {
 		return false, agent.ComponentStub{}, fmt.Errorf("NewCheckpointState failed with error: %v", stateErr)
 	}
diff --git a/pkg/agent/qrm-plugins/cpu/nativepolicy/policy_test.go b/pkg/agent/qrm-plugins/cpu/nativepolicy/policy_test.go
index 3b32b615ef..5271b1f00d 100644
--- a/pkg/agent/qrm-plugins/cpu/nativepolicy/policy_test.go
+++ b/pkg/agent/qrm-plugins/cpu/nativepolicy/policy_test.go
@@ -30,6 +30,7 @@ import (
 
 	cpuconsts "github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/consts"
 	"github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state"
+	nativepolicyutil "github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/nativepolicy/util"
 	"github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/util"
 	"github.com/kubewharf/katalyst-core/pkg/config/agent/dynamic"
 	"github.com/kubewharf/katalyst-core/pkg/metrics"
@@ -42,7 +43,7 @@ const (
 
 func getTestNativePolicy(topology *machine.CPUTopology, stateFileDirectory string) (*NativePolicy, error) {
 	stateImpl, err := state.NewCheckpointState(stateFileDirectory, cpuPluginStateFileName,
-		cpuconsts.CPUResourcePluginPolicyNameNative, topology, false)
+		cpuconsts.CPUResourcePluginPolicyNameNative, topology, false, nativepolicyutil.GenerateMachineStateFromPodEntries)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/agent/qrm-plugins/cpu/nativepolicy/util/util.go b/pkg/agent/qrm-plugins/cpu/nativepolicy/util/util.go
index 355781445b..33cf9d8a4a 100644
--- a/pkg/agent/qrm-plugins/cpu/nativepolicy/util/util.go
+++ b/pkg/agent/qrm-plugins/cpu/nativepolicy/util/util.go
@@ -22,6 +22,7 @@ import (
 	"github.com/kubewharf/katalyst-core/pkg/util/machine"
 )
 
+// GenerateMachineStateFromPodEntries generates the machine state for the native policy
 func GenerateMachineStateFromPodEntries(topology *machine.CPUTopology, podEntries state.PodEntries) (state.NUMANodeMap, error) {
-	return state.GenerateMachineStateFromPodEntries(topology, podEntries, cpuconsts.CPUResourcePluginPolicyNameNative)
+	return state.GenerateMachineStateFromPodEntriesByPolicy(topology, podEntries, cpuconsts.CPUResourcePluginPolicyNameNative)
 }
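
Note on the extension point this patch introduces: NewCheckpointState no longer selects machine-state generation from policyName inside the state package; each policy injects a GenerateMachineStateFromPodEntriesFunc, which stateCheckpoint stores and RestoreState invokes to rebuild the NUMANodeMap from checkpointed pod entries. A minimal sketch of the resulting call pattern follows, assuming only the APIs shown in this patch; the state directory argument, the checkpoint names, and the "custom-policy" generator are illustrative placeholders, not code from the patch:

package example

import (
	"fmt"

	cpuconsts "github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/consts"
	"github.com/kubewharf/katalyst-core/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state"
	"github.com/kubewharf/katalyst-core/pkg/util/machine"
)

// buildStates sketches how callers construct checkpoint state after the refactor.
func buildStates(stateDir string, topology *machine.CPUTopology) (state.State, error) {
	// Dynamic policy: pass the package-level generator kept in the state package.
	if _, err := state.NewCheckpointState(stateDir, "cpu_plugin_state", // placeholder checkpoint name
		cpuconsts.CPUResourcePluginPolicyNameDynamic, topology, false,
		state.GenerateMachineStateFromPodEntries); err != nil {
		return nil, fmt.Errorf("NewCheckpointState failed with error: %v", err)
	}

	// A new policy can supply its own generator without modifying the state
	// package, e.g. by wrapping the exported policy-parameterized helper.
	customGenerator := func(topo *machine.CPUTopology, podEntries state.PodEntries) (state.NUMANodeMap, error) {
		// hypothetical: reuse the native-policy accounting rules for a custom policy
		return state.GenerateMachineStateFromPodEntriesByPolicy(topo, podEntries, cpuconsts.CPUResourcePluginPolicyNameNative)
	}
	return state.NewCheckpointState(stateDir, "custom_plugin_state", "custom-policy", // placeholders
		topology, false, customGenerator)
}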