From b3a40812bad7b932cc7ffb2dfd725f46b33c4e01 Mon Sep 17 00:00:00 2001 From: linzhecheng Date: Wed, 26 Jul 2023 20:29:04 +0800 Subject: [PATCH] feat: headroom policy for dedicated numa Service performance score is considered in new policy. Signed-off-by: linzhecheng --- .../plugin/qosaware/resource/cpu/advisor.go | 1 + .../qosaware/resource/cpu/advisor_test.go | 73 ++++++++++- .../region/headroompolicy/policy_canonical.go | 21 +-- .../headroompolicy/policy_numa_exclusive.go | 124 ++++++++++++++++++ .../plugin/qosaware/resource/helper/helper.go | 12 ++ .../memory/headroompolicy/policy_canonical.go | 2 +- .../headroompolicy/policy_canonical_test.go | 3 +- pkg/agent/sysadvisor/types/cpu.go | 5 +- pkg/agent/sysadvisor/types/helper.go | 10 +- pkg/metaserver/spd/manager.go | 35 +++-- pkg/util/general/common.go | 24 ++++ pkg/util/general/common_test.go | 8 ++ 12 files changed, 279 insertions(+), 39 deletions(-) create mode 100644 pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_numa_exclusive.go diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor.go index de6ab0ffe..30f270dc2 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor.go @@ -53,6 +53,7 @@ func init() { provisionpolicy.RegisterInitializer(types.CPUProvisionPolicyRama, provisionpolicy.NewPolicyRama) headroompolicy.RegisterInitializer(types.CPUHeadroomPolicyCanonical, headroompolicy.NewPolicyCanonical) + headroompolicy.RegisterInitializer(types.CPUHeadroomPolicyNUMAExclusive, headroompolicy.NewPolicyNUMAExclusive) provisionassembler.RegisterInitializer(types.CPUProvisionAssemblerCommon, provisionassembler.NewProvisionAssemblerCommon) diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go index eba2d687c..691bd0b51 100644 --- 
a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" "github.com/kubewharf/katalyst-api/pkg/consts" @@ -63,7 +64,7 @@ func generateTestConfiguration(t *testing.T, checkpointDir, stateFileDir string) return conf } -func newTestCPUResourceAdvisor(t *testing.T, pods []*v1.Pod, conf *config.Configuration, mf *metric.FakeMetricsFetcher, performanceLevel spd.PerformanceLevel) (*cpuResourceAdvisor, metacache.MetaCache) { +func newTestCPUResourceAdvisor(t *testing.T, pods []*v1.Pod, conf *config.Configuration, mf *metric.FakeMetricsFetcher, profiles map[k8stypes.UID]spd.DummyPodServiceProfile) (*cpuResourceAdvisor, metacache.MetaCache) { metaCache, err := metacache.NewMetaCacheImp(conf, metric.NewFakeMetricsFetcher(metrics.DummyMetrics{})) require.NoError(t, err) @@ -88,7 +89,7 @@ func newTestCPUResourceAdvisor(t *testing.T, pods []*v1.Pod, conf *config.Config MetricsFetcher: mf, } - err = metaServer.SetServiceProfilingManager(spd.NewDummyServiceProfilingManager(performanceLevel)) + err = metaServer.SetServiceProfilingManager(spd.NewDummyServiceProfilingManager(profiles)) require.NoError(t, err) cra := NewCPUResourceAdvisor(conf, struct{}{}, metaCache, metaServer, nil) @@ -144,7 +145,8 @@ func TestAdvisorUpdate(t *testing.T) { pods []*v1.Pod nodeEnableReclaim bool headroomAssembler types.CPUHeadroomAssemblerName - pLevel spd.PerformanceLevel + headroomPolicies map[types.QoSRegionType][]types.CPUHeadroomPolicyName + podProfiles map[k8stypes.UID]spd.DummyPodServiceProfile wantInternalCalculationResult types.InternalCPUCalculationResult wantHeadroom resource.Quantity metrics []metricItem @@ -476,8 +478,12 @@ func TestAdvisorUpdate(t *testing.T) { }, }, 
nodeEnableReclaim: true, - pLevel: spd.PerformanceLevelPoor, + podProfiles: map[k8stypes.UID]spd.DummyPodServiceProfile{"uid1": {PerformanceLevel: spd.PerformanceLevelPoor, Score: 0}}, headroomAssembler: types.CPUHeadroomAssemblerDedicated, + headroomPolicies: map[types.QoSRegionType][]types.CPUHeadroomPolicyName{ + types.QoSRegionTypeShare: {types.CPUHeadroomPolicyCanonical}, + types.QoSRegionTypeDedicatedNumaExclusive: {types.CPUHeadroomPolicyNUMAExclusive}, + }, wantInternalCalculationResult: types.InternalCPUCalculationResult{ PoolEntries: map[string]map[int]int{ state.PoolNameReserve: { @@ -491,6 +497,60 @@ func TestAdvisorUpdate(t *testing.T) { }, wantHeadroom: *resource.NewQuantity(45, resource.DecimalSI), }, + { + name: "single_dedicated_numa_exclusive pod with performance score", + pools: map[string]*types.PoolInfo{ + state.PoolNameReserve: { + PoolName: state.PoolNameReserve, + TopologyAwareAssignments: map[int]machine.CPUSet{ + 0: machine.MustParse("0"), + 1: machine.MustParse("24"), + }, + }, + state.PoolNameReclaim: { + PoolName: state.PoolNameReclaim, + TopologyAwareAssignments: map[int]machine.CPUSet{ + 0: machine.MustParse("70-71"), + 1: machine.MustParse("25-47,72-95"), + }, + }, + }, + containers: []*types.ContainerInfo{ + makeContainerInfo("uid1", "default", "pod1", "c1", consts.PodAnnotationQoSLevelDedicatedCores, state.PoolNameDedicated, + map[string]string{consts.PodAnnotationMemoryEnhancementNumaBinding: consts.PodAnnotationMemoryEnhancementNumaBindingEnable}, + map[int]machine.CPUSet{ + 0: machine.MustParse("1-23,48-71"), + }, 36), + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + UID: "uid1", + }, + }, + }, + nodeEnableReclaim: true, + podProfiles: map[k8stypes.UID]spd.DummyPodServiceProfile{"uid1": {PerformanceLevel: spd.PerformanceLevelPerfect, Score: 50}}, + headroomAssembler: types.CPUHeadroomAssemblerDedicated, + headroomPolicies: 
map[types.QoSRegionType][]types.CPUHeadroomPolicyName{ + types.QoSRegionTypeShare: {types.CPUHeadroomPolicyCanonical}, + types.QoSRegionTypeDedicatedNumaExclusive: {types.CPUHeadroomPolicyNUMAExclusive}, + }, + wantInternalCalculationResult: types.InternalCPUCalculationResult{ + PoolEntries: map[string]map[int]int{ + state.PoolNameReserve: { + -1: 2, + }, + state.PoolNameReclaim: { + 0: 4, + -1: 47, + }, + }, + }, + wantHeadroom: *resource.NewQuantity(49, resource.DecimalSI), + }, { name: "dedicated_numa_exclusive_&_share", pools: map[string]*types.PoolInfo{ @@ -837,8 +897,11 @@ func TestAdvisorUpdate(t *testing.T) { if tt.headroomAssembler != "" { conf.CPUAdvisorConfiguration.HeadroomAssembler = tt.headroomAssembler } + if len(tt.headroomPolicies) != 0 { + conf.CPUAdvisorConfiguration.HeadroomPolicies = tt.headroomPolicies + } - advisor, metaCache := newTestCPUResourceAdvisor(t, tt.pods, conf, mf, tt.pLevel) + advisor, metaCache := newTestCPUResourceAdvisor(t, tt.pods, conf, mf, tt.podProfiles) advisor.startTime = time.Now().Add(-types.StartUpPeriod) advisor.conf.GetDynamicConfiguration().EnableReclaim = tt.nodeEnableReclaim diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_canonical.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_canonical.go index 8fc1669c6..58dd40bd4 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_canonical.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_canonical.go @@ -21,7 +21,6 @@ import ( "math" "k8s.io/klog/v2" - "k8s.io/kubelet/pkg/apis/resourceplugin/v1alpha1" "github.com/kubewharf/katalyst-core/pkg/agent/sysadvisor/metacache" "github.com/kubewharf/katalyst-core/pkg/agent/sysadvisor/plugin/qosaware/resource/helper" @@ -29,7 +28,6 @@ import ( "github.com/kubewharf/katalyst-core/pkg/config" "github.com/kubewharf/katalyst-core/pkg/metaserver" 
"github.com/kubewharf/katalyst-core/pkg/metrics" - "github.com/kubewharf/katalyst-core/pkg/util/general" "github.com/kubewharf/katalyst-core/pkg/util/machine" ) @@ -63,22 +61,9 @@ func (p *PolicyCanonical) Update() error { klog.Errorf("[qosaware-cpu-headroom] illegal container info of %v/%v", podUID, containerName) continue } - var containerEstimation float64 = 0 - if ci.IsNumaBinding() && !enableReclaim { - if ci.ContainerType == v1alpha1.ContainerType_MAIN { - bindingNumas := machine.GetCPUAssignmentNUMAs(ci.TopologyAwareAssignments) - for range bindingNumas.ToSliceInt() { - containerEstimation += float64(p.metaServer.CPUsPerNuma()) - } - general.Infof("container %s/%s occupied cpu %v", ci.PodName, ci.ContainerName, containerEstimation) - } else { - containerEstimation = 0 - } - } else { - containerEstimation, err = helper.EstimateContainerCPUUsage(ci, p.metaReader, enableReclaim) - if err != nil { - return err - } + containerEstimation, err := helper.EstimateContainerCPUUsage(ci, p.metaReader, enableReclaim) + if err != nil { + return err } // FIXME: metric server doesn't support to report cpu usage in numa granularity, diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_numa_exclusive.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_numa_exclusive.go new file mode 100644 index 000000000..ee666635f --- /dev/null +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/headroompolicy/policy_numa_exclusive.go @@ -0,0 +1,124 @@ +/* +Copyright 2022 The Katalyst Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package headroompolicy + +import ( + "context" + "fmt" + "math" + + "k8s.io/klog/v2" + + "github.com/kubewharf/katalyst-core/pkg/agent/sysadvisor/metacache" + "github.com/kubewharf/katalyst-core/pkg/agent/sysadvisor/plugin/qosaware/resource/helper" + "github.com/kubewharf/katalyst-core/pkg/agent/sysadvisor/types" + "github.com/kubewharf/katalyst-core/pkg/config" + "github.com/kubewharf/katalyst-core/pkg/metaserver" + "github.com/kubewharf/katalyst-core/pkg/metaserver/spd" + "github.com/kubewharf/katalyst-core/pkg/metrics" + "github.com/kubewharf/katalyst-core/pkg/util/machine" +) + +type PolicyNUMAExclusive struct { + *PolicyBase + headroom float64 +} + +// NOTE: NewPolicyNUMAExclusive can only be used for dedicated_cores with numa exclusive region + +func NewPolicyNUMAExclusive(regionName string, regionType types.QoSRegionType, ownerPoolName string, + _ *config.Configuration, _ interface{}, metaReader metacache.MetaReader, + metaServer *metaserver.MetaServer, emitter metrics.MetricEmitter) HeadroomPolicy { + p := &PolicyNUMAExclusive{ + PolicyBase: NewPolicyBase(regionName, regionType, ownerPoolName, metaReader, metaServer, emitter), + } + return p +} + +func (p *PolicyNUMAExclusive) getContainerInfos() (string, []*types.ContainerInfo, error) { + if len(p.podSet) != 1 { + return "", nil, fmt.Errorf("more than one pod is assigned to this policy") + } + cis := make([]*types.ContainerInfo, 0) + for podUID, containers := range p.podSet { + for _, container := range containers.List() { + ci, ok := p.metaReader.GetContainerInfo(podUID, container) + if !ok { + return "", nil, 
fmt.Errorf("failed to find container(%s/%s)", podUID, container) + } + cis = append(cis, ci) + } + return podUID, cis, nil + } + return "", nil, fmt.Errorf("should never get here") +} + +func (p *PolicyNUMAExclusive) Update() error { + cpuEstimation := 0.0 + containerCnt := 0 + + podUID, containers, err := p.getContainerInfos() + if err != nil { + return err + } + enableReclaim, err := helper.PodEnableReclaim(context.Background(), p.metaServer, podUID, p.EnableReclaim) + if err != nil { + return err + } + if !enableReclaim { + p.headroom = 0 + return nil + } + + for _, ci := range containers { + containerEstimation, err := helper.EstimateContainerCPUUsage(ci, p.metaReader, enableReclaim) + if err != nil { + return err + } + + // FIXME: metric server doesn't support to report cpu usage in numa granularity, + // so we split cpu usage evenly across the binding numas of container. + if p.bindingNumas.Size() > 0 { + cpuSize := 0 + for _, numaID := range p.bindingNumas.ToSliceInt() { + cpuSize += ci.TopologyAwareAssignments[numaID].Size() + } + containerEstimation = containerEstimation * float64(cpuSize) / float64(machine.CountCPUAssignmentCPUs(ci.TopologyAwareAssignments)) + } + + cpuEstimation += containerEstimation + containerCnt += 1 + } + cpuEstimation += p.ReservedForAllocate + + originHeadroom := math.Max(p.ResourceUpperBound-cpuEstimation+p.ReservedForReclaim, 0) + score, err := helper.PodPerformanceScore(context.Background(), p.metaServer, podUID) + if err != nil { + return err + } + p.headroom = originHeadroom * (score - spd.MinPerformanceScore) / (spd.MaxPerformanceScore - spd.MinPerformanceScore) + + klog.Infof("region %v cpuEstimation %v with reservedForAllocate %v reservedForReclaim %v"+ + " originHeadroom %v headroom %v score %v #container %v", p.regionName, cpuEstimation, p.ReservedForAllocate, + p.ReservedForReclaim, originHeadroom, p.headroom, score, containerCnt) + + return nil +} + +func (p *PolicyNUMAExclusive) GetHeadroom() (float64, error) { + 
return p.headroom, nil +} diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/helper/helper.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/helper/helper.go index 20923d6ec..edb92fbec 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/helper/helper.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/helper/helper.go @@ -55,3 +55,15 @@ func PodEnableReclaim(ctx context.Context, metaServer *metaserver.MetaServer, // if performance level not poor, it can not be reclaimed return pLevel != spd.PerformanceLevelPoor, nil } + +func PodPerformanceScore(ctx context.Context, metaServer *metaserver.MetaServer, podUID string) (float64, error) { + if metaServer == nil { + return 0, fmt.Errorf("metaServer is nil") + } + pod, err := metaServer.GetPod(ctx, podUID) + if err != nil { + return 0, err + } + + return metaServer.ServiceBusinessPerformanceScore(ctx, pod) +} diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical.go index 07a5c9fe3..dae32563b 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical.go @@ -77,7 +77,7 @@ func (p *PolicyCanonical) estimateNonReclaimedQoSMemoryRequirement() (float64, e return true } - if ci.IsNumaBinding() && !enableReclaim { + if ci.IsNumaExclusive() && !enableReclaim { if ci.ContainerType == v1alpha1.ContainerType_MAIN { bindingNumas := machine.GetCPUAssignmentNUMAs(ci.TopologyAwareAssignments) for _, numaID := range bindingNumas.ToSliceInt() { diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go index 05fe781db..aa0f4e7f2 100644 --- 
a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go @@ -458,7 +458,8 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { makeContainerInfo("pod1", "default", "pod1", "container1", consts.PodAnnotationQoSLevelDedicatedCores, - map[string]string{consts.PodAnnotationMemoryEnhancementNumaBinding: consts.PodAnnotationMemoryEnhancementNumaBindingEnable}, + map[string]string{consts.PodAnnotationMemoryEnhancementNumaBinding: consts.PodAnnotationMemoryEnhancementNumaBindingEnable, + consts.PodAnnotationMemoryEnhancementNumaExclusive: consts.PodAnnotationMemoryEnhancementNumaExclusiveEnable}, types.TopologyAwareAssignment{0: machine.NewCPUSet(0, 1, 2, 3, 4)}, 1), }, memoryHeadroomConfiguration: &memoryheadroom.MemoryHeadroomConfiguration{ diff --git a/pkg/agent/sysadvisor/types/cpu.go b/pkg/agent/sysadvisor/types/cpu.go index 22478fbba..f419921f7 100644 --- a/pkg/agent/sysadvisor/types/cpu.go +++ b/pkg/agent/sysadvisor/types/cpu.go @@ -39,8 +39,9 @@ const ( type CPUHeadroomPolicyName string const ( - CPUHeadroomPolicyNone CPUHeadroomPolicyName = "none" - CPUHeadroomPolicyCanonical CPUHeadroomPolicyName = "canonical" + CPUHeadroomPolicyNone CPUHeadroomPolicyName = "none" + CPUHeadroomPolicyCanonical CPUHeadroomPolicyName = "canonical" + CPUHeadroomPolicyNUMAExclusive CPUHeadroomPolicyName = "numa_exclusive" ) // CPUProvisionAssemblerName defines assemblers for cpu advisor to generate node diff --git a/pkg/agent/sysadvisor/types/helper.go b/pkg/agent/sysadvisor/types/helper.go index df43afd25..c1a17fb09 100644 --- a/pkg/agent/sysadvisor/types/helper.go +++ b/pkg/agent/sysadvisor/types/helper.go @@ -24,13 +24,17 @@ import ( "github.com/kubewharf/katalyst-api/pkg/consts" "github.com/kubewharf/katalyst-core/pkg/util/general" "github.com/kubewharf/katalyst-core/pkg/util/machine" + qosutil 
"github.com/kubewharf/katalyst-core/pkg/util/qos" ) -// IsNumaBinding returns true iff current container is for dedicated_cores with numa binding -// todo: support numa exclusive +// IsNumaBinding returns true if current container is for dedicated_cores with numa binding func (ci *ContainerInfo) IsNumaBinding() bool { return ci.QoSLevel == consts.PodAnnotationQoSLevelDedicatedCores && - ci.Annotations[consts.PodAnnotationMemoryEnhancementNumaBinding] == consts.PodAnnotationMemoryEnhancementNumaBindingEnable + qosutil.AnnotationsIndicateNUMABinding(ci.Annotations) +} + +func (ci *ContainerInfo) IsNumaExclusive() bool { + return ci.QoSLevel == consts.PodAnnotationQoSLevelDedicatedCores && qosutil.AnnotationsIndicateNUMAExclusive(ci.Annotations) } func (ci *ContainerInfo) Clone() *ContainerInfo { diff --git a/pkg/metaserver/spd/manager.go b/pkg/metaserver/spd/manager.go index 90f6e3d93..d63a5f131 100644 --- a/pkg/metaserver/spd/manager.go +++ b/pkg/metaserver/spd/manager.go @@ -21,6 +21,7 @@ import ( "fmt" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "github.com/kubewharf/katalyst-core/pkg/util" ) @@ -34,6 +35,9 @@ const ( PerformanceLevelPerfect PerformanceLevel = 0 PerformanceLevelGood PerformanceLevel = 1 PerformanceLevelPoor PerformanceLevel = 2 + + MaxPerformanceScore float64 = 100 + MinPerformanceScore float64 = 0 ) type IndicatorTarget map[string]util.IndicatorTarget @@ -43,7 +47,7 @@ type ServiceProfilingManager interface { ServiceBusinessPerformanceLevel(ctx context.Context, pod *v1.Pod) (PerformanceLevel, error) // ServiceBusinessPerformanceScore returns the service business performance score for the given pod - // The score is in range [0, 100] + // The score is in range [MinPerformanceScore, MaxPerformanceScore] ServiceBusinessPerformanceScore(ctx context.Context, pod *v1.Pod) (float64, error) // ServiceSystemPerformanceTarget returns the system performance target for the given pod @@ -53,20 +57,33 @@ type ServiceProfilingManager interface { 
Run(ctx context.Context) } +type DummyPodServiceProfile struct { + PerformanceLevel PerformanceLevel + Score float64 +} + type DummyServiceProfilingManager struct { - pLevel PerformanceLevel + podProfiles map[types.UID]DummyPodServiceProfile } -func NewDummyServiceProfilingManager(pLevel PerformanceLevel) *DummyServiceProfilingManager { - return &DummyServiceProfilingManager{pLevel: pLevel} +func NewDummyServiceProfilingManager(podProfiles map[types.UID]DummyPodServiceProfile) *DummyServiceProfilingManager { + return &DummyServiceProfilingManager{podProfiles: podProfiles} } -func (d *DummyServiceProfilingManager) ServiceBusinessPerformanceLevel(_ context.Context, _ *v1.Pod) (PerformanceLevel, error) { - return d.pLevel, nil +func (d *DummyServiceProfilingManager) ServiceBusinessPerformanceLevel(_ context.Context, pod *v1.Pod) (PerformanceLevel, error) { + profile, ok := d.podProfiles[pod.UID] + if !ok { + return PerformanceLevelPerfect, nil + } + return profile.PerformanceLevel, nil } -func (d *DummyServiceProfilingManager) ServiceBusinessPerformanceScore(_ context.Context, _ *v1.Pod) (float64, error) { - return 100, nil +func (d *DummyServiceProfilingManager) ServiceBusinessPerformanceScore(_ context.Context, pod *v1.Pod) (float64, error) { + profile, ok := d.podProfiles[pod.UID] + if !ok { + return 100, nil + } + return profile.Score, nil } func (d *DummyServiceProfilingManager) ServiceSystemPerformanceTarget(_ context.Context, _ *v1.Pod) (IndicatorTarget, error) { @@ -89,7 +106,7 @@ func NewServiceProfilingManager(fetcher SPDFetcher) ServiceProfilingManager { func (m *serviceProfilingManager) ServiceBusinessPerformanceScore(_ context.Context, _ *v1.Pod) (float64, error) { // todo: implement service business performance score using spd to calculate - return 1., nil + return MaxPerformanceScore, nil } // ServiceBusinessPerformanceLevel gets the service business performance level by spd, and use the poorest business indicator diff --git a/pkg/util/general/common.go 
b/pkg/util/general/common.go index ac4392adc..a5e5a5022 100644 --- a/pkg/util/general/common.go +++ b/pkg/util/general/common.go @@ -27,6 +27,7 @@ import ( "strings" "time" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" ) @@ -54,6 +55,14 @@ func MaxInt64(a, b int64) int64 { } } +func MaxFloat64(a, b float64) float64 { + if a >= b { + return a + } else { + return b + } +} + func MaxTimePtr(a, b *time.Time) *time.Time { if a == nil { return b @@ -314,3 +323,18 @@ func CovertUInt64ToInt(numUInt64 uint64) (int, error) { func Clamp(value, min, max float64) float64 { return math.Max(math.Min(value, max), min) } + +// FormatMemoryQuantity formats the given quantity aligned down to Gi/Mi/Ki units +func FormatMemoryQuantity(q float64) string { + value := int64(q) + if (value >> 30) > 0 { + value = (value >> 30) << 30 + } else if (value >> 20) > 0 { + value = (value >> 20) << 20 + } else if (value >> 10) > 0 { + value = (value >> 10) << 10 + } + quantity := resource.NewQuantity(value, resource.BinarySI) + + return fmt.Sprintf("%v[%v]", q, quantity.String()) +} diff --git a/pkg/util/general/common_test.go b/pkg/util/general/common_test.go index 98808c85f..8cbdb1faf 100644 --- a/pkg/util/general/common_test.go +++ b/pkg/util/general/common_test.go @@ -141,3 +141,11 @@ func TestJsonPathEmpty(t *testing.T) { as.Equal(true, JsonPathEmpty([]byte("{}"))) as.Equal(true, JsonPathEmpty([]byte(""))) } + +func TestFormatMemoryQuantity(t *testing.T) { + t.Parallel() + as := require.New(t) + as.Equal("1024[1Ki]", FormatMemoryQuantity(1<<10)) + as.Equal("1.048576e+06[1Mi]", FormatMemoryQuantity(1<<20)) + as.Equal("1.073741824e+09[1Gi]", FormatMemoryQuantity(1<<30)) +}