diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ed6f09d65..0065b93a0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,8 +7,8 @@ on: workflow_dispatch: {} jobs: - lint: - name: Lint + fmt: + name: Format runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 @@ -24,10 +24,30 @@ jobs: run: | make fmt && git add pkg cmd && git diff --cached --exit-code || (echo 'Please run "make fmt" to verify gofmt' && exit 1); + + vet: + name: Vet + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version-file: go.mod + cache: false - name: Verify govet run: | make vet && git add pkg cmd && git diff --cached --exit-code || (echo 'Please run "make vet" to verify govet' && exit 1); + + lint: + name: Lint + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version-file: go.mod + cache: false - uses: golangci/golangci-lint-action@v3 with: args: --verbose --out-${NO_FUTURE}format colored-line-number --config .golangci.yml diff --git a/.golangci.yml b/.golangci.yml index b68cb05e9..652802d98 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,7 @@ # options for analysis running run: - # default concurrency is a available CPU number - concurrency: 4 + # default concurrency is the available CPU number + concurrency: 16 # timeout for analysis, e.g. 30s, 5m, default is 1m timeout: 10m diff --git a/Makefile b/Makefile index 4fb6bb45a..22357ffa5 100644 --- a/Makefile +++ b/Makefile @@ -112,7 +112,7 @@ vet: ## Run go vet against code. .PHONY: test test: ## Run go test against code. - go test -v -coverprofile=coverage.txt -covermode=atomic -race -coverpkg=./... ./... + go test -v -coverprofile=coverage.txt -parallel=16 -p=16 -covermode=atomic -race -coverpkg=./... ./pkg/... 
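// Reviewer note (illustrative sketch, not part of the patch): the Makefile now runs
// `go test -parallel=16 -p=16 ... ./pkg/...`. The `-p` flag caps how many packages are
// tested concurrently, while `-parallel` caps how many tests inside one package may run
// at the same time once they call t.Parallel(); the t.Parallel() calls added throughout
// this diff opt each test into that second level. A minimal, self-contained example of
// the pattern (names below are hypothetical, not from this repository):
package example

import (
	"testing"
	"time"
)

func TestSomething(t *testing.T) {
	t.Parallel() // pause until serial tests finish, then run alongside other parallel tests

	cases := []struct {
		name  string
		delay time.Duration
	}{
		{name: "fast", delay: time.Millisecond},
		{name: "slow", delay: 10 * time.Millisecond},
	}
	for _, tc := range cases {
		tc := tc // capture the range variable (needed before Go 1.22)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel() // subtests may also run in parallel with each other
			time.Sleep(tc.delay)
		})
	}
}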
.PHONY: license license: diff --git a/cmd/katalyst-agent/app/options/eviction/memory_pressure_eviction_test.go b/cmd/katalyst-agent/app/options/eviction/memory_pressure_eviction_test.go index b1129bad0..ad7a463a6 100644 --- a/cmd/katalyst-agent/app/options/eviction/memory_pressure_eviction_test.go +++ b/cmd/katalyst-agent/app/options/eviction/memory_pressure_eviction_test.go @@ -26,6 +26,8 @@ import ( ) func TestMemoryPressureEvictionPluginOptions_ApplyTo(t *testing.T) { + t.Parallel() + options := NewMemoryPressureEvictionOptions() configuration := eviction.NewMemoryPressureEvictionPluginConfiguration() diff --git a/pkg/agent/evictionmanager/manager_test.go b/pkg/agent/evictionmanager/manager_test.go index fa6334763..6e4af747c 100644 --- a/pkg/agent/evictionmanager/manager_test.go +++ b/pkg/agent/evictionmanager/manager_test.go @@ -228,6 +228,8 @@ func makeEvictionManager() *EvictionManger { } func TestEvictionManger_collectEvictionResult(t *testing.T) { + t.Parallel() + mgr := makeEvictionManager() tests := []struct { name string diff --git a/pkg/agent/evictionmanager/plugin/memory/helper_test.go b/pkg/agent/evictionmanager/plugin/memory/helper_test.go index f22405637..42859608f 100644 --- a/pkg/agent/evictionmanager/plugin/memory/helper_test.go +++ b/pkg/agent/evictionmanager/plugin/memory/helper_test.go @@ -56,6 +56,8 @@ func makeHelper() (*EvictionHelper, error) { } func TestEvictionHelper_getEvictionCmpFuncs(t *testing.T) { + t.Parallel() + helper, err := makeHelper() assert.NoError(t, err) assert.NotNil(t, helper) diff --git a/pkg/agent/evictionmanager/plugin/memory/numa_pressure_test.go b/pkg/agent/evictionmanager/plugin/memory/numa_pressure_test.go index 9fcda3eba..f86c2fa13 100644 --- a/pkg/agent/evictionmanager/plugin/memory/numa_pressure_test.go +++ b/pkg/agent/evictionmanager/plugin/memory/numa_pressure_test.go @@ -62,6 +62,8 @@ func makeNumaPressureEvictionPlugin(conf *config.Configuration) (*NumaMemoryPres } func TestNewNumaPressureEvictionPlugin(t *testing.T) { + t.Parallel() + plugin, err := makeNumaPressureEvictionPlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) @@ -74,6 +76,8 @@ func TestNewNumaPressureEvictionPlugin(t *testing.T) { } func TestNumaMemoryPressurePlugin_ThresholdMet(t *testing.T) { + t.Parallel() + plugin, err := makeNumaPressureEvictionPlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) @@ -213,6 +217,8 @@ func TestNumaMemoryPressurePlugin_ThresholdMet(t *testing.T) { } func TestNumaMemoryPressurePlugin_GetTopEvictionPods(t *testing.T) { + t.Parallel() + plugin, err := makeNumaPressureEvictionPlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) diff --git a/pkg/agent/evictionmanager/plugin/memory/rss_overuse_test.go b/pkg/agent/evictionmanager/plugin/memory/rss_overuse_test.go index 03982770e..78e2e5c42 100644 --- a/pkg/agent/evictionmanager/plugin/memory/rss_overuse_test.go +++ b/pkg/agent/evictionmanager/plugin/memory/rss_overuse_test.go @@ -54,6 +54,8 @@ func makeRssOverusePlugin(conf *config.Configuration) (*RssOveruseEvictionPlugin } func TestRssOveruseEvictionPlugin_GetEvictPods(t *testing.T) { + t.Parallel() + plugin, err := makeRssOverusePlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) diff --git a/pkg/agent/evictionmanager/plugin/memory/system_pressure_test.go b/pkg/agent/evictionmanager/plugin/memory/system_pressure_test.go index 0044aa37d..822fd27f8 100644 --- a/pkg/agent/evictionmanager/plugin/memory/system_pressure_test.go +++ 
b/pkg/agent/evictionmanager/plugin/memory/system_pressure_test.go @@ -97,6 +97,8 @@ func makeSystemPressureEvictionPlugin(conf *config.Configuration) (*SystemPressu } func TestNewSystemPressureEvictionPlugin(t *testing.T) { + t.Parallel() + plugin, err := makeSystemPressureEvictionPlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) @@ -111,6 +113,8 @@ func TestNewSystemPressureEvictionPlugin(t *testing.T) { } func TestSystemPressureEvictionPlugin_ThresholdMet(t *testing.T) { + t.Parallel() + plugin, err := makeSystemPressureEvictionPlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) @@ -290,6 +294,8 @@ func TestSystemPressureEvictionPlugin_ThresholdMet(t *testing.T) { } func TestSystemPressureEvictionPlugin_GetTopEvictionPods(t *testing.T) { + t.Parallel() + plugin, err := makeSystemPressureEvictionPlugin(makeConf()) assert.NoError(t, err) assert.NotNil(t, plugin) diff --git a/pkg/agent/evictionmanager/plugin/reclaimed_resources_test.go b/pkg/agent/evictionmanager/plugin/reclaimed_resources_test.go index 598ac32d8..c3aadcee5 100644 --- a/pkg/agent/evictionmanager/plugin/reclaimed_resources_test.go +++ b/pkg/agent/evictionmanager/plugin/reclaimed_resources_test.go @@ -63,6 +63,8 @@ func generateTestMetaServer(clientSet *client.GenericClientSet, conf *config.Con } func TestNewReclaimedResourcesEvictionPlugin(t *testing.T) { + t.Parallel() + testNodeName := "test-node" testConf := generateTestConfiguration(t, testNodeName) pods := []*corev1.Pod{ diff --git a/pkg/agent/evictionmanager/podkiller/killer_test.go b/pkg/agent/evictionmanager/podkiller/killer_test.go index ad21e9f33..cedc3af3c 100644 --- a/pkg/agent/evictionmanager/podkiller/killer_test.go +++ b/pkg/agent/evictionmanager/podkiller/killer_test.go @@ -32,6 +32,8 @@ import ( ) func TestEvictionQueue(t *testing.T) { + t.Parallel() + pods := []*v1.Pod{ { ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, diff --git a/pkg/agent/evictionmanager/rule/queue_test.go b/pkg/agent/evictionmanager/rule/queue_test.go index 29569720d..0c91b660d 100644 --- a/pkg/agent/evictionmanager/rule/queue_test.go +++ b/pkg/agent/evictionmanager/rule/queue_test.go @@ -43,6 +43,8 @@ func makeRuledEvictPod(name, scope string) *RuledEvictPod { } func TestEvictionQueue(t *testing.T) { + t.Parallel() + for _, tc := range []struct { comment string q EvictionQueue diff --git a/pkg/agent/evictionmanager/rule/rule_test.go b/pkg/agent/evictionmanager/rule/rule_test.go index 848218019..6544c88e9 100644 --- a/pkg/agent/evictionmanager/rule/rule_test.go +++ b/pkg/agent/evictionmanager/rule/rule_test.go @@ -39,6 +39,8 @@ func makeRuledEvictPodForSort(name, scope string, annotations map[string]string, } func TestEvictionStrategyImp(t *testing.T) { + t.Parallel() + testConf, _ := options.NewOptions().Config() s := NewEvictionStrategyImpl(testConf) diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/cpu.pb_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/cpu.pb_test.go index d7e60faab..61264bfff 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/cpu.pb_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/cpu.pb_test.go @@ -26,6 +26,8 @@ import ( ) func TestCPUPB(t *testing.T) { + t.Parallel() + OverlapType_OverlapWithPod.EnumDescriptor() _ = OverlapType_OverlapWithPod.String() @@ -217,32 +219,44 @@ func TestCPUPB(t *testing.T) { } func TestAddContainer(t *testing.T) { + t.Parallel() + testServer := &UnimplementedCPUAdvisorServer{} testServer.AddContainer(context.Background(), 
&advisorsvc.AddContainerRequest{}) } func TestRemovePod(t *testing.T) { + t.Parallel() + testServer := &UnimplementedCPUAdvisorServer{} testServer.RemovePod(context.Background(), &advisorsvc.RemovePodRequest{}) } func TestListAndWatch(t *testing.T) { + t.Parallel() + testServer := &UnimplementedCPUAdvisorServer{} testServer.ListAndWatch(&advisorsvc.Empty{}, &cPUAdvisorListAndWatchServer{}) } func TestGetCheckpoint(t *testing.T) { + t.Parallel() + testServer := &UnimplementedCPUPluginServer{} testServer.GetCheckpoint(context.Background(), &GetCheckpointRequest{}) } func TestRegisterCPUAdvisorServer(t *testing.T) { + t.Parallel() + grpcServer := grpc.NewServer() testServer := &UnimplementedCPUAdvisorServer{} RegisterCPUAdvisorServer(grpcServer, testServer) } func TestRegisterCPUPluginServer(t *testing.T) { + t.Parallel() + grpcServer := grpc.NewServer() testServer := &UnimplementedCPUPluginServer{} RegisterCPUPluginServer(grpcServer, testServer) diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/qos_aware_client_stub_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/qos_aware_client_stub_test.go index b2a9666ed..fcd6d8b51 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/qos_aware_client_stub_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpuadvisor/qos_aware_client_stub_test.go @@ -24,16 +24,22 @@ import ( ) func TestClientAddContainer(t *testing.T) { + t.Parallel() + client := NewCPUAdvisorClientStub() _, _ = client.AddContainer(context.Background(), &advisorsvc.AddContainerRequest{}) } func TestClientRemovePod(t *testing.T) { + t.Parallel() + client := NewCPUAdvisorClientStub() _, _ = client.RemovePod(context.Background(), &advisorsvc.RemovePodRequest{}) } func TestClientListAndWatch(t *testing.T) { + t.Parallel() + client := NewCPUAdvisorClientStub() _, _ = client.ListAndWatch(context.Background(), &advisorsvc.Empty{}) } diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go index 2961a0bd5..8751fa327 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_load_test.go @@ -78,7 +78,7 @@ func makeConf(metricRingSize int, gracePeriod int64, loadUpperBoundRatio, } func makeState(topo *machine.CPUTopology) (qrmstate.State, error) { - tmpDir, err := os.MkdirTemp("", "checkpoint") + tmpDir, err := os.MkdirTemp("", "checkpoint-makeState") if err != nil { return nil, fmt.Errorf("make tmp dir for checkpoint failed with error: %v", err) } @@ -86,6 +86,8 @@ func makeState(topo *machine.CPUTopology) (qrmstate.State, error) { } func TestNewCPUPressureLoadEviction(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) @@ -102,6 +104,8 @@ func TestNewCPUPressureLoadEviction(t *testing.T) { } func TestThresholdMet(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) @@ -385,6 +389,8 @@ func TestThresholdMet(t *testing.T) { } func TestGetTopEvictionPods(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_suppression_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_suppression_test.go index 385418f3a..0061da9d4 100644 --- 
a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_suppression_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/cpueviction/strategy/pressure_suppression_test.go @@ -43,7 +43,7 @@ import ( const ( defaultCPUMaxSuppressionToleranceRate = 5.0 - defaultCPUMinSuppressionToleranceDuration = 1 * time.Second + defaultCPUMinSuppressionToleranceDuration = 10 * time.Millisecond ) func makeSuppressionEvictionConf(cpuMaxSuppressionToleranceRate float64, @@ -56,6 +56,8 @@ func makeSuppressionEvictionConf(cpuMaxSuppressionToleranceRate float64, } func TestNewCPUPressureSuppressionEviction(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) @@ -71,6 +73,8 @@ func TestNewCPUPressureSuppressionEviction(t *testing.T) { } func TestCPUPressureSuppression_GetEvictPods(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) @@ -304,7 +308,7 @@ func TestCPUPressureSuppression_GetEvictPods(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, resp) - time.Sleep(1 * time.Second) + time.Sleep(defaultCPUMinSuppressionToleranceDuration) resp, err = plugin.GetEvictPods(context.TODO(), &evictionpluginapi.GetEvictPodsRequest{ ActivePods: pods, diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go index 6edddd8c1..c10513cf2 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy.go @@ -65,10 +65,6 @@ const ( syncCPUIdlePeriod = 30 * time.Second ) -var ( - transitionPeriod = 30 * time.Second -) - var ( readonlyStateLock sync.RWMutex readonlyState state.ReadonlyState @@ -125,6 +121,7 @@ type DynamicPolicy struct { qosConfig *generic.QoSConfiguration dynamicConfig *dynamicconfig.DynamicAgentConfiguration podDebugAnnoKeys []string + transitionPeriod time.Duration } func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration, @@ -196,6 +193,7 @@ func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration enableCPUIdle: conf.CPUQRMPluginConfig.EnableCPUIdle, reclaimRelativeRootCgroupPath: conf.ReclaimRelativeRootCgroupPath, podDebugAnnoKeys: conf.PodDebugAnnoKeys, + transitionPeriod: time.Second, } // register allocation behaviors for pods with different QoS level @@ -212,7 +210,7 @@ func NewDynamicPolicy(agentCtx *agent.GenericContext, conf *config.Configuration consts.PodAnnotationQoSLevelReclaimedCores: policyImplement.reclaimedCoresHintHandler, } - state.GetContainerRequestedCores = policyImplement.getContainerRequestedCores + state.SetContainerRequestedCores(policyImplement.getContainerRequestedCores) if err := policyImplement.cleanPools(); err != nil { return false, agent.ComponentStub{}, fmt.Errorf("cleanPools failed with error: %v", err) @@ -324,7 +322,7 @@ func (p *DynamicPolicy) Start() (err error) { general.Infof("sync existing containers to cpu advisor successfully") // call lw of CPUAdvisorServer and do allocation - if err := p.lwCPUAdvisorServer(p.stopCh); err != nil { + if err = p.lwCPUAdvisorServer(p.stopCh); err != nil { general.Errorf("lwCPUAdvisorServer failed with error: %v", err) } else { general.Infof("lwCPUAdvisorServer finished") @@ -420,7 +418,7 @@ func (p *DynamicPolicy) GetResourcesAllocation(_ context.Context, allocationInfo.InitTimestamp = time.Now().Format(util.QRMTimeFormat) p.state.SetAllocationInfo(podUID, containerName, allocationInfo) - } else if allocationInfo.RampUp && 
time.Now().After(initTs.Add(transitionPeriod)) { + } else if allocationInfo.RampUp && time.Now().After(initTs.Add(p.transitionPeriod)) { general.Infof("pod: %s/%s, container: %s ramp up finished", allocationInfo.PodNamespace, allocationInfo.PodName, allocationInfo.ContainerName) allocationInfo.RampUp = false p.state.SetAllocationInfo(podUID, containerName, allocationInfo) diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_advisor_handler.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_advisor_handler.go index 4c813975a..eb70b9300 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_advisor_handler.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_advisor_handler.go @@ -521,7 +521,7 @@ func (p *DynamicPolicy) applyBlocks(blockCPUSet advisorapi.BlockCPUSet, resp *ad // adapt to old checkpoint without RequestQuantity property if newEntries[podUID][containerName] != nil { - newEntries[podUID][containerName].RequestQuantity = state.GetContainerRequestedCores(allocationInfo) + newEntries[podUID][containerName].RequestQuantity = state.GetContainerRequestedCores()(allocationInfo) continue } diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_allocation_handlers.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_allocation_handlers.go index 722dfe4e3..954fd1106 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_allocation_handlers.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_allocation_handlers.go @@ -499,7 +499,7 @@ func (p *DynamicPolicy) putAllocationsAndAdjustAllocationEntries(allocationInfos return fmt.Errorf("allocationInfo points to empty poolName") } - reqInt := state.GetContainerRequestedCores(allocationInfo) + reqInt := state.GetContainerRequestedCores()(allocationInfo) poolsQuantityMap[poolName] += reqInt } @@ -723,7 +723,7 @@ func (p *DynamicPolicy) applyPoolsAndIsolatedInfo(poolsCPUSet map[string]machine continue } - reqInt := state.GetContainerRequestedCores(allocationInfo) + reqInt := state.GetContainerRequestedCores()(allocationInfo) if newPodEntries[podUID][containerName] != nil { // adapt to old checkpoint without RequestQuantity property newPodEntries[podUID][containerName].RequestQuantity = reqInt diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go index c040c77d7..209b3b134 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/policy_test.go @@ -99,7 +99,7 @@ func getTestDynamicPolicyWithoutInitialization(topology *machine.CPUTopology, st podDebugAnnoKeys: []string{podDebugAnnoKey}, } - state.GetContainerRequestedCores = policyImplement.getContainerRequestedCores + state.SetContainerRequestedCores(policyImplement.getContainerRequestedCores) // register allocation behaviors for pods with different QoS level policyImplement.allocationHandlers = map[string]util.AllocationHandler{ @@ -119,14 +119,16 @@ func getTestDynamicPolicyWithoutInitialization(topology *machine.CPUTopology, st } func TestInitPoolAndCalculator(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestInitPoolAndCalculator") as.Nil(err) - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() policyImpl, err := getTestDynamicPolicyWithoutInitialization(cpuTopology, tmpDir) as.Nil(err) @@ -142,11 +144,13 @@ func TestInitPoolAndCalculator(t 
*testing.T) { } func TestRemovePod(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestRemovePod") as.Nil(err) - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -210,7 +214,7 @@ func TestRemovePod(t *testing.T) { }, }, resp) - dynamicPolicy.RemovePod(context.Background(), &pluginapi.RemovePodRequest{ + _, _ = dynamicPolicy.RemovePod(context.Background(), &pluginapi.RemovePodRequest{ PodUid: req.PodUid, }) @@ -223,6 +227,8 @@ func TestRemovePod(t *testing.T) { } func TestAllocate(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -655,7 +661,7 @@ func TestAllocate(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestAllocate") as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(tc.cpuTopology, tmpDir) @@ -671,11 +677,13 @@ func TestAllocate(t *testing.T) { tc.expectedResp.PodUid = tc.req.PodUid as.Equalf(tc.expectedResp, resp, "failed in test case: %s", tc.description) - os.RemoveAll(tmpDir) + _ = os.RemoveAll(tmpDir) } } func TestGetTopologyHints(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -1069,7 +1077,7 @@ func TestGetTopologyHints(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetTopologyHints") as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(tc.cpuTopology, tmpDir) @@ -1085,16 +1093,18 @@ func TestGetTopologyHints(t *testing.T) { tc.expectedResp.PodUid = tc.req.PodUid as.Equalf(tc.expectedResp, resp, "failed in test case: %s", tc.description) - os.RemoveAll(tmpDir) + _ = os.RemoveAll(tmpDir) } } func TestGetTopologyAwareAllocatableResources(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetTopologyAwareAllocatableResources") as.Nil(err) - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -1130,6 +1140,8 @@ func TestGetTopologyAwareAllocatableResources(t *testing.T) { } func TestGetTopologyAwareResources(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -1257,7 +1269,7 @@ func TestGetTopologyAwareResources(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetTopologyAwareResources") as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(tc.cpuTopology, tmpDir) @@ -1282,11 +1294,11 @@ func TestGetTopologyAwareResources(t *testing.T) { as.Equalf(tc.expectedResp, resp, "failed in test case: %s", tc.description) if tc.req.Annotations[consts.PodAnnotationQoSLevelKey] == consts.PodAnnotationQoSLevelSharedCores { - originalTransitionPeriod := transitionPeriod - transitionPeriod = time.Second - time.Sleep(2 * time.Second) + originalTransitionPeriod := dynamicPolicy.transitionPeriod + dynamicPolicy.transitionPeriod = time.Millisecond * 10 + time.Sleep(20 * time.Millisecond) _, err 
= dynamicPolicy.GetResourcesAllocation(context.Background(), &pluginapi.GetResourcesAllocationRequest{}) - transitionPeriod = originalTransitionPeriod + dynamicPolicy.transitionPeriod = originalTransitionPeriod as.Nil(err) allocationInfo := dynamicPolicy.state.GetAllocationInfo(tc.req.PodUid, testName) as.NotNil(allocationInfo) @@ -1329,11 +1341,13 @@ func TestGetTopologyAwareResources(t *testing.T) { } func TestGetResourcesAllocation(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetResourcesAllocation") as.Nil(err) - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -1381,12 +1395,12 @@ func TestGetResourcesAllocation(t *testing.T) { }) // test after ramping up - originalTransitionPeriod := transitionPeriod - transitionPeriod = time.Second - time.Sleep(2 * time.Second) + originalTransitionPeriod := dynamicPolicy.transitionPeriod + dynamicPolicy.transitionPeriod = time.Millisecond * 10 + time.Sleep(20 * time.Millisecond) _, err = dynamicPolicy.GetResourcesAllocation(context.Background(), &pluginapi.GetResourcesAllocationRequest{}) as.Nil(err) - transitionPeriod = originalTransitionPeriod + dynamicPolicy.transitionPeriod = originalTransitionPeriod allocationInfo := dynamicPolicy.state.GetAllocationInfo(req.PodUid, testName) as.NotNil(allocationInfo) as.Equal(allocationInfo.RampUp, false) @@ -1443,6 +1457,8 @@ func TestGetResourcesAllocation(t *testing.T) { } func TestAllocateByQoSAwareServerListAndWatchResp(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -2786,8 +2802,8 @@ func TestAllocateByQoSAwareServerListAndWatchResp(t *testing.T) { }, } - for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + for i, tc := range testCases { + tmpDir, err := ioutil.TempDir("", fmt.Sprintf("checkpoint-TestAllocateByQoSAwareServerListAndWatchResp-%v", i)) as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(tc.cpuTopology, tmpDir) @@ -2888,6 +2904,8 @@ func entriesMatch(entries1, entries2 state.PodEntries) (bool, error) { } func TestGetReadonlyState(t *testing.T) { + t.Parallel() + as := require.New(t) readonlyState, err := GetReadonlyState() as.NotNil(err) @@ -2895,9 +2913,11 @@ func TestGetReadonlyState(t *testing.T) { } func TestClearResidualState(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint_TestClearResidualState") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -2911,9 +2931,11 @@ func TestClearResidualState(t *testing.T) { } func TestStart(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint_TestStart") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -2928,9 +2950,11 @@ func TestStart(t *testing.T) { } func TestStop(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint_TestStop") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -2945,9 +2969,11 @@ func TestStop(t *testing.T) { } func TestCheckCPUSet(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint_TestCheckCPUSet") as.Nil(err) defer 
os.RemoveAll(tmpDir) @@ -2961,6 +2987,8 @@ func TestCheckCPUSet(t *testing.T) { } func TestSchedIdle(t *testing.T) { + t.Parallel() + as := require.New(t) _, err1 := os.Stat("/sys/fs/cgroup/cpu/kubepods/cpu.idle") @@ -2979,7 +3007,7 @@ func TestSchedIdle(t *testing.T) { as.Nil(err) var enableCPUIdle bool - cgroupcmutils.ApplyCPUWithRelativePath("test", &cgroupcm.CPUData{CpuIdlePtr: &enableCPUIdle}) + _ = cgroupcmutils.ApplyCPUWithRelativePath("test", &cgroupcm.CPUData{CpuIdlePtr: &enableCPUIdle}) contents, err := ioutil.ReadFile(filepath.Join(absCgroupPath, "cpu.idle")) //nolint:gosec as.Nil(err) diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go index 96fb1af53..fa5c94bad 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/state_test.go @@ -52,6 +52,7 @@ func assertStateEqual(t *testing.T, restoredState, expectedState State) { } func TestNewCheckpointState(t *testing.T) { + t.Parallel() testName := "test" cpuTopology, _ := machine.GenerateDummyCPUTopology(16, 2, 4) @@ -1465,6 +1466,8 @@ func TestNewCheckpointState(t *testing.T) { } func TestClearState(t *testing.T) { + t.Parallel() + as := require.New(t) testName := "test" @@ -1953,6 +1956,8 @@ func TestClearState(t *testing.T) { } func TestCheckpointStateHelpers(t *testing.T) { + t.Parallel() + as := require.New(t) testName := "test" @@ -2450,6 +2455,8 @@ func TestCheckpointStateHelpers(t *testing.T) { } func TestGetDefaultMachineState(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) @@ -2498,6 +2505,8 @@ func TestGetDefaultMachineState(t *testing.T) { } func TestGetSocketTopology(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go index aafcf59fa..c608f59d9 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util.go @@ -18,6 +18,7 @@ package state import ( "fmt" + "sync" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" @@ -54,7 +55,20 @@ var ( ).Union(StaticPools) ) -var GetContainerRequestedCores func(allocationInfo *AllocationInfo) int +var containerRequestedCoresLock sync.RWMutex +var containerRequestedCores func(allocationInfo *AllocationInfo) int + +func GetContainerRequestedCores() func(allocationInfo *AllocationInfo) int { + containerRequestedCoresLock.RLock() + defer containerRequestedCoresLock.RUnlock() + return containerRequestedCores +} + +func SetContainerRequestedCores(f func(allocationInfo *AllocationInfo) int) { + containerRequestedCoresLock.Lock() + defer containerRequestedCoresLock.Unlock() + containerRequestedCores = f +} // GetIsolatedQuantityMapFromPodEntries returns a map to indicates isolation info, // and the map is formatted as pod -> container -> isolated-quantity @@ -83,7 +97,7 @@ func GetIsolatedQuantityMapFromPodEntries(podEntries PodEntries, ignoreAllocatio // and we will try to isolate those containers, so we will treat them as containers to be isolated. 
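// Reviewer note (illustrative sketch, not part of the patch): the old package-level
//     var GetContainerRequestedCores func(allocationInfo *AllocationInfo) int
// was assigned directly by NewDynamicPolicy and by test setup, so parallel tests could
// trip `go test -race`. The patch hides the hook behind a sync.RWMutex getter/setter,
// which is the same idea as the simplified sketch below; the same motivation (removing
// shared mutable package state) is why transitionPeriod became a DynamicPolicy field.
// Types and names here are placeholders, not the repository's real API:
package hook

import "sync"

var (
	mu        sync.RWMutex
	requested func(containerID string) int // hypothetical hook, nil until Set is called
)

// Set installs the hook; called once per policy or test setup.
func Set(f func(containerID string) int) {
	mu.Lock()
	defer mu.Unlock()
	requested = f
}

// Get returns the current hook; callers invoke it as Get()(id), mirroring the
// state.GetContainerRequestedCores()(allocationInfo) call sites in this diff.
func Get() func(containerID string) int {
	mu.RLock()
	defer mu.RUnlock()
	return requested
}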
var quantity int if allocationInfo.OwnerPoolName != PoolNameDedicated { - quantity = GetContainerRequestedCores(allocationInfo) + quantity = GetContainerRequestedCores()(allocationInfo) } else { quantity = allocationInfo.AllocationResult.Size() } @@ -128,7 +142,7 @@ func GetSharedQuantityMapFromPodEntries(podEntries PodEntries, ignoreAllocationI } if poolName := allocationInfo.GetOwnerPoolName(); poolName != advisorapi.EmptyOwnerPoolName { - ret[poolName] += GetContainerRequestedCores(allocationInfo) + ret[poolName] += GetContainerRequestedCores()(allocationInfo) } } } diff --git a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util_test.go b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util_test.go index 41087d408..4b89c4c21 100644 --- a/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util_test.go +++ b/pkg/agent/qrm-plugins/cpu/dynamicpolicy/state/util_test.go @@ -33,6 +33,8 @@ import ( ) func TestGenerateCPUMachineStateByPodEntries(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -496,7 +498,7 @@ func TestGenerateCPUMachineStateByPodEntries(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGenerateCPUMachineStateByPodEntries") as.Nil(err) machineState, err := GenerateMachineStateFromPodEntries(tc.cpuTopology, tc.podEntries) diff --git a/pkg/agent/qrm-plugins/memory/dynamicpolicy/policy_test.go b/pkg/agent/qrm-plugins/memory/dynamicpolicy/policy_test.go index 2e702f035..c0f61bd9b 100644 --- a/pkg/agent/qrm-plugins/memory/dynamicpolicy/policy_test.go +++ b/pkg/agent/qrm-plugins/memory/dynamicpolicy/policy_test.go @@ -98,9 +98,11 @@ func getTestDynamicPolicyWithInitialization(topology *machine.CPUTopology, machi } func TestCheckMemorySet(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestCheckMemorySet") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -150,9 +152,11 @@ func TestCheckMemorySet(t *testing.T) { } func TestClearResidualState(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestClearResidualState") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -173,9 +177,11 @@ func TestClearResidualState(t *testing.T) { } func TestSetMemoryMigrate(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestSetMemoryMigrate") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -242,9 +248,11 @@ func TestSetMemoryMigrate(t *testing.T) { } func TestRemovePod(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestRemovePod") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -329,6 +337,8 @@ func TestRemovePod(t *testing.T) { } func TestAllocate(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -755,7 +765,7 @@ func TestAllocate(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestAllocate") as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(cpuTopology, machineInfo, tmpDir) @@ -776,6 +786,8 @@ func TestAllocate(t *testing.T) { } func 
TestGetTopologyHints(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -1165,7 +1177,7 @@ func TestGetTopologyHints(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetTopologyHints") as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(cpuTopology, machineInfo, tmpDir) @@ -1186,9 +1198,11 @@ func TestGetTopologyHints(t *testing.T) { } func TestGetTopologyAwareAllocatableResources(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetTopologyAwareAllocatableResources") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -1229,6 +1243,8 @@ func TestGetTopologyAwareAllocatableResources(t *testing.T) { } func TestGetTopologyAwareResources(t *testing.T) { + t.Parallel() + as := require.New(t) cpuTopology, err := machine.GenerateDummyCPUTopology(16, 2, 4) as.Nil(err) @@ -1388,7 +1404,7 @@ func TestGetTopologyAwareResources(t *testing.T) { } for _, tc := range testCases { - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetTopologyAwareResources") as.Nil(err) dynamicPolicy, err := getTestDynamicPolicyWithInitialization(cpuTopology, machineInfo, tmpDir) @@ -1417,9 +1433,11 @@ func TestGetTopologyAwareResources(t *testing.T) { } func TestGetResourcesAllocation(t *testing.T) { + t.Parallel() + as := require.New(t) - tmpDir, err := ioutil.TempDir("", "checkpoint") + tmpDir, err := ioutil.TempDir("", "checkpoint-TestGetResourcesAllocation") as.Nil(err) defer os.RemoveAll(tmpDir) @@ -1556,6 +1574,8 @@ func TestGetResourcesAllocation(t *testing.T) { } func TestGetReadonlyState(t *testing.T) { + t.Parallel() + as := require.New(t) readonlyState, err := GetReadonlyState() as.NotNil(err) @@ -1563,6 +1583,8 @@ func TestGetReadonlyState(t *testing.T) { } func TestGenerateResourcesMachineStateFromPodEntries(t *testing.T) { + t.Parallel() + as := require.New(t) machineInfo, err := machine.GenerateDummyMachineInfo(4, 32) diff --git a/pkg/agent/qrm-plugins/network/staticpolicy/policy_test.go b/pkg/agent/qrm-plugins/network/staticpolicy/policy_test.go index 81ea9855c..f28570fd3 100644 --- a/pkg/agent/qrm-plugins/network/staticpolicy/policy_test.go +++ b/pkg/agent/qrm-plugins/network/staticpolicy/policy_test.go @@ -172,6 +172,8 @@ func makeNICs() []machine.InterfaceInfo { } func TestNewStaticPolicy(t *testing.T) { + t.Parallel() + neetToRun, policy, err := NewStaticPolicy(makeTestGenericContext(t), generateTestConfiguration(t), nil, NetworkResourcePluginPolicyNameStatic) assert.NoError(t, err) assert.NotNil(t, policy) @@ -181,6 +183,8 @@ func TestNewStaticPolicy(t *testing.T) { } func TestRemovePod(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -195,6 +199,8 @@ func TestRemovePod(t *testing.T) { } func TestAllocate(t *testing.T) { + t.Parallel() + testName := "test" testCases := []struct { @@ -442,6 +448,8 @@ func TestAllocate(t *testing.T) { } func TestGetNetClassID(t *testing.T) { + t.Parallel() + staticPolicy := makeStaticPolicy(t) staticPolicy.qosLevelToNetClassMap = map[string]uint32{ consts.PodAnnotationQoSLevelReclaimedCores: 10, @@ -542,6 +550,8 @@ func TestGetNetClassID(t *testing.T) { } func TestName(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -549,6 +559,8 @@ func 
TestName(t *testing.T) { } func TestResourceName(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -556,6 +568,8 @@ func TestResourceName(t *testing.T) { } func TestGetTopologyHints(t *testing.T) { + t.Parallel() + testName := "test" testCases := []struct { @@ -806,6 +820,8 @@ func TestGetTopologyHints(t *testing.T) { } func TestGetResourcesAllocation(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -814,6 +830,8 @@ func TestGetResourcesAllocation(t *testing.T) { } func TestGetTopologyAwareResources(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -827,6 +845,8 @@ func TestGetTopologyAwareResources(t *testing.T) { } func TestGetTopologyAwareAllocatableResources(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -835,6 +855,8 @@ func TestGetTopologyAwareAllocatableResources(t *testing.T) { } func TestGetResourcePluginOptions(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) @@ -850,6 +872,8 @@ func TestGetResourcePluginOptions(t *testing.T) { } func TestPreStartContainer(t *testing.T) { + t.Parallel() + policy := makeStaticPolicy(t) assert.NotNil(t, policy) diff --git a/pkg/agent/qrm-plugins/util/util_test.go b/pkg/agent/qrm-plugins/util/util_test.go index 400bef742..1d3ff3bbc 100644 --- a/pkg/agent/qrm-plugins/util/util_test.go +++ b/pkg/agent/qrm-plugins/util/util_test.go @@ -30,6 +30,8 @@ import ( ) func TestGetQuantityFromResourceReq(t *testing.T) { + t.Parallel() + as := require.New(t) testCases := []struct { @@ -90,6 +92,8 @@ func TestGetQuantityFromResourceReq(t *testing.T) { } func TestDeepCopyTopologyAwareAssignments(t *testing.T) { + t.Parallel() + as := require.New(t) testCases := []struct { @@ -117,6 +121,8 @@ func TestDeepCopyTopologyAwareAssignments(t *testing.T) { } func TestHintToIntArray(t *testing.T) { + t.Parallel() + as := require.New(t) testCases := []struct { @@ -161,6 +167,8 @@ func TestHintToIntArray(t *testing.T) { } func TestMaskToUInt64Array(t *testing.T) { + t.Parallel() + as := require.New(t) nonEmptyMask, err := bitmask.NewBitMask(0, 1, 2, 3) @@ -190,6 +198,8 @@ func TestMaskToUInt64Array(t *testing.T) { } func TestTransformTopologyAwareQuantity(t *testing.T) { + t.Parallel() + as := require.New(t) testCases := []struct { diff --git a/pkg/agent/resourcemanager/fetcher/kubelet/kubeletplugin_test.go b/pkg/agent/resourcemanager/fetcher/kubelet/kubeletplugin_test.go index 05c3e07e0..d12ffe6c8 100644 --- a/pkg/agent/resourcemanager/fetcher/kubelet/kubeletplugin_test.go +++ b/pkg/agent/resourcemanager/fetcher/kubelet/kubeletplugin_test.go @@ -129,6 +129,8 @@ func tmpSocketDir() (socketDir string, err error) { } func TestNewKubeletReporterPlugin(t *testing.T) { + t.Parallel() + dir, err := tmpSocketDir() assert.NoError(t, err) defer os.RemoveAll(dir) @@ -275,8 +277,8 @@ func TestNewKubeletReporterPlugin(t *testing.T) { err = checkpointManager.CreateCheckpoint(pkgconsts.KubeletQoSResourceManagerCheckpoint, &testutil.MockCheckpoint{}) assert.NoError(t, err) - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) plugin.Stop() - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) } diff --git a/pkg/agent/resourcemanager/fetcher/kubelet/topology/podresourcesserver_test.go b/pkg/agent/resourcemanager/fetcher/kubelet/topology/podresourcesserver_test.go index b9a900d61..dcd7e3f74 100644 --- 
a/pkg/agent/resourcemanager/fetcher/kubelet/topology/podresourcesserver_test.go +++ b/pkg/agent/resourcemanager/fetcher/kubelet/topology/podresourcesserver_test.go @@ -132,6 +132,8 @@ func generateTestMetaServer(podList ...*v1.Pod) *metaserver.MetaServer { } func Test_getZoneAllocationsByPodResources(t *testing.T) { + t.Parallel() + type args struct { podList []*v1.Pod numaSocketZoneNodeMap map[util.ZoneNode]util.ZoneNode @@ -412,6 +414,8 @@ func Test_getZoneAllocationsByPodResources(t *testing.T) { } func Test_getZoneResourcesByAllocatableResources(t *testing.T) { + t.Parallel() + type args struct { allocatableResources *podresv1.AllocatableResourcesResponse numaSocketZoneNodeMap map[util.ZoneNode]util.ZoneNode @@ -739,6 +743,8 @@ func Test_getZoneResourcesByAllocatableResources(t *testing.T) { } func Test_podResourcesServerTopologyAdapterImpl_GetTopologyZones(t *testing.T) { + t.Parallel() + type fields struct { podList []*v1.Pod listPodResources *podresv1.ListPodResourcesResponse @@ -1420,6 +1426,8 @@ func Test_podResourcesServerTopologyAdapterImpl_GetTopologyZones(t *testing.T) { } func Test_podResourcesServerTopologyAdapterImpl_Run(t *testing.T) { + t.Parallel() + dir, err := tmpSocketDir() assert.NoError(t, err) defer os.RemoveAll(dir) @@ -1467,9 +1475,9 @@ func Test_podResourcesServerTopologyAdapterImpl_Run(t *testing.T) { err = checkpointManager.CreateCheckpoint(pkgconsts.KubeletQoSResourceManagerCheckpoint, &testutil.MockCheckpoint{}) assert.NoError(t, err) - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) cancel() close(notifier) - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) } diff --git a/pkg/agent/resourcemanager/fetcher/manager_test.go b/pkg/agent/resourcemanager/fetcher/manager_test.go index f5bd9e926..17f73c796 100644 --- a/pkg/agent/resourcemanager/fetcher/manager_test.go +++ b/pkg/agent/resourcemanager/fetcher/manager_test.go @@ -49,6 +49,7 @@ import ( const ( testPluginName = "fake-reporter-plugin-1" testPluginNameSecond = "fake-reporter-plugin-2" + testPluginNameThird = "fake-reporter-plugin-3" ) var ( @@ -82,6 +83,8 @@ func generateTestConfiguration(dir string) *katalystconfig.Configuration { } func TestNewManagerImpl(t *testing.T) { + t.Parallel() + socketDir, err := tmpSocketDir() testReporter := reporter.NewReporterManagerStub() require.NoError(t, err) @@ -95,6 +98,8 @@ func TestNewManagerImpl(t *testing.T) { // making sure that after registration, devices are correctly updated and if a re-registration // happens, we will NOT delete devices; and no orphaned devices left. 
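// Reviewer note (illustrative sketch, not part of the patch): the renamed checkpoint
// prefixes (ioutil.TempDir("", "checkpoint-TestXxx")), the per-test /tmp socket paths,
// and the extra fake-reporter-plugin-3 name all serve the same purpose: once tests run
// in parallel they must stop sharing on-disk fixtures. Where the Go version allows it,
// t.TempDir() is a common alternative, since each call returns a unique directory that
// the test framework removes automatically (sketch below; names are placeholders):
package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestCheckpointIsolation(t *testing.T) {
	t.Parallel()

	dir := t.TempDir() // unique per test, auto-removed; no defer os.RemoveAll needed
	ckpt := filepath.Join(dir, "checkpoint")
	if err := os.WriteFile(ckpt, []byte("state"), 0o600); err != nil {
		t.Fatal(err)
	}
}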
func TestReporterPluginReRegistration(t *testing.T) { + t.Parallel() + // change default klog level flagSet := flag.FlagSet{} klog.InitFlags(&flagSet) @@ -170,6 +175,8 @@ func TestReporterPluginReRegistration(t *testing.T) { } func TestHealthz(t *testing.T) { + t.Parallel() + // change default klog level flagSet := flag.FlagSet{} klog.InitFlags(&flagSet) @@ -198,7 +205,7 @@ func TestHealthz(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) defer cancel() - _, ch, p := setup(t, ctx, content1, nil, socketDir, testPluginName, testReporter) + _, ch, p := setup(t, ctx, content1, nil, socketDir, testPluginNameThird, testReporter) select { case <-ch: diff --git a/pkg/agent/resourcemanager/fetcher/plugin/endpoint_test.go b/pkg/agent/resourcemanager/fetcher/plugin/endpoint_test.go index 656960607..3489f3eb1 100644 --- a/pkg/agent/resourcemanager/fetcher/plugin/endpoint_test.go +++ b/pkg/agent/resourcemanager/fetcher/plugin/endpoint_test.go @@ -39,7 +39,9 @@ var ( ) func TestNewEndpoint(t *testing.T) { - socketDir := path.Join("/tmp") + t.Parallel() + + socketDir := path.Join("/tmp/TestNewEndpoint") content := []*v1alpha1.ReportContent{ { @@ -53,7 +55,7 @@ func TestNewEndpoint(t *testing.T) { } func TestRun(t *testing.T) { - socket := path.Join("/tmp") + socket := path.Join("/tmp/TestRun") content := []*v1alpha1.ReportContent{ { @@ -144,7 +146,9 @@ func TestRun(t *testing.T) { } func TestGetReportContent(t *testing.T) { - socket := path.Join("/tmp") + t.Parallel() + + socket := path.Join("/tmp/TestGetReportContent") content := []*v1alpha1.ReportContent{ { diff --git a/pkg/agent/resourcemanager/fetcher/system/systemplugin_test.go b/pkg/agent/resourcemanager/fetcher/system/systemplugin_test.go index d18acefa5..97966570f 100644 --- a/pkg/agent/resourcemanager/fetcher/system/systemplugin_test.go +++ b/pkg/agent/resourcemanager/fetcher/system/systemplugin_test.go @@ -45,6 +45,8 @@ func generateTestConfiguration(t *testing.T) *config.Configuration { } func Test_systemPlugin_GetReportContent(t *testing.T) { + t.Parallel() + genericClient := &client.GenericClientSet{ KubeClient: fake.NewSimpleClientset(), InternalClient: internalfake.NewSimpleClientset(), diff --git a/pkg/agent/resourcemanager/reporter/cnr/cnrreporter_test.go b/pkg/agent/resourcemanager/reporter/cnr/cnrreporter_test.go index 18c5992fa..308b58326 100644 --- a/pkg/agent/resourcemanager/reporter/cnr/cnrreporter_test.go +++ b/pkg/agent/resourcemanager/reporter/cnr/cnrreporter_test.go @@ -88,6 +88,8 @@ func testMarshal(t *testing.T, v interface{}) []byte { } func Test_parseReportFieldToCNR(t *testing.T) { + t.Parallel() + type args struct { cnr *nodev1alpha1.CustomNodeResource reportField v1alpha1.ReportField @@ -255,6 +257,8 @@ func Test_parseReportFieldToCNR(t *testing.T) { } func Test_initializeCNRFields(t *testing.T) { + t.Parallel() + type args struct { cnr *nodev1alpha1.CustomNodeResource field v1alpha1.ReportField @@ -357,6 +361,8 @@ func Test_initializeCNRFields(t *testing.T) { } func Test_cnrReporterImpl_Update(t *testing.T) { + t.Parallel() + type fields struct { defaultCNR *nodev1alpha1.CustomNodeResource } diff --git a/pkg/agent/resourcemanager/reporter/manager_test.go b/pkg/agent/resourcemanager/reporter/manager_test.go index 275e80e09..46e4176e0 100644 --- a/pkg/agent/resourcemanager/reporter/manager_test.go +++ b/pkg/agent/resourcemanager/reporter/manager_test.go @@ -86,6 +86,8 @@ func generateTestConfiguration(t *testing.T) *config.Configuration { } func TestNewReporterManager(t *testing.T) { + t.Parallel() + 
testClientSet := generateTestGenericClientSet() testMetricEmitter := &metrics.DummyMetrics{} testConfiguration := generateTestConfiguration(t) @@ -99,6 +101,8 @@ func TestNewReporterManager(t *testing.T) { } func Test_aggregateReportFieldsByGVK(t *testing.T) { + t.Parallel() + type args struct { reportResponses map[string]*v1alpha1.GetReportContentResponse } @@ -148,6 +152,8 @@ func Test_aggregateReportFieldsByGVK(t *testing.T) { } func Test_managerImpl_PushContents(t *testing.T) { + t.Parallel() + type fields struct { conf *config.Configuration reporters map[v1.GroupVersionKind]Reporter @@ -206,6 +212,8 @@ func Test_managerImpl_PushContents(t *testing.T) { } func Test_managerImpl_Run(t *testing.T) { + t.Parallel() + type fields struct { conf *config.Configuration reporters map[v1.GroupVersionKind]Reporter @@ -238,6 +246,8 @@ func Test_managerImpl_Run(t *testing.T) { } func Test_managerImpl_convertReportFieldsIfNeeded(t *testing.T) { + t.Parallel() + type fields struct { converters map[v1.GroupVersionKind]Converter } diff --git a/pkg/agent/sysadvisor/metacache/checkpoint_test.go b/pkg/agent/sysadvisor/metacache/checkpoint_test.go index 28ddef5e0..e9c6888b8 100644 --- a/pkg/agent/sysadvisor/metacache/checkpoint_test.go +++ b/pkg/agent/sysadvisor/metacache/checkpoint_test.go @@ -27,6 +27,8 @@ import ( ) func TestCheckpoint(t *testing.T) { + t.Parallel() + cp := NewMetaCacheCheckpoint() cp.PoolEntries = map[string]*types.PoolInfo{ "p1": { diff --git a/pkg/agent/sysadvisor/plugin/metric-emitter/metric_emitter_test.go b/pkg/agent/sysadvisor/plugin/metric-emitter/metric_emitter_test.go index a008bd2a7..a7ec21ae8 100644 --- a/pkg/agent/sysadvisor/plugin/metric-emitter/metric_emitter_test.go +++ b/pkg/agent/sysadvisor/plugin/metric-emitter/metric_emitter_test.go @@ -49,6 +49,8 @@ func generateTestConfiguration(t *testing.T) *config.Configuration { // todo: change to dummy malachite implementation instead of fake testing func Test_noneExistMetricsFetcher(t *testing.T) { + t.Parallel() + client := &client.GenericClientSet{ KubeClient: fake.NewSimpleClientset(), InternalClient: internalfake.NewSimpleClientset(), @@ -60,7 +62,7 @@ func Test_noneExistMetricsFetcher(t *testing.T) { if err == nil { ctx, cancel := context.WithCancel(context.Background()) go meta.Run(ctx) - time.Sleep(100 * time.Millisecond) + time.Sleep(10 * time.Millisecond) cancel() } @@ -74,5 +76,5 @@ func Test_noneExistMetricsFetcher(t *testing.T) { assert.NoError(t, err) go f.Run(context.Background()) - time.Sleep(time.Second * 3) + time.Sleep(time.Millisecond * 30) } diff --git a/pkg/agent/sysadvisor/plugin/metric-emitter/syncer/pod/pod_test.go b/pkg/agent/sysadvisor/plugin/metric-emitter/syncer/pod/pod_test.go index c81cd13b7..65fc1c4af 100644 --- a/pkg/agent/sysadvisor/plugin/metric-emitter/syncer/pod/pod_test.go +++ b/pkg/agent/sysadvisor/plugin/metric-emitter/syncer/pod/pod_test.go @@ -45,6 +45,8 @@ func generateTestConfiguration(t *testing.T) *config.Configuration { } func Test_podAddAndRemoved(t *testing.T) { + t.Parallel() + conf := generateTestConfiguration(t) conf.PodSyncPeriod = time.Second diff --git a/pkg/agent/sysadvisor/plugin/qosaware/qos_aware_test.go b/pkg/agent/sysadvisor/plugin/qosaware/qos_aware_test.go index 63c5c11c3..8a45cda59 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/qos_aware_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/qos_aware_test.go @@ -81,7 +81,9 @@ func generateTestMetaCache(t *testing.T, conf *config.Configuration) *metacache. 
} func TestQoSAwarePlugin(t *testing.T) { - checkpoinDir, err := ioutil.TempDir("", "checkpoint") + t.Parallel() + + checkpoinDir, err := ioutil.TempDir("", "checkpoint-TestQoSAwarePlugin") require.NoError(t, err) defer os.RemoveAll(checkpoinDir) diff --git a/pkg/agent/sysadvisor/plugin/qosaware/reporter/manager/resource/generic_test.go b/pkg/agent/sysadvisor/plugin/qosaware/reporter/manager/resource/generic_test.go index 243031060..5109f9f47 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/reporter/manager/resource/generic_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/reporter/manager/resource/generic_test.go @@ -30,6 +30,8 @@ import ( ) func TestNewGenericHeadroomManager(t *testing.T) { + t.Parallel() + type args struct { name v1.ResourceName useMilliValue bool @@ -77,6 +79,8 @@ func TestNewGenericHeadroomManager(t *testing.T) { } func TestGenericHeadroomManager_Allocatable(t *testing.T) { + t.Parallel() + r := hmadvisor.NewResourceAdvisorStub() reclaimOptions := GenericReclaimOptions{ EnableReclaim: true, diff --git a/pkg/agent/sysadvisor/plugin/qosaware/reporter/reporter_test.go b/pkg/agent/sysadvisor/plugin/qosaware/reporter/reporter_test.go index c65256725..83d57b315 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/reporter/reporter_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/reporter/reporter_test.go @@ -113,6 +113,8 @@ func setupReporterManager(t *testing.T, ctx context.Context, socketDir string, c } func TestNewReclaimedResourcedReporter(t *testing.T) { + t.Parallel() + socketDir, err := tmpSocketDir() require.NoError(t, err) defer os.RemoveAll(socketDir) @@ -134,6 +136,8 @@ func TestNewReclaimedResourcedReporter(t *testing.T) { } func TestReclaimedResourcedReporterWithManager(t *testing.T) { + t.Parallel() + socketDir, err := tmpSocketDir() require.NoError(t, err) defer os.RemoveAll(socketDir) diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go index 6d92ec552..100ba1af0 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/advisor_test.go @@ -130,6 +130,8 @@ func makeContainerInfo(podUID, namespace, podName, containerName, qoSLevel, owne } func TestAdvisorUpdate(t *testing.T) { + t.Parallel() + type metricItem struct { pod string container string @@ -768,7 +770,7 @@ func TestAdvisorUpdate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { now := time.Now() - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestAdvisorUpdate") require.NoError(t, err) defer func() { _ = os.RemoveAll(ckDir) }() diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/headroomassembler/assembler_common_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/headroomassembler/assembler_common_test.go index 7b995d767..091bd9d94 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/headroomassembler/assembler_common_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/headroomassembler/assembler_common_test.go @@ -83,6 +83,8 @@ func generateTestMetaServer(t *testing.T, cnr *v1alpha1.CustomNodeResource, podL } func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { + t.Parallel() + now := time.Now() type fields struct { @@ -90,7 +92,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { cnr *v1alpha1.CustomNodeResource podList []*v1.Pod reclaimedResourceConfiguration 
*reclaimedresource.ReclaimedResourceConfiguration - setFakeMetric func(store *utilmetric.MetricStore) + setFakeMetric func(store *metric.FakeMetricsFetcher) setMetaCache func(cache *metacache.MetaCacheImp) } tests := []struct { @@ -127,7 +129,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { }, }, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { for i := 0; i < 10; i++ { store.SetCPUMetric(i, pkgconsts.MetricCPUUsage, utilmetric.MetricData{Value: 30, Time: &now}) } @@ -172,7 +174,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { }, }, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { for i := 0; i < 10; i++ { store.SetCPUMetric(i, pkgconsts.MetricCPUUsage, utilmetric.MetricData{Value: 30, Time: &now}) } @@ -217,7 +219,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { }, }, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { for i := 0; i < 10; i++ { store.SetCPUMetric(i, pkgconsts.MetricCPUUsage, utilmetric.MetricData{Time: &now}) } @@ -262,7 +264,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { }, }, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { for i := 0; i < 96; i++ { store.SetCPUMetric(i, pkgconsts.MetricCPUUsage, utilmetric.MetricData{Value: 90, Time: &now}) } @@ -308,7 +310,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { }, }, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { now := time.Now() for i := 0; i < 96; i++ { store.SetCPUMetric(i, pkgconsts.MetricCPUUsage, utilmetric.MetricData{Value: 30, Time: &now}) @@ -329,7 +331,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestHeadroomAssemblerCommon_GetHeadroom") require.NoError(t, err) defer os.RemoveAll(ckDir) @@ -350,7 +352,7 @@ func TestHeadroomAssemblerCommon_GetHeadroom(t *testing.T) { metaServer := generateTestMetaServer(t, tt.fields.cnr, tt.fields.podList, metricsFetcher) ha := NewHeadroomAssemblerCommon(conf, nil, nil, nil, nil, nil, metaCache, metaServer, metrics.DummyMetrics{}) - store := utilmetric.GetMetricStoreInstance() + store := metricsFetcher.(*metric.FakeMetricsFetcher) tt.fields.setFakeMetric(store) got, err := ha.GetHeadroom() diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/provisionassembler/assembler_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/provisionassembler/assembler_test.go index a01cee001..0727836c3 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/provisionassembler/assembler_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/assembler/provisionassembler/assembler_test.go @@ -23,6 +23,8 @@ import ( ) func TestRegulatePoolSizes(t *testing.T) { + t.Parallel() + tests := []struct { name string available int diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/isolation/isolator_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/isolation/isolator_test.go index 2320b987c..66cc7ef0e 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/isolation/isolator_test.go +++ 
b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/isolation/isolator_test.go @@ -70,7 +70,9 @@ func makeContainerInfo(podUID, namespace, podName, containerName, qoSLevel, owne } func TestLoadIsolator(t *testing.T) { - ckDir, err := ioutil.TempDir("", "checkpoint") + t.Parallel() + + ckDir, err := ioutil.TempDir("", "checkpoint-TestLoadIsolator") require.NoError(t, err) defer func() { _ = os.RemoveAll(ckDir) }() @@ -446,7 +448,7 @@ func TestLoadIsolator(t *testing.T) { lockedOutFirstObserved: &now, }) } - time.Sleep(time.Second) + time.Sleep(time.Millisecond * 10) res := loader.GetIsolatedPods() assert.EqualValues(t, tc.expects, res) diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/provisionpolicy/policy_rama_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/provisionpolicy/policy_rama_test.go index 7c6ae4538..46b80e8ff 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/provisionpolicy/policy_rama_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/cpu/region/provisionpolicy/policy_rama_test.go @@ -118,6 +118,8 @@ func newTestPolicyRama(t *testing.T, checkpointDir string, stateFileDir string, } func TestPolicyRama(t *testing.T) { + t.Parallel() + tests := []struct { name string regionInfo types.RegionInfo diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/advisor_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/advisor_test.go index bf378ae30..9aa9219d4 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/advisor_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/advisor_test.go @@ -124,6 +124,8 @@ func newTestMemoryAdvisor(t *testing.T, pods []*v1.Pod, checkpointDir, stateFile } func TestUpdate(t *testing.T) { + t.Parallel() + tests := []struct { name string pools map[string]*types.PoolInfo @@ -255,7 +257,7 @@ func TestUpdate(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestUpdate") require.NoError(t, err) defer os.RemoveAll(ckDir) diff --git a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go index c40f6829b..2501ba63b 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/resource/memory/headroompolicy/policy_canonical_test.go @@ -113,6 +113,8 @@ func makeContainerInfo(podUID, namespace, podName, containerName, qoSLevel strin } func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { + t.Parallel() + now := time.Now() type fields struct { @@ -121,7 +123,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { memoryHeadroomConfiguration *memoryheadroom.MemoryHeadroomConfiguration policyCanonicalConfiguration *headroom.MemoryPolicyCanonicalConfiguration essentials types.ResourceEssentials - setFakeMetric func(store *utilmetric.MetricStore) + setFakeMetric func(store *metric.FakeMetricsFetcher) } type args struct { estimateNonReclaimedRequirement float64 @@ -181,7 +183,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { ResourceUpperBound: 100 << 30, ReservedForAllocate: 4 << 30, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { store.SetContainerMetric("pod1", "container1", pkgconsts.MetricMemRssContainer, 
utilmetric.MetricData{Value: 10 << 30, Time: &now}) store.SetContainerMetric("pod1", "container1", pkgconsts.MetricMemCacheContainer, utilmetric.MetricData{Value: 10 << 30, Time: &now}) @@ -255,7 +257,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { ResourceUpperBound: 100 << 30, ReservedForAllocate: 4 << 30, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { store.SetNodeMetric(pkgconsts.MetricMemTotalSystem, utilmetric.MetricData{Value: 100 << 30, Time: &now}) store.SetNodeMetric(pkgconsts.MetricMemFreeSystem, utilmetric.MetricData{Value: 60 << 30, Time: &now}) store.SetNodeMetric(pkgconsts.MetricMemScaleFactorSystem, utilmetric.MetricData{Value: 500, Time: &now}) @@ -334,7 +336,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { ResourceUpperBound: 100 << 30, ReservedForAllocate: 4 << 30, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { store.SetNodeMetric(pkgconsts.MetricMemTotalSystem, utilmetric.MetricData{Value: 100 << 30, Time: &now}) store.SetNodeMetric(pkgconsts.MetricMemFreeSystem, utilmetric.MetricData{Value: 30 << 30, Time: &now}) store.SetNodeMetric(pkgconsts.MetricMemScaleFactorSystem, utilmetric.MetricData{Value: 500, Time: &now}) @@ -417,7 +419,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { ResourceUpperBound: 100 << 30, ReservedForAllocate: 4 << 30, }, - setFakeMetric: func(store *utilmetric.MetricStore) { + setFakeMetric: func(store *metric.FakeMetricsFetcher) { store.SetNodeMetric(pkgconsts.MetricMemTotalSystem, utilmetric.MetricData{Value: 100 << 30, Time: &now}) store.SetNodeMetric(pkgconsts.MetricMemFreeSystem, utilmetric.MetricData{Value: 20 << 30, Time: &now}) store.SetNodeMetric(pkgconsts.MetricMemScaleFactorSystem, utilmetric.MetricData{Value: 500, Time: &now}) @@ -435,7 +437,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestPolicyCanonical_calculateMemoryBuffer") require.NoError(t, err) defer os.RemoveAll(ckDir) @@ -460,7 +462,7 @@ func TestPolicyCanonical_calculateMemoryBuffer(t *testing.T) { p := NewPolicyCanonical(conf, nil, metaCache, metaServer, metrics.DummyMetrics{}) - store := utilmetric.GetMetricStoreInstance() + store := metricsFetcher.(*metric.FakeMetricsFetcher) tt.fields.setFakeMetric(store) p.SetEssentials(tt.fields.essentials) diff --git a/pkg/agent/sysadvisor/plugin/qosaware/server/cpu_server_test.go b/pkg/agent/sysadvisor/plugin/qosaware/server/cpu_server_test.go index e4e6b7081..ceb1d2f2d 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/server/cpu_server_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/server/cpu_server_test.go @@ -79,6 +79,8 @@ func newTestCPUServer(t *testing.T) *cpuServer { } func TestCPUServerStartAndStop(t *testing.T) { + t.Parallel() + cs := newTestCPUServer(t) err := cs.Start() @@ -89,6 +91,8 @@ func TestCPUServerStartAndStop(t *testing.T) { } func TestCPUServerAddContainer(t *testing.T) { + t.Parallel() + tests := []struct { name string request *advisorsvc.AddContainerRequest @@ -149,6 +153,8 @@ func TestCPUServerAddContainer(t *testing.T) { } func TestCPUServerRemovePod(t *testing.T) { + t.Parallel() + tests := []struct { name string request *advisorsvc.RemovePodRequest @@ -219,6 +225,8 @@ func DeepCopyResponse(response 
*cpuadvisor.ListAndWatchResponse) (*cpuadvisor.Li } func TestCPUServerListAndWatch(t *testing.T) { + t.Parallel() + type ContainerInfo struct { request *advisorsvc.AddContainerRequest allocationInfo *cpuadvisor.AllocationInfo diff --git a/pkg/agent/sysadvisor/plugin/qosaware/server/memory_server_test.go b/pkg/agent/sysadvisor/plugin/qosaware/server/memory_server_test.go index 6fe4422da..ce6c153ed 100644 --- a/pkg/agent/sysadvisor/plugin/qosaware/server/memory_server_test.go +++ b/pkg/agent/sysadvisor/plugin/qosaware/server/memory_server_test.go @@ -79,6 +79,8 @@ func newTestMemoryServer(t *testing.T) *memoryServer { } func TestMemoryServerStartAndStop(t *testing.T) { + t.Parallel() + cs := newTestMemoryServer(t) err := cs.Start() @@ -89,6 +91,8 @@ func TestMemoryServerStartAndStop(t *testing.T) { } func TestMemoryServerListAndWatch(t *testing.T) { + t.Parallel() + type ContainerInfo struct { request *advisorsvc.AddContainerRequest } diff --git a/pkg/agent/sysadvisor/test/metacache_test.go b/pkg/agent/sysadvisor/test/metacache_test.go index 437948d02..5db0472c2 100644 --- a/pkg/agent/sysadvisor/test/metacache_test.go +++ b/pkg/agent/sysadvisor/test/metacache_test.go @@ -51,6 +51,8 @@ func newTestMetaCache(t *testing.T) *metacache.MetaCacheImp { } func TestContainer(t *testing.T) { + t.Parallel() + metaCache := newTestMetaCache(t) err := metaCache.SetContainerInfo("pod-0", "container-0", &types.ContainerInfo{}) @@ -75,6 +77,8 @@ func TestContainer(t *testing.T) { } func TestPool(t *testing.T) { + t.Parallel() + general.Infof("ready to start %v", "test pool") metaCache := newTestMetaCache(t) diff --git a/pkg/agent/sysadvisor/test/sysadvisor_test.go b/pkg/agent/sysadvisor/test/sysadvisor_test.go index b38c6ee96..29ddff5f7 100644 --- a/pkg/agent/sysadvisor/test/sysadvisor_test.go +++ b/pkg/agent/sysadvisor/test/sysadvisor_test.go @@ -61,8 +61,9 @@ func generatePluginConfig(t *testing.T, ckDir, sfDir string) *katalystconfig.Con } func TestAdvisor(t *testing.T) { + t.Parallel() - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestAdvisor") require.NoError(t, err) defer os.RemoveAll(ckDir) @@ -100,11 +101,13 @@ func TestAdvisor(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) go advisor.Run(ctx) - time.Sleep(time.Second * 3) + time.Sleep(time.Millisecond * 30) cancel() } func TestPlugins(t *testing.T) { + t.Parallel() + type args struct { initFn plugin.AdvisorPluginInitFunc } @@ -132,7 +135,7 @@ func TestPlugins(t *testing.T) { DynamicClient: dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()), } - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestPlugins") require.NoError(t, err) defer os.RemoveAll(ckDir) @@ -178,13 +181,15 @@ func TestPlugins(t *testing.T) { } func TestMetaServer(t *testing.T) { + t.Parallel() + client := &client.GenericClientSet{ KubeClient: fake.NewSimpleClientset(), InternalClient: internalfake.NewSimpleClientset(), DynamicClient: dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()), } - ckDir, err := ioutil.TempDir("", "checkpoint") + ckDir, err := ioutil.TempDir("", "checkpoint-TestMetaServer") require.NoError(t, err) defer os.RemoveAll(ckDir) diff --git a/pkg/agent/sysadvisor/types/helper_test.go b/pkg/agent/sysadvisor/types/helper_test.go index 063c3b6ef..0f2e248b6 100644 --- a/pkg/agent/sysadvisor/types/helper_test.go +++ b/pkg/agent/sysadvisor/types/helper_test.go @@ -28,6 +28,8 @@ import ( ) func TestClonePodEntries(t *testing.T) { + t.Parallel() 
+ ci := &ContainerInfo{ PodUID: "uid1", PodNamespace: "ns1", diff --git a/pkg/client/control/unstructured_test.go b/pkg/client/control/unstructured_test.go index 42237e3de..2529b7e05 100644 --- a/pkg/client/control/unstructured_test.go +++ b/pkg/client/control/unstructured_test.go @@ -36,6 +36,8 @@ func toTestUnstructured(t *testing.T, obj interface{}) *unstructured.Unstructure } func Test_prepareUnstructuredPatchBytes(t *testing.T) { + t.Parallel() + type args struct { oldObj *unstructured.Unstructured newObj *unstructured.Unstructured @@ -102,6 +104,8 @@ func Test_prepareUnstructuredPatchBytes(t *testing.T) { } func Test_prepareUnstructuredStatusPatchBytes(t *testing.T) { + t.Parallel() + type args struct { oldObj *unstructured.Unstructured newObj *unstructured.Unstructured diff --git a/pkg/client/control/vpa_test.go b/pkg/client/control/vpa_test.go index 5b39726c3..6e1585f24 100644 --- a/pkg/client/control/vpa_test.go +++ b/pkg/client/control/vpa_test.go @@ -28,6 +28,8 @@ import ( ) func TestPatchVPA(t *testing.T) { + t.Parallel() + oldvpa1 := &apis.KatalystVerticalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", @@ -74,6 +76,8 @@ func TestPatchVPA(t *testing.T) { } func TestPatchVPAStatus(t *testing.T) { + t.Parallel() + oldvpa1 := &apis.KatalystVerticalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", diff --git a/pkg/client/control/vparec_test.go b/pkg/client/control/vparec_test.go index 4193c9b37..e3c1094fe 100644 --- a/pkg/client/control/vparec_test.go +++ b/pkg/client/control/vparec_test.go @@ -28,6 +28,8 @@ import ( ) func TestPatchVPARec(t *testing.T) { + t.Parallel() + oldvparec1 := &apis.VerticalPodAutoscalerRecommendation{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", @@ -74,6 +76,8 @@ func TestPatchVPARec(t *testing.T) { } func TestPatchVPARecStatus(t *testing.T) { + t.Parallel() + oldvparec1 := &apis.VerticalPodAutoscalerRecommendation{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", diff --git a/pkg/client/genericclient_test.go b/pkg/client/genericclient_test.go index fc51a63be..c90b93c88 100644 --- a/pkg/client/genericclient_test.go +++ b/pkg/client/genericclient_test.go @@ -31,6 +31,8 @@ import ( ) func TestMetricsClient(t *testing.T) { + t.Parallel() + client := GenericClientSet{ CustomClient: &cmfake.FakeCustomMetricsClient{}, ExternalClient: &emfake.FakeExternalMetricsClient{}, diff --git a/pkg/config/metric/store.go b/pkg/config/metric/store.go index 6cc16e1ed..4121fdbc8 100644 --- a/pkg/config/metric/store.go +++ b/pkg/config/metric/store.go @@ -17,6 +17,8 @@ limitations under the License. 
package metric import ( + "time" + "k8s.io/apimachinery/pkg/labels" ) @@ -26,8 +28,12 @@ type StoreConfiguration struct { StoreServerSelector labels.Selector StoreName string + + GCPeriod time.Duration } func NewStoreConfiguration() *StoreConfiguration { - return &StoreConfiguration{} + return &StoreConfiguration{ + GCPeriod: time.Second * 10, + } } diff --git a/pkg/controller/kcc/cnc_test.go b/pkg/controller/kcc/cnc_test.go index b04814a7d..ebe28af07 100644 --- a/pkg/controller/kcc/cnc_test.go +++ b/pkg/controller/kcc/cnc_test.go @@ -36,6 +36,8 @@ import ( ) func TestCustomNodeConfigController_Run(t *testing.T) { + t.Parallel() + type args struct { cncAndKCCList []runtime.Object kccTargetList []runtime.Object diff --git a/pkg/controller/kcc/kcc_test.go b/pkg/controller/kcc/kcc_test.go index 5488dde49..64d246385 100644 --- a/pkg/controller/kcc/kcc_test.go +++ b/pkg/controller/kcc/kcc_test.go @@ -51,6 +51,8 @@ func generateTestDeletionTimestamp() *v1.Time { } func TestKatalystCustomConfigController_Run(t *testing.T) { + t.Parallel() + type args struct { kccList []runtime.Object kccTargetList []runtime.Object @@ -285,7 +287,7 @@ func TestKatalystCustomConfigController_Run(t *testing.T) { go kcc.Run() cache.WaitForCacheSync(kcc.ctx.Done(), kcc.syncedFunc...) - time.Sleep(1 * time.Second) + time.Sleep(100 * time.Millisecond) }) } } diff --git a/pkg/controller/kcc/kcct_test.go b/pkg/controller/kcc/kcct_test.go index f7bd64ab9..9d9f3590a 100644 --- a/pkg/controller/kcc/kcct_test.go +++ b/pkg/controller/kcc/kcct_test.go @@ -78,6 +78,8 @@ func generateTestNodeNamesTargetResource(name string, nodeNames []string) util.K } func TestKatalystCustomConfigTargetController_Run(t *testing.T) { + t.Parallel() + type args struct { kccList []runtime.Object kccTargetList []runtime.Object @@ -221,6 +223,8 @@ func TestKatalystCustomConfigTargetController_Run(t *testing.T) { } func Test_validateLabelSelectorWithOthers(t *testing.T) { + t.Parallel() + type args struct { labelSelector string targetResource util.KCCTargetResource @@ -292,6 +296,8 @@ func Test_validateLabelSelectorWithOthers(t *testing.T) { } func Test_validateTargetResourceNodeNamesWithOthers(t *testing.T) { + t.Parallel() + type args struct { targetResource util.KCCTargetResource otherResources []util.KCCTargetResource @@ -358,6 +364,8 @@ func Test_validateTargetResourceNodeNamesWithOthers(t *testing.T) { } func Test_validateTargetResourceGlobalWithOthers(t *testing.T) { + t.Parallel() + type args struct { targetResource util.KCCTargetResource otherResources []util.KCCTargetResource @@ -405,6 +413,8 @@ func Test_validateTargetResourceGlobalWithOthers(t *testing.T) { } func Test_updateTargetResourceStatus(t *testing.T) { + t.Parallel() + type args struct { targetResource util.KCCTargetResource isValid bool diff --git a/pkg/controller/kcc/util/kcct_test.go b/pkg/controller/kcc/util/kcct_test.go index 5e9c2838c..59b328c05 100644 --- a/pkg/controller/kcc/util/kcct_test.go +++ b/pkg/controller/kcc/util/kcct_test.go @@ -84,6 +84,8 @@ func generateTestTargetResourceWithTimeout(name, labelSelector string, nodeNames } func Test_findMatchedTargetConfig(t *testing.T) { + t.Parallel() + type args struct { cnc *apisv1alpha1.CustomNodeConfig configList []*unstructured.Unstructured @@ -204,6 +206,8 @@ func Test_findMatchedTargetConfig(t *testing.T) { } func TestUpdateKCCTGenericConditions(t *testing.T) { + t.Parallel() + type args struct { status *apisv1alpha1.GenericConfigStatus conditionType apisv1alpha1.ConfigConditionType diff --git 
a/pkg/controller/lifecycle/agent-healthz/healthz_controller_test.go b/pkg/controller/lifecycle/agent-healthz/healthz_controller_test.go index 181d4a30f..a273561d5 100644 --- a/pkg/controller/lifecycle/agent-healthz/healthz_controller_test.go +++ b/pkg/controller/lifecycle/agent-healthz/healthz_controller_test.go @@ -137,6 +137,8 @@ func NewFakeHealthzController(t *testing.T) (*HealthzController, error) { } func TestHealthzController(t *testing.T) { + t.Parallel() + ec, err := NewFakeHealthzController(t) if err != nil { klog.Errorf("get new fake cnr lifecycle err %v", err) diff --git a/pkg/controller/lifecycle/cnc_test.go b/pkg/controller/lifecycle/cnc_test.go index 7fc74cbe6..f7cbfdf11 100644 --- a/pkg/controller/lifecycle/cnc_test.go +++ b/pkg/controller/lifecycle/cnc_test.go @@ -35,6 +35,8 @@ import ( ) func TestCNCLifecycle_Run(t *testing.T) { + t.Parallel() + type fields struct { node *corev1.Node cnc *configapis.CustomNodeConfig @@ -140,7 +142,7 @@ func TestCNCLifecycle_Run(t *testing.T) { go cl.Run() cache.WaitForCacheSync(cl.ctx.Done(), cl.nodeListerSynced, cl.cncListerSynced) - time.Sleep(1 * time.Second) + time.Sleep(100 * time.Millisecond) gotCNC, err := cl.cncLister.Get(tt.fields.node.Name) assert.NoError(t, err) @@ -149,7 +151,7 @@ func TestCNCLifecycle_Run(t *testing.T) { // test recreate err = cl.client.InternalClient.ConfigV1alpha1().CustomNodeConfigs().Delete(context.Background(), tt.fields.node.Name, metav1.DeleteOptions{}) assert.NoError(t, err) - time.Sleep(1 * time.Second) + time.Sleep(100 * time.Millisecond) gotCNC, err = cl.cncLister.Get(tt.fields.node.Name) assert.NoError(t, err) diff --git a/pkg/controller/lifecycle/cnr_test.go b/pkg/controller/lifecycle/cnr_test.go index 946a96b63..3e872aa71 100644 --- a/pkg/controller/lifecycle/cnr_test.go +++ b/pkg/controller/lifecycle/cnr_test.go @@ -35,6 +35,8 @@ import ( ) func TestCNRLifecycle_Run(t *testing.T) { + t.Parallel() + type fields struct { node *corev1.Node cnr *nodeapis.CustomNodeResource @@ -140,7 +142,7 @@ func TestCNRLifecycle_Run(t *testing.T) { go cl.Run() cache.WaitForCacheSync(cl.ctx.Done(), cl.nodeListerSynced, cl.cnrListerSynced) - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) gotCNR, err := cl.cnrLister.Get(tt.fields.node.Name) assert.NoError(t, err) @@ -149,7 +151,7 @@ func TestCNRLifecycle_Run(t *testing.T) { // test recreate err = cl.client.InternalClient.NodeV1alpha1().CustomNodeResources().Delete(context.Background(), tt.fields.node.Name, metav1.DeleteOptions{}) assert.NoError(t, err) - time.Sleep(1 * time.Second) + time.Sleep(100 * time.Millisecond) gotCNR, err = cl.cnrLister.Get(tt.fields.node.Name) assert.NoError(t, err) diff --git a/pkg/controller/spd/spd_test.go b/pkg/controller/spd/spd_test.go index e0082871a..39ed2c6d0 100644 --- a/pkg/controller/spd/spd_test.go +++ b/pkg/controller/spd/spd_test.go @@ -47,6 +47,8 @@ var ( ) func TestSPDController_Run(t *testing.T) { + t.Parallel() + type fields struct { pod *v1.Pod workload *appsv1.StatefulSet @@ -228,7 +230,7 @@ func TestSPDController_Run(t *testing.T) { go spdController.Run() synced := cache.WaitForCacheSync(ctx.Done(), spdController.syncedFunc...) 
assert.True(t, synced) - time.Sleep(1 * time.Second) + time.Sleep(30 * time.Millisecond) targetSPD := tt.fields.spd if targetSPD == nil { @@ -250,6 +252,8 @@ func TestSPDController_Run(t *testing.T) { } func TestPodIndexerDuplicate(t *testing.T) { + t.Parallel() + spdConf := controller.NewSPDConfig() genericConfig := &generic.GenericConfiguration{} controllerConf := &controller.GenericControllerConfiguration{} @@ -271,6 +275,8 @@ func TestPodIndexerDuplicate(t *testing.T) { } func TestIndicatorUpdater(t *testing.T) { + t.Parallel() + var current float32 = 8.3 var value float32 = 23.1 @@ -578,7 +584,7 @@ func TestIndicatorUpdater(t *testing.T) { Current: &value, }, }) - time.Sleep(time.Second * 3) + time.Sleep(time.Millisecond * 30) newSPD, err := controlCtx.Client.InternalClient.WorkloadV1alpha1(). ServiceProfileDescriptors("default").Get(ctx, "spd1", metav1.GetOptions{}) assert.NoError(t, err) diff --git a/pkg/controller/vpa/algorithm/recommenders/avg_load_to_cpu_test.go b/pkg/controller/vpa/algorithm/recommenders/avg_load_to_cpu_test.go index 75517ffe8..9925f8ece 100644 --- a/pkg/controller/vpa/algorithm/recommenders/avg_load_to_cpu_test.go +++ b/pkg/controller/vpa/algorithm/recommenders/avg_load_to_cpu_test.go @@ -34,6 +34,8 @@ import ( ) func TestGetRecommendedPodResources(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string spd *workload.ServiceProfileDescriptor diff --git a/pkg/controller/vpa/recommend_test.go b/pkg/controller/vpa/recommend_test.go index 3c45c427c..eeaa86bd3 100644 --- a/pkg/controller/vpa/recommend_test.go +++ b/pkg/controller/vpa/recommend_test.go @@ -42,6 +42,8 @@ import ( ) func TestResourceRecommendController_Run(t *testing.T) { + t.Parallel() + type fields struct { workload *appsv1.StatefulSet spd *apiworkload.ServiceProfileDescriptor @@ -128,7 +130,7 @@ func TestResourceRecommendController_Run(t *testing.T) { go rrc.Run() synced := cache.WaitForCacheSync(ctx.Done(), rrc.syncedFunc...) 
assert.True(t, synced) - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) }) } } diff --git a/pkg/controller/vpa/util/api_test.go b/pkg/controller/vpa/util/api_test.go index 73e18af83..b1699a977 100644 --- a/pkg/controller/vpa/util/api_test.go +++ b/pkg/controller/vpa/util/api_test.go @@ -31,6 +31,8 @@ import ( ) func TestUpdateVPAConditions(t *testing.T) { + t.Parallel() + oldvpa1 := &apis.KatalystVerticalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", @@ -114,6 +116,8 @@ func TestUpdateVPAConditions(t *testing.T) { } func TestUpdateAPIVPAConditions(t *testing.T) { + t.Parallel() + oldvpa1 := &apis.KatalystVerticalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", @@ -187,6 +191,8 @@ func TestUpdateAPIVPAConditions(t *testing.T) { } func TestUpdateVPARecConditions(t *testing.T) { + t.Parallel() + oldvparec1 := &apis.VerticalPodAutoscalerRecommendation{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", diff --git a/pkg/controller/vpa/vpa_test.go b/pkg/controller/vpa/vpa_test.go index b2d882904..2565b5e9a 100644 --- a/pkg/controller/vpa/vpa_test.go +++ b/pkg/controller/vpa/vpa_test.go @@ -59,6 +59,8 @@ var makePod = func(name string, annotations, labels map[string]string, owners [] } func TestVPAControllerSyncVPA(t *testing.T) { + t.Parallel() + pod1 := makePod("pod1", map[string]string{}, map[string]string{"workload": "sts1"}, @@ -394,6 +396,8 @@ func TestVPAControllerSyncVPA(t *testing.T) { } func TestVPAControllerSyncPod(t *testing.T) { + t.Parallel() + pod1 := makePod("pod1", map[string]string{}, map[string]string{"workload": "sts1"}, @@ -542,7 +546,7 @@ func TestVPAControllerSyncPod(t *testing.T) { assert.NoError(t, err) vpaController.vpaSyncQueue.Add(key) - err = wait.PollImmediate(time.Second, time.Second*5, func() (bool, error) { + err = wait.PollImmediate(time.Millisecond*200, time.Second*5, func() (bool, error) { p, _ := controlCtx.Client.KubeClient.CoreV1().Pods(tc.newPod.Namespace).Get(context.TODO(), tc.newPod.Name, metav1.GetOptions{}) eq := reflect.DeepEqual(tc.newPod, p) return eq, nil @@ -557,6 +561,8 @@ func TestVPAControllerSyncPod(t *testing.T) { } func TestVPAControllerSyncWorkload(t *testing.T) { + t.Parallel() + pod1 := makePod("pod1", map[string]string{}, map[string]string{"workload": "sts1"}, @@ -697,7 +703,7 @@ func TestVPAControllerSyncWorkload(t *testing.T) { _, err = controlCtx.Client.KubeClient.CoreV1().Pods(tc.pod.Namespace).Create(context.TODO(), tc.pod, metav1.CreateOptions{}) assert.NoError(t, err) - err = wait.PollImmediate(time.Second, time.Second*5, func() (bool, error) { + err = wait.PollImmediate(time.Millisecond*20, time.Second*5, func() (bool, error) { p, _ := controlCtx.Client.KubeClient.CoreV1().Pods(tc.newPod.Namespace).Get(context.TODO(), tc.newPod.Name, metav1.GetOptions{}) eq := reflect.DeepEqual(tc.newPod, p) return eq, nil @@ -712,6 +718,8 @@ func TestVPAControllerSyncWorkload(t *testing.T) { } func TestPodIndexerDuplicate(t *testing.T) { + t.Parallel() + vpaConf := controller.NewVPAConfig() genericConf := &generic.GenericConfiguration{} controllerConf := &controller.GenericControllerConfiguration{} @@ -744,6 +752,8 @@ func TestPodIndexerDuplicate(t *testing.T) { } func TestSyncPerformance(t *testing.T) { + t.Parallel() + flagSet := flag.FlagSet{} klog.InitFlags(&flagSet) _ = flagSet.Parse([]string{ @@ -795,7 +805,7 @@ func TestSyncPerformance(t *testing.T) { }, }) - amount := 100000 + amount := 1 for i := 1; i <= amount; i++ { name := fmt.Sprintf("pod-%v", i) kubeObj = append(kubeObj, makePod(name, diff --git 
a/pkg/controller/vpa/vparec_test.go b/pkg/controller/vpa/vparec_test.go index 0f332ce52..270636587 100644 --- a/pkg/controller/vpa/vparec_test.go +++ b/pkg/controller/vpa/vparec_test.go @@ -42,6 +42,8 @@ import ( ) func TestVPARecControllerSyncVPA(t *testing.T) { + t.Parallel() + pod1 := makePod("pod1", map[string]string{apiconsts.WorkloadAnnotationVPAEnabledKey: apiconsts.WorkloadAnnotationVPAEnabled}, map[string]string{"workload": "sts1"}, @@ -353,6 +355,8 @@ func TestVPARecControllerSyncVPA(t *testing.T) { } func TestVPARecControllerSyncVPARec(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string vparecOld *apis.VerticalPodAutoscalerRecommendation diff --git a/pkg/custom-metric/collector/prometheus/scrape_test.go b/pkg/custom-metric/collector/prometheus/scrape_test.go index 1d7fc9d5f..9a4f839a7 100644 --- a/pkg/custom-metric/collector/prometheus/scrape_test.go +++ b/pkg/custom-metric/collector/prometheus/scrape_test.go @@ -31,6 +31,8 @@ import ( ) func Test_scrape(t *testing.T) { + t.Parallel() + ctx := context.Background() server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -57,7 +59,7 @@ without_timestamp{label_test="without_timestamp",namespace="n1",object="pod",obj s, _ := NewScrapeManager(ctx, time.Hour, client, "fake-node", server.URL, metrics.DummyMetrics{}) // to make sure the metric will only be collected once s.scrape() - time.Sleep(time.Second * 5) + time.Sleep(time.Millisecond * 300) handler := func(d []*data.MetricSeries, tags ...metrics.MetricTag) error { assert.NotNil(t, d) diff --git a/pkg/custom-metric/provider/provider_test.go b/pkg/custom-metric/provider/provider_test.go index 3e2c1cf51..71050df96 100644 --- a/pkg/custom-metric/provider/provider_test.go +++ b/pkg/custom-metric/provider/provider_test.go @@ -115,6 +115,8 @@ func generateStorePod(namespace, name, nameLabel string, port int32) *v1.Pod { } func TestWithLocalStore(t *testing.T) { + t.Parallel() + ctx := context.Background() p1 := generateStorePodMeta("ns-1", "pod-1", "full_metric_with_conflict_time", 11) @@ -144,9 +146,21 @@ func TestWithLocalStore(t *testing.T) { testProvider(t, p, s, ctx, baseCtx, genericConf, storeConf) } -func TestWithRemoteStore(t *testing.T) { +func TestWithRemoteStoreOne(t *testing.T) { + t.Parallel() + testWithRemoteStoreWithIndex(t, []int{1}) +} + +func TestWithRemoteStoreTwo(t *testing.T) { + t.Parallel() + testWithRemoteStoreWithIndex(t, []int{1, 2}) +} + +func TestWithRemoteStoreThree(t *testing.T) { + t.Parallel() + testWithRemoteStoreWithIndex(t, []int{1, 2, 3}) } @@ -161,6 +175,7 @@ func testWithRemoteStoreWithIndex(t *testing.T, index []int) { "test": "local-store", })), StoreServerReplicaTotal: len(index), + GCPeriod: time.Second, } lp1 := generateStorePodMeta("ns-1", "pod-1", "full_metric_with_conflict_time", 11) diff --git a/pkg/custom-metric/store/data/cache_test.go b/pkg/custom-metric/store/data/cache_test.go index 29c5af14d..3e403d917 100644 --- a/pkg/custom-metric/store/data/cache_test.go +++ b/pkg/custom-metric/store/data/cache_test.go @@ -26,6 +26,8 @@ import ( ) func Test_cache(t *testing.T) { + t.Parallel() + c := NewCachedMetric(metrics.DummyMetrics{}) var ( diff --git a/pkg/custom-metric/store/local/local_store.go b/pkg/custom-metric/store/local/local_store.go index b654eb72e..d21304fb6 100644 --- a/pkg/custom-metric/store/local/local_store.go +++ b/pkg/custom-metric/store/local/local_store.go @@ -98,7 +98,7 @@ func (l *LocalMemoryMetricStore) Start() error { klog.Info("started local memory store") 
l.syncSuccess = true - go wait.Until(l.gc, 10*time.Second, l.ctx.Done()) + go wait.Until(l.gc, l.storeConf.GCPeriod, l.ctx.Done()) go wait.Until(l.monitor, time.Minute*3, l.ctx.Done()) return nil } @@ -187,7 +187,7 @@ func (l *LocalMemoryMetricStore) ListMetricMeta(_ context.Context, withObject bo func (l *LocalMemoryMetricStore) gc() { begin := time.Now() defer func() { - klog.Infof("[LocalMemoryMetricStore] gc costs %s", time.Since(begin).String()) + klog.V(6).Infof("[LocalMemoryMetricStore] gc costs %s", time.Since(begin).String()) }() expiredTime := begin.Add(-1 * l.genericConf.OutOfDataPeriod) diff --git a/pkg/metaserver/agent/agent_test.go b/pkg/metaserver/agent/agent_test.go index 28487b72f..6fba6d320 100644 --- a/pkg/metaserver/agent/agent_test.go +++ b/pkg/metaserver/agent/agent_test.go @@ -78,9 +78,11 @@ func constructPodFetcher(names []string) pod.PodFetcher { } func TestFetcher(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) conf, _ := options.NewOptions().Config() - conf.CheckpointManagerDir = "/tmp/test" + conf.CheckpointManagerDir = "/tmp/TestFetcher" bCtx, _ := katalyst_base.GenerateFakeGenericContext(nil, nil, nil) agent, err := NewMetaAgent(conf, bCtx.Client, metrics.DummyMetrics{}) diff --git a/pkg/metaserver/agent/metric/fake_metric.go b/pkg/metaserver/agent/metric/fake_metric.go index 41548e588..c91b2556a 100644 --- a/pkg/metaserver/agent/metric/fake_metric.go +++ b/pkg/metaserver/agent/metric/fake_metric.go @@ -43,7 +43,7 @@ import ( // NewFakeMetricsFetcher returns a fake MetricsFetcher. func NewFakeMetricsFetcher(emitter metrics.MetricEmitter) MetricsFetcher { return &FakeMetricsFetcher{ - metricStore: metric.GetMetricStoreInstance(), + metricStore: metric.NewMetricStore(), emitter: emitter, } } diff --git a/pkg/metaserver/agent/metric/malachite/cgroup/cgroup_test.go b/pkg/metaserver/agent/metric/malachite/cgroup/cgroup_test.go index b02e39493..2b46ed7b4 100644 --- a/pkg/metaserver/agent/metric/malachite/cgroup/cgroup_test.go +++ b/pkg/metaserver/agent/metric/malachite/cgroup/cgroup_test.go @@ -96,6 +96,8 @@ func init() { } func TestGetCgroupStats(t *testing.T) { + t.Parallel() + cgroupData := map[string]*MalachiteCgroupResponse{ "v1-path": { Status: 0, diff --git a/pkg/metaserver/agent/metric/malachite/cgroup/cri.go b/pkg/metaserver/agent/metric/malachite/cgroup/cri.go index 7b243aeb4..f9273e79e 100644 --- a/pkg/metaserver/agent/metric/malachite/cgroup/cri.go +++ b/pkg/metaserver/agent/metric/malachite/cgroup/cri.go @@ -37,7 +37,7 @@ type containerInfo struct { name string } -//nolint +// nolint func getConnection(endPoint string) (*grpc.ClientConn, error) { if endPoint == "" { return nil, fmt.Errorf("endpoint is not set") diff --git a/pkg/metaserver/agent/metric/malachite/client/client.go b/pkg/metaserver/agent/metric/malachite/client/client.go index 0e7daea0c..62b5e337e 100644 --- a/pkg/metaserver/agent/metric/malachite/client/client.go +++ b/pkg/metaserver/agent/metric/malachite/client/client.go @@ -20,6 +20,7 @@ import ( "fmt" "io/ioutil" "net/http" + "sync" ) const ( @@ -46,6 +47,7 @@ const ( var DefaultClient = New() type Client struct { + sync.RWMutex urls map[string]string } @@ -66,10 +68,15 @@ func New() MalachiteClient { // SetURL is used to implement UT for func (c *Client) SetURL(urls map[string]string) { + c.Lock() + defer c.Unlock() c.urls = urls } func (c *Client) GetCgroupStats(cgroupPath string) ([]byte, error) { + c.RLock() + defer c.RUnlock() + url, ok := c.urls[CgroupResource] if !ok { return nil, 
fmt.Errorf("no url for %v", CgroupResource) @@ -99,6 +106,9 @@ func (c *Client) GetCgroupStats(cgroupPath string) ([]byte, error) { } func (c *Client) GetSystemStats(kind SystemResourceKind) ([]byte, error) { + c.RLock() + defer c.RUnlock() + resource := "" switch kind { case Compute: diff --git a/pkg/metaserver/agent/metric/malachite/system/system_test.go b/pkg/metaserver/agent/metric/malachite/system/system_test.go index 1eacf71b1..a9cbd91bf 100644 --- a/pkg/metaserver/agent/metric/malachite/system/system_test.go +++ b/pkg/metaserver/agent/metric/malachite/system/system_test.go @@ -83,6 +83,8 @@ func getSystemTestServer(data []byte) *httptest.Server { } func TestGetSystemComputeStats(t *testing.T) { + t.Parallel() + data, _ := json.Marshal(fakeSystemCompute) server := getSystemTestServer(data) defer server.Close() @@ -101,6 +103,8 @@ func TestGetSystemComputeStats(t *testing.T) { } func TestGetSystemMemoryStats(t *testing.T) { + t.Parallel() + data, _ := json.Marshal(fakeSystemMemory) server := getSystemTestServer(data) defer server.Close() @@ -119,6 +123,8 @@ func TestGetSystemMemoryStats(t *testing.T) { } func TestGetSystemIOStats(t *testing.T) { + t.Parallel() + data, _ := json.Marshal(fakeSystemIO) server := getSystemTestServer(data) defer server.Close() @@ -137,6 +143,8 @@ func TestGetSystemIOStats(t *testing.T) { } func TestGetSystemNetStats(t *testing.T) { + t.Parallel() + data, _ := json.Marshal(fakeSystemNet) server := getSystemTestServer(data) defer server.Close() @@ -155,6 +163,8 @@ func TestGetSystemNetStats(t *testing.T) { } func TestGetSystemNonExistStats(t *testing.T) { + t.Parallel() + server := getSystemTestServer([]byte{}) defer server.Close() diff --git a/pkg/metaserver/agent/metric/metric.go b/pkg/metaserver/agent/metric/metric.go index 18302fc98..fba3dd932 100644 --- a/pkg/metaserver/agent/metric/metric.go +++ b/pkg/metaserver/agent/metric/metric.go @@ -125,14 +125,10 @@ type MetricsFetcher interface { GetCgroupNumaMetric(cgroupPath, numaNode, metricName string) (metric.MetricData, error) } -var ( - malachiteMetricsFetcherInitOnce sync.Once -) - // NewMalachiteMetricsFetcher returns the default implementation of MetricsFetcher. 
func NewMalachiteMetricsFetcher(emitter metrics.MetricEmitter, conf *config.Configuration) MetricsFetcher { return &MalachiteMetricsFetcher{ - metricStore: metric.GetMetricStoreInstance(), + metricStore: metric.NewMetricStore(), emitter: emitter, conf: conf, registeredNotifier: map[MetricsScope]map[string]NotifiedData{ @@ -153,11 +149,12 @@ type MalachiteMetricsFetcher struct { registeredNotifier map[MetricsScope]map[string]NotifiedData sync.RWMutex - emitter metrics.MetricEmitter + startOnce sync.Once + emitter metrics.MetricEmitter } func (m *MalachiteMetricsFetcher) Run(ctx context.Context) { - malachiteMetricsFetcherInitOnce.Do(func() { + m.startOnce.Do(func() { go wait.Until(func() { m.sample() }, time.Second*5, ctx.Done()) }) } diff --git a/pkg/metaserver/agent/metric/metric_test.go b/pkg/metaserver/agent/metric/metric_test.go index 4bf2549bd..cf98e67d6 100644 --- a/pkg/metaserver/agent/metric/metric_test.go +++ b/pkg/metaserver/agent/metric/metric_test.go @@ -32,6 +32,8 @@ import ( ) func Test_noneExistMetricsFetcher(t *testing.T) { + t.Parallel() + var err error implement := NewMalachiteMetricsFetcher(metrics.DummyMetrics{}, nil) @@ -133,6 +135,8 @@ func Test_noneExistMetricsFetcher(t *testing.T) { } func Test_notifySystem(t *testing.T) { + t.Parallel() + now := time.Now() f := NewMalachiteMetricsFetcher(metrics.DummyMetrics{}, nil) @@ -195,10 +199,12 @@ func Test_notifySystem(t *testing.T) { } }() - time.Sleep(time.Second * 10) + time.Sleep(time.Millisecond * 3) } func TestStore_Aggregate(t *testing.T) { + t.Parallel() + now := time.Now() f := NewMalachiteMetricsFetcher(metrics.DummyMetrics{}, nil).(*MalachiteMetricsFetcher) diff --git a/pkg/metaserver/config/checkpoint.go b/pkg/metaserver/config/checkpoint.go index 25fb13011..94a323b55 100644 --- a/pkg/metaserver/config/checkpoint.go +++ b/pkg/metaserver/config/checkpoint.go @@ -19,6 +19,7 @@ package config import ( "encoding/json" "reflect" + "sync" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" @@ -41,6 +42,11 @@ type TargetConfigData struct { // Data holds checkpoint data and its checksum type Data struct { + sync.RWMutex + Item *DataItem +} + +type DataItem struct { // Data maps from kind to target config data Data map[string]TargetConfigData Checksum checksum.Checksum @@ -49,25 +55,39 @@ type Data struct { // NewCheckpoint returns an instance of Checkpoint func NewCheckpoint(configResponses map[string]TargetConfigData) ConfigManagerCheckpoint { return &Data{ - Data: configResponses, + Item: &DataItem{ + Data: configResponses, + }, } } func (d *Data) MarshalCheckpoint() ([]byte, error) { - d.Checksum = checksum.New(d.Data) - return json.Marshal(*d) + d.Lock() + defer d.Unlock() + + d.Item.Checksum = checksum.New(d.Item.Data) + return json.Marshal(*(d.Item)) } func (d *Data) UnmarshalCheckpoint(blob []byte) error { - return json.Unmarshal(blob, d) + d.Lock() + defer d.Unlock() + + return json.Unmarshal(blob, d.Item) } func (d *Data) VerifyChecksum() error { - return d.Checksum.Verify(d.Data) + d.RLock() + defer d.RUnlock() + + return d.Item.Checksum.Verify(d.Item.Data) } func (d *Data) GetData(kind string) (reflect.Value, metav1.Time) { - if data, ok := d.Data[kind]; ok { + d.RLock() + defer d.RUnlock() + + if data, ok := d.Item.Data[kind]; ok { configField := reflect.ValueOf(data.Value).Elem().FieldByName(kind) return configField, data.Timestamp } @@ -76,8 +96,11 @@ func (d *Data) SetData(kind string, val
reflect.Value, t metav1.Time) { - if d.Data == nil { - d.Data = make(map[string]TargetConfigData) + d.Lock() + defer d.Unlock() + + if d.Item.Data == nil { + d.Item.Data = make(map[string]TargetConfigData) } // get target dynamic configField by kind @@ -90,7 +113,7 @@ func (d *Data) SetData(kind string, val reflect.Value, t metav1.Time) { specField := configField.Elem().FieldByName("Spec") specField.Set(specValue) - d.Data[kind] = TargetConfigData{ + d.Item.Data[kind] = TargetConfigData{ Value: dynamicConfiguration, Timestamp: metav1.Unix(t.Unix(), 0), } diff --git a/pkg/metaserver/config/checkpoint_test.go b/pkg/metaserver/config/checkpoint_test.go index 7ea11baa0..e17340222 100644 --- a/pkg/metaserver/config/checkpoint_test.go +++ b/pkg/metaserver/config/checkpoint_test.go @@ -29,6 +29,8 @@ import ( ) func TestNewCheckpoint(t *testing.T) { + t.Parallel() + now := metav1.Now() kind := crd.ResourceKindAdminQoSConfiguration dynamicCRD := &crd.DynamicConfigCRD{ diff --git a/pkg/metaserver/config/config_test.go b/pkg/metaserver/config/config_test.go index ce0694335..8d7596353 100644 --- a/pkg/metaserver/config/config_test.go +++ b/pkg/metaserver/config/config_test.go @@ -94,6 +94,8 @@ func constructKatalystCustomConfigLoader() ConfigurationLoader { } func Test_katalystCustomConfigLoader_LoadConfig(t *testing.T) { + t.Parallel() + type args struct { ctx context.Context gvr metav1.GroupVersionResource diff --git a/pkg/metaserver/config/manager_test.go b/pkg/metaserver/config/manager_test.go index 31306e0ae..83140ba74 100644 --- a/pkg/metaserver/config/manager_test.go +++ b/pkg/metaserver/config/manager_test.go @@ -156,6 +156,8 @@ func constructTestDynamicConfigManager(t *testing.T, nodeName string, evictionCo } func TestNewDynamicConfigManager(t *testing.T) { + t.Parallel() + nodeName := "test-node" evictionConfiguration := generateTestEvictionConfiguration(map[v1.ResourceName]float64{ v1.ResourceCPU: 1.2, @@ -182,6 +184,8 @@ func TestNewDynamicConfigManager(t *testing.T) { } func TestDynamicConfigManager_getConfig(t *testing.T) { + t.Parallel() + type fields struct { manager *DynamicConfigManager } @@ -239,6 +243,8 @@ func TestDynamicConfigManager_getConfig(t *testing.T) { } func Test_applyDynamicConfig(t *testing.T) { + t.Parallel() + type args struct { currentConfig *dynamic.Configuration dynamicConf *crd.DynamicConfigCRD @@ -355,6 +361,8 @@ func Test_applyDynamicConfig(t *testing.T) { } func Test_getGVRToKindMap(t *testing.T) { + t.Parallel() + tests := []struct { name string wantGVR schema.GroupVersionResource @@ -390,6 +398,8 @@ func checkGVRToGVKMap(gvr schema.GroupVersionResource, wantGVK schema.GroupVersi } func Test_updateDynamicConf(t *testing.T) { + t.Parallel() + type args struct { resourceGVRMap map[string]metav1.GroupVersionResource gvrToKind map[schema.GroupVersionResource]schema.GroupVersionKind diff --git a/pkg/metaserver/external/cgroupid/manager_linux_test.go b/pkg/metaserver/external/cgroupid/manager_linux_test.go index c06c19284..6ac17ac1e 100644 --- a/pkg/metaserver/external/cgroupid/manager_linux_test.go +++ b/pkg/metaserver/external/cgroupid/manager_linux_test.go @@ -43,6 +43,8 @@ var ( ) func TestGetCgroupIDForContainer(t *testing.T) { + t.Parallel() + cgroupIDManager := NewCgroupIDManager(podFetcher).(*cgroupIDManagerImpl) assert.NotNil(t, cgroupIDManager) @@ -79,6 +81,8 @@ func TestGetCgroupIDForContainer(t *testing.T) { } func TestListCgroupIDsForPod(t *testing.T) { + t.Parallel() + cgroupIDManager := NewCgroupIDManager(podFetcher).(*cgroupIDManagerImpl) 
assert.NotNil(t, cgroupIDManager) @@ -115,6 +119,8 @@ func TestListCgroupIDsForPod(t *testing.T) { } func TestGetAbsentContainers(t *testing.T) { + t.Parallel() + cgroupIDManager := NewCgroupIDManager(podFetcher).(*cgroupIDManagerImpl) assert.NotNil(t, cgroupIDManager) @@ -151,6 +157,8 @@ func TestGetAbsentContainers(t *testing.T) { } func TestClearResidualPodsInCache(t *testing.T) { + t.Parallel() + cgroupIDManager := NewCgroupIDManager(podFetcher).(*cgroupIDManagerImpl) assert.NotNil(t, cgroupIDManager) @@ -187,7 +195,10 @@ func TestClearResidualPodsInCache(t *testing.T) { assert.Equal(t, len(tt.want), len(cgroupIDManager.podCgroupIDCache)) for wantPodUID, wantContainerMap := range tt.want { + cgroupIDManager.Lock() gotContainerMap, ok := cgroupIDManager.podCgroupIDCache[wantPodUID] + cgroupIDManager.Unlock() + assert.True(t, ok) assert.Equal(t, len(wantContainerMap), len(gotContainerMap)) for wantContainerID, wantCgID := range wantContainerMap { diff --git a/pkg/metaserver/external/manager_linux_test.go b/pkg/metaserver/external/manager_linux_test.go index a4fb815b0..91cbec7ea 100644 --- a/pkg/metaserver/external/manager_linux_test.go +++ b/pkg/metaserver/external/manager_linux_test.go @@ -27,23 +27,24 @@ import ( "github.com/stretchr/testify/assert" "github.com/kubewharf/katalyst-core/pkg/metaserver/agent/pod" + "github.com/kubewharf/katalyst-core/pkg/metaserver/external/cgroupid" + "github.com/kubewharf/katalyst-core/pkg/util/external/network" + "github.com/kubewharf/katalyst-core/pkg/util/external/rdt" ) var ( podFetcher = &pod.PodFetcherStub{} ) -func TestInitExternalManager(t *testing.T) { - externalManager := InitExternalManager(podFetcher) - assert.NotNil(t, externalManager) - - return -} - func TestSetNetworkManager(t *testing.T) { - externalManager := InitExternalManager(podFetcher).(*externalManagerImpl) - assert.NotNil(t, externalManager) - + t.Parallel() + + externalManager := &externalManagerImpl{ + start: false, + CgroupIDManager: cgroupid.NewCgroupIDManager(podFetcher), + NetworkManager: network.NewNetworkManager(), + RDTManager: rdt.NewDefaultManager(), + } externalManager.start = false externalManager.SetNetworkManager(nil) @@ -51,8 +52,14 @@ func TestSetNetworkManager(t *testing.T) { } func TestRun(t *testing.T) { - externalManager := InitExternalManager(podFetcher) - assert.NotNil(t, externalManager) + t.Parallel() + + externalManager := &externalManagerImpl{ + start: false, + CgroupIDManager: cgroupid.NewCgroupIDManager(podFetcher), + NetworkManager: network.NewNetworkManager(), + RDTManager: rdt.NewDefaultManager(), + } ctx, cancel := context.WithTimeout(context.TODO(), time.Second) defer cancel() diff --git a/pkg/metaserver/metaserver_test.go b/pkg/metaserver/metaserver_test.go index 537bfcf1e..e03dc7cf0 100644 --- a/pkg/metaserver/metaserver_test.go +++ b/pkg/metaserver/metaserver_test.go @@ -64,6 +64,8 @@ func generateTestMetaServer(clientSet *client.GenericClientSet, conf *config.Con } func TestMetaServer_Run(t *testing.T) { + t.Parallel() + genericClient := &client.GenericClientSet{ KubeClient: fake.NewSimpleClientset(), InternalClient: internalfake.NewSimpleClientset(), @@ -78,7 +80,7 @@ func TestMetaServer_Run(t *testing.T) { go meta.Run(context.Background()) - time.Sleep(1 * time.Second) + time.Sleep(3 * time.Millisecond) meta.Lock() assert.True(t, meta.start) @@ -86,6 +88,8 @@ func TestMetaServer_Run(t *testing.T) { } func TestMetaServer_SetServiceProfilingManager(t *testing.T) { + t.Parallel() + genericClient := &client.GenericClientSet{ KubeClient: 
fake.NewSimpleClientset(), InternalClient: internalfake.NewSimpleClientset(), @@ -99,7 +103,7 @@ func TestMetaServer_SetServiceProfilingManager(t *testing.T) { go meta.Run(context.Background()) - time.Sleep(1 * time.Second) + time.Sleep(3 * time.Millisecond) err = meta.SetServiceProfilingManager(&spd.DummyServiceProfilingManager{}) assert.Error(t, err) diff --git a/pkg/metaserver/spd/checkpoint/checkpoint_test.go b/pkg/metaserver/spd/checkpoint/checkpoint_test.go index 35db52590..01b296701 100644 --- a/pkg/metaserver/spd/checkpoint/checkpoint_test.go +++ b/pkg/metaserver/spd/checkpoint/checkpoint_test.go @@ -30,6 +30,8 @@ import ( ) // TestWriteLoadDeleteSPDs validates all combinations of write, load, and delete func TestWriteLoadDeleteSPDs(t *testing.T) { + t.Parallel() + testSPDs := []*v1alpha1.ServiceProfileDescriptor{ { ObjectMeta: metav1.ObjectMeta{ @@ -61,7 +63,7 @@ func TestWriteLoadDeleteSPDs(t *testing.T) { }, } - dir, err := ioutil.TempDir("", "checkpoint") + dir, err := ioutil.TempDir("", "checkpoint-TestWriteLoadDeleteSPDs") if err != nil { t.Errorf("failed to allocate temp directory for TestWriteLoadDeleteSPDs error=%v", err) } diff --git a/pkg/metaserver/spd/fetcher_test.go b/pkg/metaserver/spd/fetcher_test.go index 3c2102f58..d0dac5471 100644 --- a/pkg/metaserver/spd/fetcher_test.go +++ b/pkg/metaserver/spd/fetcher_test.go @@ -40,6 +40,8 @@ import ( ) func generateTestConfiguration(t *testing.T, nodeName string, checkpoint string) *pkgconfig.Configuration { + t.Helper() + testConfiguration, err := options.NewOptions().Config() require.NoError(t, err) require.NotNil(t, testConfiguration) @@ -50,6 +52,8 @@ func generateTestConfiguration(t *testing.T, nodeName string, checkpoint string) } func Test_spdManager_GetSPD(t *testing.T) { + t.Parallel() + type fields struct { nodeName string spd *workloadapis.ServiceProfileDescriptor @@ -180,7 +184,7 @@ func Test_spdManager_GetSPD(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dir, err := ioutil.TempDir("", "checkpoint") + dir, err := ioutil.TempDir("", "checkpoint-Test_spdManager_GetSPD") require.NoError(t, err) defer os.RemoveAll(dir) @@ -198,7 +202,7 @@ func Test_spdManager_GetSPD(t *testing.T) { ctx := context.TODO() go s.Run(ctx) - time.Sleep(1 * time.Second) + time.Sleep(1 * time.Millisecond) got, err := s.GetSPD(ctx, tt.args.pod) if (err != nil) != tt.wantErr { diff --git a/pkg/metaserver/spd/manager_test.go b/pkg/metaserver/spd/manager_test.go index 682d2b95b..aac3278eb 100644 --- a/pkg/metaserver/spd/manager_test.go +++ b/pkg/metaserver/spd/manager_test.go @@ -39,6 +39,8 @@ import ( ) func Test_serviceProfilingManager_ServiceBusinessPerformanceLevel(t *testing.T) { + t.Parallel() + type fields struct { nodeName string spd *workloadapis.ServiceProfileDescriptor @@ -247,7 +249,7 @@ func Test_serviceProfilingManager_ServiceBusinessPerformanceLevel(t *testing.T) } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dir, err := ioutil.TempDir("", "checkpoint") + dir, err := ioutil.TempDir("", "checkpoint-Test_serviceProfilingManager_ServiceBusinessPerformanceLevel") require.NoError(t, err) defer os.RemoveAll(dir) @@ -281,6 +283,8 @@ func Test_serviceProfilingManager_ServiceBusinessPerformanceLevel(t *testing.T) } func Test_serviceProfilingManager_ServiceSystemPerformanceTarget(t
*testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dir, err := ioutil.TempDir("", "checkpoint") + dir, err := ioutil.TempDir("", "checkpoint-Test_serviceProfilingManager_ServiceSystemPerformanceTarget") require.NoError(t, err) defer os.RemoveAll(dir) diff --git a/pkg/metrics/metrics-pool/custom_metrics_pool_test.go b/pkg/metrics/metrics-pool/custom_metrics_pool_test.go index 3dd6a3dc3..91b2f0bd5 100644 --- a/pkg/metrics/metrics-pool/custom_metrics_pool_test.go +++ b/pkg/metrics/metrics-pool/custom_metrics_pool_test.go @@ -26,6 +26,8 @@ import ( ) func TestNewCustomMetricsEmitterPool(t *testing.T) { + t.Parallel() + m, err := NewOpenTelemetryPrometheusMetricsEmitterPool(generic.NewMetricsConfiguration(), http.NewServeMux()) assert.NoError(t, err) diff --git a/pkg/metrics/metrics-pool/otel_prom_metrics_mux_test.go b/pkg/metrics/metrics-pool/otel_prom_metrics_mux_test.go index 1d650be32..8a637e7bb 100644 --- a/pkg/metrics/metrics-pool/otel_prom_metrics_mux_test.go +++ b/pkg/metrics/metrics-pool/otel_prom_metrics_mux_test.go @@ -26,6 +26,8 @@ import ( ) func TestNewMetricEmitterMux(t *testing.T) { + t.Parallel() + m, err := NewOpenTelemetryPrometheusMetricsEmitterPool(generic.NewMetricsConfiguration(), http.NewServeMux()) assert.NoError(t, err) diff --git a/pkg/metrics/metrics_test.go b/pkg/metrics/metrics_test.go index 987a22217..b92f5f0e9 100644 --- a/pkg/metrics/metrics_test.go +++ b/pkg/metrics/metrics_test.go @@ -34,6 +34,8 @@ func newMetricsEmitter() (MetricEmitter, error) { } func TestMetrics(t *testing.T) { + t.Parallel() + emitt, err := newMetricsEmitter() if err != nil { t.Errorf("new open telemetry Prometheus Metrics Emitter error:%v", err) @@ -63,15 +65,17 @@ func TestMetrics(t *testing.T) { } func TestClock(t *testing.T) { + t.Parallel() + last := time.Now() c := prometheusClock{last: last} - _ = c.Ticker(time.Second) + _ = c.Ticker(time.Millisecond * 3) select { case <-c.t.C(): c.Now() } - assert.Equal(t, true, c.Now().Sub(last) > time.Second) + assert.Equal(t, true, c.Now().Sub(last) > time.Millisecond*3) c.Stop() } diff --git a/pkg/scheduler/eventhandlers/cache_test.go b/pkg/scheduler/eventhandlers/cache_test.go index 4272e3e7f..56639e994 100644 --- a/pkg/scheduler/eventhandlers/cache_test.go +++ b/pkg/scheduler/eventhandlers/cache_test.go @@ -66,6 +66,8 @@ var makeCachedCNR = func(name string, res v1.ResourceList) *apis.CustomNodeResou } func Test_CalculateQoSResource(t *testing.T) { + t.Parallel() + cache := schedulercache.GetCache() _, err := cache.GetNodeInfo("c1") diff --git a/pkg/scheduler/plugins/qosawarenoderesources/fit.go b/pkg/scheduler/plugins/qosawarenoderesources/fit.go index 9f27190e7..83c0e6bf5 100644 --- a/pkg/scheduler/plugins/qosawarenoderesources/fit.go +++ b/pkg/scheduler/plugins/qosawarenoderesources/fit.go @@ -270,6 +270,7 @@ func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod } return framework.NewStatus(framework.Unschedulable, failureReasons...) 
} + return nil } diff --git a/pkg/util/asyncworker/async_workers_test.go b/pkg/util/asyncworker/async_workers_test.go index 416dfaa0a..973fa0f2b 100644 --- a/pkg/util/asyncworker/async_workers_test.go +++ b/pkg/util/asyncworker/async_workers_test.go @@ -26,20 +26,22 @@ import ( ) func TestAsyncWorkers(t *testing.T) { + t.Parallel() + rt := require.New(t) asw := NewAsyncWorkers("test") result, a, b, c, d, e, f := 0, 1, 2, 3, 4, 5, 6 - timeoutSeconds := 10 * time.Second + timeoutSeconds := 100 * time.Millisecond fn := func(ctx context.Context, params ...interface{}) error { if len(params) != 2 { return fmt.Errorf("invalid params") } - time.Sleep(5 * time.Second) + time.Sleep(5 * time.Millisecond) p1Int := params[0].(int) p2Int := params[1].(int) result = p1Int + p2Int @@ -63,7 +65,7 @@ func TestAsyncWorkers(t *testing.T) { asw.workLock.Lock() for asw.workStatuses[work1Name].working { asw.workLock.Unlock() - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) if time.Now().Sub(work1DeliveredAt) > timeoutSeconds { rt.Failf("%s timeout", work1Name) @@ -106,7 +108,7 @@ func TestAsyncWorkers(t *testing.T) { asw.workLock.Lock() for asw.workStatuses[work2Name].working { asw.workLock.Unlock() - time.Sleep(1 * time.Second) + time.Sleep(10 * time.Millisecond) if time.Now().Sub(work1DeliveredAt) > 3*timeoutSeconds { rt.Failf("%s timeout", work2Name) diff --git a/pkg/util/cgroup/common/path.go b/pkg/util/cgroup/common/path.go index a09068bb9..0c4b264e5 100644 --- a/pkg/util/cgroup/common/path.go +++ b/pkg/util/cgroup/common/path.go @@ -30,6 +30,7 @@ import ( // k8sCgroupPathList is used to record cgroup-path related configurations, // and it will be set as SystemdRootPath (along with kubernetes levels) as default. +var k8sCgroupPathLock sync.RWMutex var k8sCgroupPathList = sets.NewString( CgroupFsRootPath, CgroupFsRootPathBestEffort, @@ -44,6 +45,8 @@ var k8sCgroupPathSettingOnce = sync.Once{} func InitKubernetesCGroupPath(cgroupType CgroupType, additionalK8SCGroupPath []string) { k8sCgroupPathSettingOnce.Do(func() { if cgroupType == CgroupTypeSystemd { + k8sCgroupPathLock.Lock() + defer k8sCgroupPathLock.Unlock() k8sCgroupPathList = sets.NewString( SystemdRootPath, SystemdRootPathBestEffort, @@ -72,6 +75,9 @@ func GetAbsCgroupPath(subsys, suffix string) string { // GetKubernetesCgroupRootPathWithSubSys returns all Cgroup paths to run container for // kubernetes, and the returned values are merged with subsys. 
func GetKubernetesCgroupRootPathWithSubSys(subsys string) []string { + k8sCgroupPathLock.RLock() + defer k8sCgroupPathLock.RUnlock() + var subsysCgroupPathList []string for _, p := range k8sCgroupPathList.List() { subsysCgroupPathList = append(subsysCgroupPathList, diff --git a/pkg/util/cgroup/common/path_test.go b/pkg/util/cgroup/common/path_test.go index 6d28a5cc2..f7ed7b4b9 100644 --- a/pkg/util/cgroup/common/path_test.go +++ b/pkg/util/cgroup/common/path_test.go @@ -26,6 +26,8 @@ import ( ) func TestAbsCgroupPathWithSuffix(t *testing.T) { + t.Parallel() + as := require.New(t) path := GetAbsCgroupPath("cpuset", "abc") @@ -37,24 +39,32 @@ func TestAbsCgroupPathWithSuffix(t *testing.T) { } func TestGetAbsCgroupPath(t *testing.T) { + t.Parallel() + as := require.New(t) _, err := GetKubernetesAnyExistAbsCgroupPath("cpuset", "") as.NotNil(err) } func TestGetPodAbsCgroupPath(t *testing.T) { + t.Parallel() + as := require.New(t) _, err := GetPodAbsCgroupPath("cpuset", "") as.NotNil(err) } func TestGetContainerAbsCgroupPath(t *testing.T) { + t.Parallel() + as := require.New(t) _, err := GetContainerAbsCgroupPath("cpuset", "", "") as.NotNil(err) } func TestIsContainerCgroupExist(t *testing.T) { + t.Parallel() + as := require.New(t) _, err := IsContainerCgroupExist("fake-pod-uid", "fake-container-id") as.NotNil(err) diff --git a/pkg/util/cgroup/manager/cgroup_test.go b/pkg/util/cgroup/manager/cgroup_test.go index 40cc79ccc..233f154e1 100644 --- a/pkg/util/cgroup/manager/cgroup_test.go +++ b/pkg/util/cgroup/manager/cgroup_test.go @@ -31,18 +31,24 @@ import ( ) func TestManager(t *testing.T) { + t.Parallel() + _ = GetManager() } func TestV1Manager(t *testing.T) { - manager = v1.NewManager() + t.Parallel() + + _ = v1.NewManager() testManager(t, "v1") testNetCls(t, "v1") } func TestV2Manager(t *testing.T) { - manager = v2.NewManager() + t.Parallel() + + _ = v2.NewManager() testManager(t, "v2") } diff --git a/pkg/util/cnr_test.go b/pkg/util/cnr_test.go index 2281faecc..1bd8b0dbd 100644 --- a/pkg/util/cnr_test.go +++ b/pkg/util/cnr_test.go @@ -28,6 +28,8 @@ import ( ) func TestAddOrUpdateCNRTaint(t *testing.T) { + t.Parallel() + type args struct { cnr *nodeapis.CustomNodeResource taint *nodeapis.Taint @@ -151,6 +153,8 @@ func TestAddOrUpdateCNRTaint(t *testing.T) { } func TestCNRTaintExists(t *testing.T) { + t.Parallel() + type args struct { taints []*nodeapis.Taint taintToFind *nodeapis.Taint @@ -205,6 +209,8 @@ func TestCNRTaintExists(t *testing.T) { } func TestMergeAllocations(t *testing.T) { + t.Parallel() + type args struct { dst []*nodeapis.Allocation src []*nodeapis.Allocation @@ -288,6 +294,8 @@ func TestMergeAllocations(t *testing.T) { } func TestMergeAttributes(t *testing.T) { + t.Parallel() + type args struct { dst []nodeapis.Attribute src []nodeapis.Attribute @@ -356,6 +364,8 @@ func TestMergeAttributes(t *testing.T) { } func TestMergeResources(t *testing.T) { + t.Parallel() + type args struct { dst nodeapis.Resources src nodeapis.Resources @@ -401,6 +411,8 @@ func TestMergeResources(t *testing.T) { } func TestMergeTopologyZone(t *testing.T) { + t.Parallel() + type args struct { dst []*nodeapis.TopologyZone src []*nodeapis.TopologyZone diff --git a/pkg/util/external/network/manager_linux_test.go b/pkg/util/external/network/manager_linux_test.go index af054ed78..2146ba6b0 100644 --- a/pkg/util/external/network/manager_linux_test.go +++ b/pkg/util/external/network/manager_linux_test.go @@ -35,6 +35,8 @@ var ( ) func TestNewDefaultManager(t *testing.T) { + t.Parallel() + defaultManager := 
NewNetworkManager() assert.NotNil(t, defaultManager) @@ -42,6 +44,8 @@ func TestNewDefaultManager(t *testing.T) { } func TestApplyNetClass(t *testing.T) { + t.Parallel() + defaultManager := NewNetworkManager() assert.NotNil(t, defaultManager) @@ -55,6 +59,8 @@ func TestApplyNetClass(t *testing.T) { } func TestClearNetClass(t *testing.T) { + t.Parallel() + defaultManager := NewNetworkManager() assert.NotNil(t, defaultManager) diff --git a/pkg/util/external/rdt/manager_linux_test.go b/pkg/util/external/rdt/manager_linux_test.go index 0f5a1cb6d..0ea3faec5 100644 --- a/pkg/util/external/rdt/manager_linux_test.go +++ b/pkg/util/external/rdt/manager_linux_test.go @@ -34,11 +34,15 @@ var ( ) func TestNewDefaultManager(t *testing.T) { + t.Parallel() + defaultManager := NewDefaultManager() assert.NotNil(t, defaultManager) } func TestCheckSupportRDT(t *testing.T) { + t.Parallel() + defaultManager := NewDefaultManager() assert.NotNil(t, defaultManager) @@ -48,6 +52,8 @@ func TestCheckSupportRDT(t *testing.T) { } func TestInitRDT(t *testing.T) { + t.Parallel() + defaultManager := NewDefaultManager() assert.NotNil(t, defaultManager) @@ -56,6 +62,8 @@ func TestInitRDT(t *testing.T) { } func TestApplyTasks(t *testing.T) { + t.Parallel() + defaultManager := NewDefaultManager() assert.NotNil(t, defaultManager) @@ -64,6 +72,8 @@ func TestApplyTasks(t *testing.T) { } func TestApplyCAT(t *testing.T) { + t.Parallel() + defaultManager := NewDefaultManager() assert.NotNil(t, defaultManager) @@ -81,6 +91,8 @@ func TestApplyCAT(t *testing.T) { } func TestApplyMBA(t *testing.T) { + t.Parallel() + defaultManager := NewDefaultManager() assert.NotNil(t, defaultManager) diff --git a/pkg/util/general/common_test.go b/pkg/util/general/common_test.go index c5f8bcca2..98808c85f 100644 --- a/pkg/util/general/common_test.go +++ b/pkg/util/general/common_test.go @@ -24,32 +24,44 @@ import ( ) func TestMax(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(2, Max(1, 2)) } func TestMaxUInt64(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(uint64(2), MaxUInt64(1, 2)) } func TestMinUInt64(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(uint64(2), MaxUInt64(1, 2)) } func TestMaxInt64(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(int64(2), MaxInt64(1, 2)) } func TestGetValueWithDefault(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal("5", GetValueWithDefault(map[string]string{"2": "2"}, "1", "5")) as.Equal("2", GetValueWithDefault(map[string]string{"1": "2"}, "1", "5")) } func TestIsNameEnabled(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(true, IsNameEnabled("test", sets.NewString(), []string{"test"})) as.Equal(true, IsNameEnabled("test", sets.NewString(), []string{"*"})) @@ -57,12 +69,16 @@ func TestIsNameEnabled(t *testing.T) { } func TestParseUint64PointerToString(t *testing.T) { + t.Parallel() + as := require.New(t) var a uint64 = 5 as.Equal(ParseUint64PointerToString(&a), "5") } func TestParseStringToUint64Pointer(t *testing.T) { + t.Parallel() + as := require.New(t) p, err := ParseStringToUint64Pointer("5") as.Nil(err) @@ -70,6 +86,8 @@ func TestParseStringToUint64Pointer(t *testing.T) { } func TestGetInt64PointerFromUint64Pointer(t *testing.T) { + t.Parallel() + as := require.New(t) var a uint64 = 5 p, err := GetInt64PointerFromUint64Pointer(&a) @@ -78,35 +96,47 @@ func TestGetInt64PointerFromUint64Pointer(t *testing.T) { } func TestGetStringValueFromMap(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal("a", 
GetStringValueFromMap(map[string]string{"labelA": "a"}, "labelA")) as.Equal("", GetStringValueFromMap(map[string]string{"labelB": "a"}, "labelA")) } func TestGenerateHash(t *testing.T) { + t.Parallel() + as := require.New(t) as.Greater(len(GenerateHash([]byte{60, 60}, 5)), 0) } func TestCheckMapEqual(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(true, CheckMapEqual(map[string]string{"labelA": "a"}, map[string]string{"labelA": "a"})) as.Equal(false, CheckMapEqual(map[string]string{"labelB": "a"}, map[string]string{"labelA": "a"})) } func TestUIntPointerToFloat64(t *testing.T) { + t.Parallel() + as := require.New(t) var a uint = 5 as.Equal(5.0, UIntPointerToFloat64(&a)) } func TestUInt64PointerToFloat64(t *testing.T) { + t.Parallel() + as := require.New(t) var a uint64 = 5 as.Equal(5.0, UInt64PointerToFloat64(&a)) } func TestJsonPathEmpty(t *testing.T) { + t.Parallel() + as := require.New(t) as.Equal(true, JsonPathEmpty([]byte("{}"))) as.Equal(true, JsonPathEmpty([]byte(""))) diff --git a/pkg/util/general/file_test.go b/pkg/util/general/file_test.go index 2acc30262..18efde860 100644 --- a/pkg/util/general/file_test.go +++ b/pkg/util/general/file_test.go @@ -26,8 +26,10 @@ import ( ) func TestFileUtils(t *testing.T) { + t.Parallel() + // test to read from none-existed and existed files - filename := "/tmp/katalyst_test" + filename := "/tmp/TestFileUtils" _, err := ReadFileIntoLines(filename) assert.NotNil(t, err) @@ -44,7 +46,9 @@ func TestFileUtils(t *testing.T) { } func Test_fileUniqueLock(t *testing.T) { - lockPath := "/tmp/test_lock" + t.Parallel() + + lockPath := "/tmp/Test_fileUniqueLock" flock, err := GetUniqueLock(lockPath) if err != nil { @@ -52,7 +56,7 @@ func Test_fileUniqueLock(t *testing.T) { return } - _, err = getUniqueLockWithTimeout(lockPath, time.Second, 3) + _, err = getUniqueLockWithTimeout(lockPath, time.Millisecond*100, 3) if err == nil { t.Errorf("GetNode() error = %v, wantErr not nil", err) return diff --git a/pkg/util/general/flags_test.go b/pkg/util/general/flags_test.go index 7da08746a..ae96d08d6 100644 --- a/pkg/util/general/flags_test.go +++ b/pkg/util/general/flags_test.go @@ -25,6 +25,8 @@ import ( ) func TestResourceList_Set(t *testing.T) { + t.Parallel() + type args struct { value string } @@ -57,6 +59,8 @@ func TestResourceList_Set(t *testing.T) { } func TestResourceList_String(t *testing.T) { + t.Parallel() + tests := []struct { name string r ResourceList diff --git a/pkg/util/general/log_test.go b/pkg/util/general/log_test.go index 85da6e726..0826d7c12 100644 --- a/pkg/util/general/log_test.go +++ b/pkg/util/general/log_test.go @@ -43,6 +43,8 @@ func (t testLogger) log(message string, params ...interface{}) string { } func TestLogging(t *testing.T) { + t.Parallel() + loggingWithoutStruct := logging("extra %v %v", 1, "test") require.Equal(t, "[testing.tRunner] extra 1 test", loggingWithoutStruct) @@ -95,5 +97,5 @@ func TestLogging(t *testing.T) { } go f() - time.Sleep(time.Second) + time.Sleep(time.Millisecond) } diff --git a/pkg/util/general/window_test.go b/pkg/util/general/window_test.go index 41207eceb..95b079ac1 100644 --- a/pkg/util/general/window_test.go +++ b/pkg/util/general/window_test.go @@ -25,6 +25,8 @@ import ( ) func TestNewCappedSmoothWindow(t *testing.T) { + t.Parallel() + type args struct { minStep resource.Quantity maxStep resource.Quantity @@ -59,6 +61,8 @@ func TestNewCappedSmoothWindow(t *testing.T) { } func TestCappedSmoothWindow_GetWindowedResources(t *testing.T) { + t.Parallel() + w := NewCappedSmoothWindow( 
resource.MustParse("0.3"), resource.MustParse("4"), diff --git a/pkg/util/kcct_test.go b/pkg/util/kcct_test.go index de6457e5d..a16b080ad 100644 --- a/pkg/util/kcct_test.go +++ b/pkg/util/kcct_test.go @@ -67,6 +67,8 @@ func toTestUnstructured(obj interface{}) *unstructured.Unstructured { } func Test_kccTargetResource_GetCollisionCount(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -107,6 +109,8 @@ func Test_kccTargetResource_GetCollisionCount(t *testing.T) { } func Test_kccTargetResource_GetHash(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -149,6 +153,8 @@ func Test_kccTargetResource_GetHash(t *testing.T) { } func Test_kccTargetResource_GetLabelSelector(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -191,6 +197,8 @@ func Test_kccTargetResource_GetLabelSelector(t *testing.T) { } func Test_kccTargetResource_GetLastDuration(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -235,6 +243,8 @@ func Test_kccTargetResource_GetLastDuration(t *testing.T) { } func Test_kccTargetResource_GetNodeNames(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -285,6 +295,8 @@ func Test_kccTargetResource_GetNodeNames(t *testing.T) { } func Test_kccTargetResource_GetObservedGeneration(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -325,6 +337,8 @@ func Test_kccTargetResource_GetObservedGeneration(t *testing.T) { } func Test_kccTargetResource_GetRevisionHistoryLimit(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -367,6 +381,8 @@ func Test_kccTargetResource_GetRevisionHistoryLimit(t *testing.T) { } func Test_kccTargetResource_SetCollisionCount(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -444,6 +460,8 @@ func Test_kccTargetResource_SetCollisionCount(t *testing.T) { } func Test_kccTargetResource_SetHash(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -509,6 +527,8 @@ func Test_kccTargetResource_SetHash(t *testing.T) { } func Test_kccTargetResource_SetObservedGeneration(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -568,6 +588,8 @@ func Test_kccTargetResource_SetObservedGeneration(t *testing.T) { } func Test_kccTargetResource_GetConfig(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -627,6 +649,8 @@ func Test_kccTargetResource_GetConfig(t *testing.T) { } func Test_kccTargetResource_GetGenericStatus(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -672,6 +696,8 @@ func Test_kccTargetResource_GetGenericStatus(t *testing.T) { } func Test_kccTargetResource_SetGenericStatus(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -758,6 +784,8 @@ func Test_kccTargetResource_SetGenericStatus(t *testing.T) { } func Test_kccTargetResource_GetIsValid(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -787,6 +815,8 @@ func Test_kccTargetResource_GetIsValid(t *testing.T) { } func Test_kccTargetResource_GenerateConfigHash(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } @@ -852,6 +882,8 
@@ func Test_kccTargetResource_GenerateConfigHash(t *testing.T) { } func TestKCCTargetResource_IsExpired(t *testing.T) { + t.Parallel() + type fields struct { Unstructured *unstructured.Unstructured } diff --git a/pkg/util/machine/util_test.go b/pkg/util/machine/util_test.go index a1232ca00..48e97dc04 100644 --- a/pkg/util/machine/util_test.go +++ b/pkg/util/machine/util_test.go @@ -25,6 +25,8 @@ import ( ) func TestParseCPUAssignmentFormat(t *testing.T) { + t.Parallel() + assignment := map[int]CPUSet{ 0: NewCPUSet(1, 2), 1: NewCPUSet(3, 4), @@ -36,6 +38,8 @@ func TestParseCPUAssignmentFormat(t *testing.T) { } func TestDeepcopyCPUAssignment(t *testing.T) { + t.Parallel() + assignment := map[int]CPUSet{ 0: NewCPUSet(1, 2), 1: NewCPUSet(3, 4), @@ -44,6 +48,8 @@ func TestDeepcopyCPUAssignment(t *testing.T) { } func TestMaskToUInt64Array(t *testing.T) { + t.Parallel() + mask, err := bitmask.NewBitMask(0, 1, 2, 3) assert.NoError(t, err) assert.Equal(t, []uint64{0, 1, 2, 3}, MaskToUInt64Array(mask)) diff --git a/pkg/util/metric/store.go b/pkg/util/metric/store.go index 894e32108..995df75bb 100644 --- a/pkg/util/metric/store.go +++ b/pkg/util/metric/store.go @@ -48,28 +48,17 @@ type MetricStore struct { cgroupNumaMetricMap map[string]map[string]map[string]MetricData // map[cgroupPath]map[numaNode]map[metricName]value } -var ( - metricStoreInstance *MetricStore - metricStoreInitOnce sync.Once -) - -// GetMetricStoreInstance is defined as a singleton function to make sure -// only one metric instance is initialized -func GetMetricStoreInstance() *MetricStore { - metricStoreInitOnce.Do( - func() { - metricStoreInstance = &MetricStore{ - nodeMetricMap: make(map[string]MetricData), - numaMetricMap: make(map[int]map[string]MetricData), - deviceMetricMap: make(map[string]map[string]MetricData), - cpuMetricMap: make(map[int]map[string]MetricData), - podContainerMetricMap: make(map[string]map[string]map[string]MetricData), - podContainerNumaMetricMap: make(map[string]map[string]map[string]map[string]MetricData), - cgroupMetricMap: make(map[string]map[string]MetricData), - cgroupNumaMetricMap: make(map[string]map[string]map[string]MetricData), - } - }) - return metricStoreInstance +func NewMetricStore() *MetricStore { + return &MetricStore{ + nodeMetricMap: make(map[string]MetricData), + numaMetricMap: make(map[int]map[string]MetricData), + deviceMetricMap: make(map[string]map[string]MetricData), + cpuMetricMap: make(map[int]map[string]MetricData), + podContainerMetricMap: make(map[string]map[string]map[string]MetricData), + podContainerNumaMetricMap: make(map[string]map[string]map[string]map[string]MetricData), + cgroupMetricMap: make(map[string]map[string]MetricData), + cgroupNumaMetricMap: make(map[string]map[string]map[string]MetricData), + } } func (c *MetricStore) SetNodeMetric(metricName string, data MetricData) { diff --git a/pkg/util/metric/store_test.go b/pkg/util/metric/store_test.go index 8fc01dc91..d8d3589e4 100644 --- a/pkg/util/metric/store_test.go +++ b/pkg/util/metric/store_test.go @@ -24,9 +24,11 @@ import ( ) func TestStore_SetAndGetNodeMetric(t *testing.T) { + t.Parallel() + now := time.Now() - store := GetMetricStoreInstance() + store := NewMetricStore() store.SetNodeMetric("test-metric-name", MetricData{Value: 1.0, Time: &now}) value, _ := store.GetNodeMetric("test-metric-name") assert.Equal(t, MetricData{Value: 1.0, Time: &now}, value) @@ -35,9 +37,11 @@ func TestStore_SetAndGetNodeMetric(t *testing.T) { } func TestStore_SetAndGetNumaMetric(t *testing.T) { + t.Parallel() + now := 
time.Now() - store := GetMetricStoreInstance() + store := NewMetricStore() store.SetNumaMetric(0, "test-metric-name", MetricData{Value: 1.0, Time: &now}) value, _ := store.GetNumaMetric(0, "test-metric-name") assert.Equal(t, MetricData{Value: 1.0, Time: &now}, value) @@ -46,9 +50,11 @@ func TestStore_SetAndGetNumaMetric(t *testing.T) { } func TestStore_SetAndGeDeviceMetric(t *testing.T) { + t.Parallel() + now := time.Now() - store := GetMetricStoreInstance() + store := NewMetricStore() store.SetDeviceMetric("test-device", "test-metric-name", MetricData{Value: 1.0, Time: &now}) value, _ := store.GetDeviceMetric("test-device", "test-metric-name") assert.Equal(t, MetricData{Value: 1.0, Time: &now}, value) @@ -57,9 +63,11 @@ func TestStore_SetAndGeDeviceMetric(t *testing.T) { } func TestStore_SetAndGetCPUMetric(t *testing.T) { + t.Parallel() + now := time.Now() - store := GetMetricStoreInstance() + store := NewMetricStore() store.SetCPUMetric(0, "test-metric-name", MetricData{Value: 1.0, Time: &now}) value, _ := store.GetCPUMetric(0, "test-metric-name") assert.Equal(t, MetricData{Value: 1.0, Time: &now}, value) @@ -68,9 +76,11 @@ func TestStore_SetAndGetCPUMetric(t *testing.T) { } func TestStore_ContainerMetric(t *testing.T) { + t.Parallel() + now := time.Now() - store := GetMetricStoreInstance() + store := NewMetricStore() store.SetContainerMetric("pod1", "container1", "test-metric-name", MetricData{Value: 1.0, Time: &now}) store.SetContainerMetric("pod2", "container1", "test-metric-name", MetricData{Value: 1.0, Time: &now}) value, _ := store.GetContainerMetric("pod1", "container1", "test-metric-name") diff --git a/pkg/util/native/container_test.go b/pkg/util/native/container_test.go index 0c7d57455..0571b3fa1 100644 --- a/pkg/util/native/container_test.go +++ b/pkg/util/native/container_test.go @@ -24,12 +24,16 @@ import ( ) func TestContainerNotRunning(t *testing.T) { + t.Parallel() + as := require.New(t) notRunning := containerNotRunning([]v1.ContainerStatus{}) as.Equal(true, notRunning) } func TestTrimContainerIDPrefix(t *testing.T) { + t.Parallel() + as := require.New(t) ida := "docker://abc" idb := "containerd://abc" diff --git a/pkg/util/native/informer_test.go b/pkg/util/native/informer_test.go index 46bfc356c..335144e6c 100644 --- a/pkg/util/native/informer_test.go +++ b/pkg/util/native/informer_test.go @@ -25,6 +25,8 @@ import ( ) func TestPodTransformer(t *testing.T) { + t.Parallel() + as := require.New(t) WithPodTransformer(func(src, dest *core.Pod) { diff --git a/pkg/util/native/pods_test.go b/pkg/util/native/pods_test.go index 0582d14d9..ed55bf428 100644 --- a/pkg/util/native/pods_test.go +++ b/pkg/util/native/pods_test.go @@ -26,6 +26,8 @@ import ( ) func TestFilterPodAnnotations(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string pod *v1.Pod @@ -64,6 +66,8 @@ func TestFilterPodAnnotations(t *testing.T) { } func TestGetContainerID(t *testing.T) { + t.Parallel() + type args struct { pod *v1.Pod name string @@ -107,6 +111,8 @@ func TestGetContainerID(t *testing.T) { } func TestGetContainerEnvs(t *testing.T) { + t.Parallel() + type args struct { pod *v1.Pod containerName string diff --git a/pkg/util/native/qos_resources_test.go b/pkg/util/native/qos_resources_test.go index 1aaa0b7d9..b1c01b005 100644 --- a/pkg/util/native/qos_resources_test.go +++ b/pkg/util/native/qos_resources_test.go @@ -63,6 +63,8 @@ var makeQoSResourcePod = func(name string, container, initContainer, overhead v1 } func Test_CalculateQoSResource(t *testing.T) { + t.Parallel() + for _, tc 
:= range []struct { name string pod *v1.Pod diff --git a/pkg/util/native/resources_test.go b/pkg/util/native/resources_test.go index 9bc5edc95..30080ab8f 100644 --- a/pkg/util/native/resources_test.go +++ b/pkg/util/native/resources_test.go @@ -44,6 +44,8 @@ var makePod = func(name string, request, limits v1.ResourceList) *v1.Pod { } func TestNeedUpdateResources(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string pod *v1.Pod diff --git a/pkg/util/process/http_test.go b/pkg/util/process/http_test.go index bbb83018d..c16333d52 100644 --- a/pkg/util/process/http_test.go +++ b/pkg/util/process/http_test.go @@ -51,8 +51,10 @@ func (d dummyResponseWriter) Write([]byte) (int, error) { return 0, nil } func (d dummyResponseWriter) WriteHeader(_ int) {} func TestHTTPHandler(t *testing.T) { - httpCleanupVisitorPeriod = time.Second - httpSyncPasswdPeriod = time.Second + t.Parallel() + + httpCleanupVisitorPeriod = time.Millisecond * 5 + httpSyncPasswdPeriod = time.Millisecond * 5 type req struct { burst int @@ -148,7 +150,7 @@ func TestHTTPHandler(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) h.Run(ctx) - time.Sleep(time.Second) + time.Sleep(time.Millisecond * 3) for _, r := range tc.reqs { f := &dummyHandler{} @@ -172,7 +174,7 @@ func TestHTTPHandler(t *testing.T) { assert.Equal(t, tc.visitCnt, len(h.visitors)) h.mux.Unlock() - time.Sleep(time.Second * 3) + time.Sleep(time.Millisecond * 10) h.mux.Lock() assert.Equal(t, 0, len(h.visitors)) diff --git a/pkg/util/qos/net_enhancement_test.go b/pkg/util/qos/net_enhancement_test.go index bb1659409..94b2487a8 100644 --- a/pkg/util/qos/net_enhancement_test.go +++ b/pkg/util/qos/net_enhancement_test.go @@ -27,6 +27,8 @@ import ( ) func TestGetPodNetClassID(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string pod *v1.Pod diff --git a/pkg/util/spd_test.go b/pkg/util/spd_test.go index 9f7c1b810..4074a6057 100644 --- a/pkg/util/spd_test.go +++ b/pkg/util/spd_test.go @@ -40,6 +40,8 @@ import ( ) func TestGetSPDForPod(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() utilruntime.Must(v1.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) diff --git a/pkg/util/vpa_test.go b/pkg/util/vpa_test.go index c22a57499..0f9c35379 100644 --- a/pkg/util/vpa_test.go +++ b/pkg/util/vpa_test.go @@ -53,6 +53,8 @@ var ( ) func TestFindSpdByVpa(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string vpa *apis.KatalystVerticalPodAutoscaler @@ -188,6 +190,8 @@ func TestFindSpdByVpa(t *testing.T) { } func TestGetVPAForPod(t *testing.T) { + t.Parallel() + scheme := runtime.NewScheme() utilruntime.Must(v1.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) @@ -311,6 +315,8 @@ func TestGetVPAForPod(t *testing.T) { } func TestGetWorkloadByVPA(t *testing.T) { + t.Parallel() + for _, tc := range []struct { name string vpa *apis.KatalystVerticalPodAutoscaler @@ -364,6 +370,8 @@ func TestGetWorkloadByVPA(t *testing.T) { } func TestCheckVPARecommendationMatchVPA(t *testing.T) { + t.Parallel() + vpa1 := &apis.KatalystVerticalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", @@ -422,6 +430,8 @@ func TestCheckVPARecommendationMatchVPA(t *testing.T) { } func TestIsVPAStatusLegal(t *testing.T) { + t.Parallel() + vpa1 := &apis.KatalystVerticalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: "vpa1", diff --git a/pkg/webhook/mutating/pod/pod_test.go b/pkg/webhook/mutating/pod/pod_test.go index e04a6bea0..645e42a62 100644 --- 
a/pkg/webhook/mutating/pod/pod_test.go +++ b/pkg/webhook/mutating/pod/pod_test.go @@ -49,6 +49,8 @@ func getPodJSON(pod *v1.Pod) []byte { } func TestMutatePod(t *testing.T) { + t.Parallel() + container1 := &v1.Container{ Name: "c1", Resources: v1.ResourceRequirements{