diff --git a/pkg/controllers/garbagecollector/garbagecollector.go b/pkg/controllers/garbagecollector/garbagecollector.go
index be0a6b7e69..a6fcb5b392 100644
--- a/pkg/controllers/garbagecollector/garbagecollector.go
+++ b/pkg/controllers/garbagecollector/garbagecollector.go
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/apimachinery/pkg/api/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -196,7 +195,7 @@ func (gc *gccontroller) processJob(key string) error {
 	klog.V(4).Infof("Checking if Job %s/%s is ready for cleanup", namespace, name)
 	// Ignore the Jobs that are already deleted or being deleted, or the ones that don't need clean up.
 	job, err := gc.jobLister.Jobs(namespace).Get(name)
-	if errors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	if err != nil {
@@ -214,7 +213,7 @@
 	// If TTL is modified before we do this check, we cannot be sure if the TTL truly expires.
 	// The latest Job may have a different UID, but it's fine because the checks will be run again.
 	fresh, err := gc.vcClient.BatchV1alpha1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
-	if errors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	if err != nil {
diff --git a/pkg/controllers/job/job_controller_actions.go b/pkg/controllers/job/job_controller_actions.go
index 0e45b045d8..d646310083 100644
--- a/pkg/controllers/job/job_controller_actions.go
+++ b/pkg/controllers/job/job_controller_actions.go
@@ -26,7 +26,6 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
-	"k8s.io/apimachinery/pkg/api/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
@@ -137,7 +136,7 @@ func (cc *jobcontroller) killJob(jobInfo *apis.JobInfo, podRetainPhase state.Pha
 
 	// Update Job status
 	newJob, err := cc.vcClient.BatchV1alpha1().Jobs(job.Namespace).UpdateStatus(context.TODO(), job, metav1.UpdateOptions{})
-	if errors.IsNotFound(err) {
+	if apierrors.IsNotFound(err) {
 		klog.Errorf("Job %v/%v was not found", job.Namespace, job.Name)
 		return nil
 	}
diff --git a/pkg/controllers/job/job_controller_util_test.go b/pkg/controllers/job/job_controller_util_test.go
index 80e8afc472..25f6b1d20f 100644
--- a/pkg/controllers/job/job_controller_util_test.go
+++ b/pkg/controllers/job/job_controller_util_test.go
@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"volcano.sh/apis/pkg/apis/batch/v1alpha1"
-	batch "volcano.sh/apis/pkg/apis/batch/v1alpha1"
 	busv1alpha1 "volcano.sh/apis/pkg/apis/bus/v1alpha1"
 	"volcano.sh/volcano/pkg/controllers/apis"
 )
@@ -1027,10 +1026,10 @@ func TestTaskPriority_CalcPGMin(t *testing.T) {
 
 func TestCalcPGMinResources(t *testing.T) {
 	jc := newFakeController()
-	job := &batch.Job{
+	job := &v1alpha1.Job{
 		TypeMeta: metav1.TypeMeta{},
-		Spec: batch.JobSpec{
-			Tasks: []batch.TaskSpec{
+		Spec: v1alpha1.JobSpec{
+			Tasks: []v1alpha1.TaskSpec{
 				master, worker,
 			},
 		},
diff --git a/pkg/controllers/jobflow/jobflow_controller.go b/pkg/controllers/jobflow/jobflow_controller.go
index 77512cb48a..dbff48fdcd 100755
--- a/pkg/controllers/jobflow/jobflow_controller.go
+++ b/pkg/controllers/jobflow/jobflow_controller.go
@@ -39,7 +39,6 @@ import (
 	flowlister "volcano.sh/apis/pkg/client/listers/flow/v1alpha1"
 	"volcano.sh/volcano/pkg/controllers/apis"
 	"volcano.sh/volcano/pkg/controllers/framework"
-	"volcano.sh/volcano/pkg/controllers/jobflow/state"
 	jobflowstate "volcano.sh/volcano/pkg/controllers/jobflow/state"
 )
 
@@ -129,7 +128,7 @@ func (jf *jobflowcontroller) Initialize(opt *framework.ControllerOption) error {
 
 	jf.syncHandler = jf.handleJobFlow
 
-	state.SyncJobFlow = jf.syncJobFlow
+	jobflowstate.SyncJobFlow = jf.syncJobFlow
 
 	return nil
 }
diff --git a/pkg/scheduler/api/devices/nvidia/gpushare/share.go b/pkg/scheduler/api/devices/nvidia/gpushare/share.go
index 853b6a1a52..03e95a1e93 100644
--- a/pkg/scheduler/api/devices/nvidia/gpushare/share.go
+++ b/pkg/scheduler/api/devices/nvidia/gpushare/share.go
@@ -211,7 +211,7 @@ func (g *GPUDevice) getUsedGPUMemory() uint {
 
 // isIdleGPU check if the device is idled.
 func (g *GPUDevice) isIdleGPU() bool {
-	return g.PodMap == nil || len(g.PodMap) == 0
+	return len(g.PodMap) == 0
 }
 
 // getGPUMemoryPod returns the GPU memory required by the pod.
diff --git a/pkg/scheduler/api/unschedule_info_test.go b/pkg/scheduler/api/unschedule_info_test.go
index 56c9273ddf..0fff6f500c 100644
--- a/pkg/scheduler/api/unschedule_info_test.go
+++ b/pkg/scheduler/api/unschedule_info_test.go
@@ -16,7 +16,7 @@ limitations under the License.
 package api
 
 import (
-	"fmt"
+	"errors"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -95,7 +95,7 @@ func TestFitErrors(t *testing.T) {
 		{
 			node:   "node1",
 			fitStr: "fit failed",
-			err:    fmt.Errorf(NodePodNumberExceeded),
+			err:    errors.New(NodePodNumberExceeded),
 			want:   "fit failed: 1 node(s) pod number exceeded.", // no node has UnschedulableAndUnresolvable
 			filterNodes: map[string]sets.Empty{},
 		},
@@ -103,7 +103,7 @@
 		{
 			node:   "node1",
 			fitStr: "NodeResourceFitFailed",
-			err:    fmt.Errorf(NodePodNumberExceeded),
+			err:    errors.New(NodePodNumberExceeded),
 			fiterr: &FitError{
 				taskNamespace: "ns1", taskName: "task1", NodeName: "node2",
 				Status: []*Status{{Reason: nodeAffinity, Code: UnschedulableAndUnresolvable}},
diff --git a/pkg/scheduler/cache/cache.go b/pkg/scheduler/cache/cache.go
index 334d06d90a..f2498ee1aa 100644
--- a/pkg/scheduler/cache/cache.go
+++ b/pkg/scheduler/cache/cache.go
@@ -18,6 +18,7 @@ package cache
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 	"strconv"
@@ -173,7 +174,7 @@ type imageState struct {
 	// Size of the image
 	size int64
 	// A set of node names for nodes having this image present
-	nodes sets.String
+	nodes sets.Set[string]
 }
 
 // DefaultBinder with kube client and event recorder
@@ -370,11 +371,11 @@ func (dvb *defaultVolumeBinder) GetPodVolumes(task *schedulingapi.TaskInfo,
 	if err != nil {
 		return nil, err
 	} else if len(reasons) > 0 {
-		var errors []string
+		var errorslice []string
 		for _, reason := range reasons {
-			errors = append(errors, string(reason))
+			errorslice = append(errorslice, string(reason))
 		}
-		return nil, fmt.Errorf(strings.Join(errors, ","))
+		return nil, errors.New(strings.Join(errorslice, ","))
 	}
 
 	return podVolumes, err
diff --git a/pkg/scheduler/cache/event_handlers.go b/pkg/scheduler/cache/event_handlers.go
index 2e678ae5ce..1e812868f8 100644
--- a/pkg/scheduler/cache/event_handlers.go
+++ b/pkg/scheduler/cache/event_handlers.go
@@ -458,7 +458,7 @@ func (sc *SchedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *scheduling
 		if !ok {
 			state = &imageState{
 				size:  image.SizeBytes,
-				nodes: sets.NewString(node.Name),
+				nodes: sets.New(node.Name),
 			}
 			sc.imageStates[name] = state
 		} else {
diff --git a/pkg/scheduler/metrics/source/metrics_client.go b/pkg/scheduler/metrics/source/metrics_client.go
index 2d6e27b45f..078ab50b70 100644
--- a/pkg/scheduler/metrics/source/metrics_client.go
+++ b/pkg/scheduler/metrics/source/metrics_client.go
@@ -52,8 +52,8 @@ func NewMetricsClient(restConfig *rest.Config, metricsConf map[string]string) (M
 	} else if metricsType == Metrics_Type_Prometheus_Adaptor {
 		return NewCustomMetricsClient(restConfig)
 	} else {
-		return nil, fmt.Errorf("Data cannot be collected from the %s monitoring system. "+
-			"The supported monitoring systems are %s, %s, and %s.",
+		return nil, fmt.Errorf("data cannot be collected from the %s monitoring system. "+
+			"The supported monitoring systems are %s, %s, and %s",
 			metricsType, Metrics_Type_Elasticsearch, Metrics_Tpye_Prometheus, Metrics_Type_Prometheus_Adaptor)
 	}
 }
diff --git a/pkg/scheduler/plugins/drf/hdrf_test.go b/pkg/scheduler/plugins/drf/hdrf_test.go
index 83698778f3..db7323cab0 100644
--- a/pkg/scheduler/plugins/drf/hdrf_test.go
+++ b/pkg/scheduler/plugins/drf/hdrf_test.go
@@ -37,20 +37,6 @@ func mergePods(pods ...[]*v1.Pod) []*v1.Pod {
 	return ret
 }
 
-type queueSpec struct {
-	name      string
-	hierarchy string
-	weights   string
-}
-
-type pgSpec struct {
-	taskNum int
-	cpu     string
-	mem     string
-	pg      string
-	queue   string
-}
-
 func TestHDRF(t *testing.T) {
 	options.Default()
 
diff --git a/pkg/scheduler/plugins/predicates/predicates.go b/pkg/scheduler/plugins/predicates/predicates.go
index 91d52713e2..e74dbb680a 100644
--- a/pkg/scheduler/plugins/predicates/predicates.go
+++ b/pkg/scheduler/plugins/predicates/predicates.go
@@ -346,7 +346,7 @@ func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) {
 			// require to run the Pod. So there will be no overbooking. However, to
 			// avoid the inconsistency in resource calculation between the scheduler
 			// and the older (before v1.28) kubelet, make the Pod unschedulable.
-			return fmt.Errorf("Pod has a restartable init container and the SidecarContainers feature is disabled")
+			return fmt.Errorf("pod has a restartable init container and the SidecarContainers feature is disabled")
 		}
 
 		// InterPodAffinity Predicate
diff --git a/pkg/scheduler/plugins/proportion/proportion_test.go b/pkg/scheduler/plugins/proportion/proportion_test.go
index 11a3019303..fdb96fd596 100644
--- a/pkg/scheduler/plugins/proportion/proportion_test.go
+++ b/pkg/scheduler/plugins/proportion/proportion_test.go
@@ -213,56 +213,53 @@ func TestProportion(t *testing.T) {
 
 	// proportion
 	go func() {
 		for {
-			select {
-			default:
-				ssn := framework.OpenSession(schedulerCache, []conf.Tier{
-					{
-						Plugins: []conf.PluginOption{
-							{
-								Name:             PluginName,
-								EnabledPredicate: &trueValue,
-							},
-							{
-								Name:                gang.PluginName,
-								EnabledJobReady:     &trueValue,
-								EnabledJobPipelined: &trueValue,
-							},
-							{
-								Name:            priority.PluginName,
-								EnabledJobOrder: &trueValue,
-							},
+			ssn := framework.OpenSession(schedulerCache, []conf.Tier{
+				{
+					Plugins: []conf.PluginOption{
+						{
+							Name:             PluginName,
+							EnabledPredicate: &trueValue,
+						},
+						{
+							Name:                gang.PluginName,
+							EnabledJobReady:     &trueValue,
+							EnabledJobPipelined: &trueValue,
+						},
+						{
+							Name:            priority.PluginName,
+							EnabledJobOrder: &trueValue,
 						},
 					},
-				}, nil)
-
-				allocator := allocate.New()
-				allocator.Execute(ssn)
-				framework.CloseSession(ssn)
-				time.Sleep(time.Second * 3)
-				if num == 1 {
-					metrics := getLocalMetrics()
-					if metrics == 12000 {
-						t.Logf("init queue_allocated metrics is ok,%v", metrics)
-					}
-					schedulerCache.DeletePodGroupV1beta1(pg1)
-				} else if num == 2 {
-					metrics := getLocalMetrics()
-					if metrics == 4000 {
-						t.Logf("after delete vcjob pg1, queue_allocated metrics is ok,%v", metrics)
-					}
-					schedulerCache.DeletePodGroupV1beta1(pg2)
-				} else {
-					metrics := getLocalMetrics()
-					if metrics != 0 {
-						t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
-						c <- false
-						return
-					}
-					t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
-					c <- true
+				},
+			}, nil)
+
+			allocator := allocate.New()
+			allocator.Execute(ssn)
+			framework.CloseSession(ssn)
+			time.Sleep(time.Second * 3)
+			if num == 1 {
+				metrics := getLocalMetrics()
+				if metrics == 12000 {
+					t.Logf("init queue_allocated metrics is ok,%v", metrics)
 				}
-				num++
+				schedulerCache.DeletePodGroupV1beta1(pg1)
+			} else if num == 2 {
+				metrics := getLocalMetrics()
+				if metrics == 4000 {
+					t.Logf("after delete vcjob pg1, queue_allocated metrics is ok,%v", metrics)
+				}
+				schedulerCache.DeletePodGroupV1beta1(pg2)
+			} else {
+				metrics := getLocalMetrics()
+				if metrics != 0 {
+					t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
+					c <- false
+					return
+				}
+				t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
+				c <- true
 			}
+			num++
 		}
 	}()
@@ -274,17 +271,13 @@
 		}
 	}()
 
-	for {
-		select {
-		case res := <-c:
-			if !res {
-				t.Error("TestProportion failed")
-			} else {
-				t.Log("TestProportion successful")
-			}
-			return
+	for res := range c {
+		if !res {
+			t.Error("TestProportion failed")
+		} else {
+			t.Log("TestProportion successful")
 		}
-
+		return
 	}
 }
 
diff --git a/pkg/scheduler/plugins/util/k8s/snapshot_test.go b/pkg/scheduler/plugins/util/k8s/snapshot_test.go
index bc5c6d561d..60f802ec5e 100644
--- a/pkg/scheduler/plugins/util/k8s/snapshot_test.go
+++ b/pkg/scheduler/plugins/util/k8s/snapshot_test.go
@@ -125,7 +125,7 @@ func TestSnapshot(t *testing.T) {
 			t.Fatalf("unexpected list PodsWithRequiredAntiAffinity nodeInfos value (+got: %s/-want: %s), err: %s",
 				nodeInfoList, tc.expectedNodeInfos, err)
 		}
-		sel, err := labels.Parse("test==test")
+		sel, _ := labels.Parse("test==test")
 		pods, err := snapshot.Pods().List(sel)
 		if !reflect.DeepEqual(tc.expectedPods, pods) || err != nil {
 			t.Fatalf("unexpected list pods value (+got: %s/-want: %s), err: %s", pods, tc.expectedNodeInfos, err)
diff --git a/pkg/scheduler/uthelper/helper.go b/pkg/scheduler/uthelper/helper.go
index 56de82b444..b1722a60f0 100644
--- a/pkg/scheduler/uthelper/helper.go
+++ b/pkg/scheduler/uthelper/helper.go
@@ -176,7 +176,7 @@ func (test *TestCommonStruct) CheckBind(caseIndex int) error {
 		select {
 		case <-binder.Channel:
 		case <-time.After(300 * time.Millisecond):
-			return fmt.Errorf("Failed to get Bind request in case %d(%s).", caseIndex, test.Name)
+			return fmt.Errorf("failed to get Bind request in case %d(%s)", caseIndex, test.Name)
 		}
 	}
 
@@ -210,7 +210,7 @@ func (test *TestCommonStruct) CheckEvict(caseIndex int) error {
 		select {
 		case <-evictor.Channel:
 		case <-time.After(300 * time.Millisecond):
-			return fmt.Errorf("Failed to get Evict request in case %d(%s).", caseIndex, test.Name)
+			return fmt.Errorf("failed to get Evict request in case %d(%s)", caseIndex, test.Name)
 		}
 	}
 
diff --git a/pkg/webhooks/admission/jobs/validate/admit_job.go b/pkg/webhooks/admission/jobs/validate/admit_job.go
index 87897682f8..a03fb106e3 100644
--- a/pkg/webhooks/admission/jobs/validate/admit_job.go
+++ b/pkg/webhooks/admission/jobs/validate/admit_job.go
@@ -36,7 +36,6 @@ import (
 
 	"volcano.sh/apis/pkg/apis/batch/v1alpha1"
 	schedulingv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
-	"volcano.sh/volcano/pkg/controllers/job/helpers"
 	jobhelpers "volcano.sh/volcano/pkg/controllers/job/helpers"
 	"volcano.sh/volcano/pkg/controllers/job/plugins"
 	controllerMpi "volcano.sh/volcano/pkg/controllers/job/plugins/distributed-framework/mpi"
@@ -144,8 +143,8 @@ func validateJobCreate(job *v1alpha1.Job, reviewResponse *admissionv1.AdmissionR
 
 	if _, ok := job.Spec.Plugins[controllerMpi.MPIPluginName]; ok {
 		mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
-		masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
-		workerIndex := helpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
+		masterIndex := jobhelpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
+		workerIndex := jobhelpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
 		if masterIndex == -1 {
 			reviewResponse.Allowed = false
 			return "The specified mpi master task was not found"