Fix staticcheck warnings on pkg/controller and pkg/scheduler #3722

Open · wants to merge 2 commits into base: master
5 changes: 2 additions & 3 deletions pkg/controllers/garbagecollector/garbagecollector.go
@@ -21,7 +21,6 @@ import (
"fmt"
"time"

"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -196,7 +195,7 @@ func (gc *gccontroller) processJob(key string) error {
klog.V(4).Infof("Checking if Job %s/%s is ready for cleanup", namespace, name)
// Ignore the Jobs that are already deleted or being deleted, or the ones that don't need clean up.
job, err := gc.jobLister.Jobs(namespace).Get(name)
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
@@ -214,7 +213,7 @@ func (gc *gccontroller) processJob(key string) error {
// If TTL is modified before we do this check, we cannot be sure if the TTL truly expires.
// The latest Job may have a different UID, but it's fine because the checks will be run again.
fresh, err := gc.vcClient.BatchV1alpha1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
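For context: this hunk, and the matching ones in job_controller_actions.go, job_controller_util_test.go, jobflow_controller.go, and admit_job.go below, fix the same staticcheck complaint — the file imported one package twice under two names, so the redundant import line is dropped and every call site uses the surviving alias. A minimal illustrative sketch (not code from this PR) of the pattern and its fix:

```go
// Illustrative only: the duplicate-import pattern staticcheck flags.
// Before the fix, both of these imports were present for the same package:
//
//	"k8s.io/apimachinery/pkg/api/errors"
//	apierrors "k8s.io/apimachinery/pkg/api/errors"
//
// After the fix, only the aliased import remains and callers use it.
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func main() {
	// apierrors.IsNotFound reports whether an error is a Kubernetes NotFound API error.
	fmt.Println(apierrors.IsNotFound(nil)) // false: nil is not a NotFound error
}
```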
3 changes: 1 addition & 2 deletions pkg/controllers/job/job_controller_actions.go
@@ -26,7 +26,6 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
@@ -136,7 +135,7 @@ func (cc *jobcontroller) killJob(jobInfo *apis.JobInfo, podRetainPhase state.Pha

// Update Job status
newJob, err := cc.vcClient.BatchV1alpha1().Jobs(job.Namespace).UpdateStatus(context.TODO(), job, metav1.UpdateOptions{})
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
klog.Errorf("Job %v/%v was not found", job.Namespace, job.Name)
return nil
}
7 changes: 3 additions & 4 deletions pkg/controllers/job/job_controller_util_test.go
@@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"volcano.sh/apis/pkg/apis/batch/v1alpha1"
batch "volcano.sh/apis/pkg/apis/batch/v1alpha1"
busv1alpha1 "volcano.sh/apis/pkg/apis/bus/v1alpha1"
"volcano.sh/volcano/pkg/controllers/apis"
)
@@ -983,10 +982,10 @@ func TestTaskPriority_CalcPGMin(t *testing.T) {

func TestCalcPGMinResources(t *testing.T) {
jc := newFakeController()
job := &batch.Job{
job := &v1alpha1.Job{
TypeMeta: metav1.TypeMeta{},
Spec: batch.JobSpec{
Tasks: []batch.TaskSpec{
Spec: v1alpha1.JobSpec{
Tasks: []v1alpha1.TaskSpec{
master, worker,
},
},
3 changes: 1 addition & 2 deletions pkg/controllers/jobflow/jobflow_controller.go
@@ -39,7 +39,6 @@ import (
flowlister "volcano.sh/apis/pkg/client/listers/flow/v1alpha1"
"volcano.sh/volcano/pkg/controllers/apis"
"volcano.sh/volcano/pkg/controllers/framework"
"volcano.sh/volcano/pkg/controllers/jobflow/state"
jobflowstate "volcano.sh/volcano/pkg/controllers/jobflow/state"
)

@@ -129,7 +128,7 @@ func (jf *jobflowcontroller) Initialize(opt *framework.ControllerOption) error {

jf.syncHandler = jf.handleJobFlow

state.SyncJobFlow = jf.syncJobFlow
jobflowstate.SyncJobFlow = jf.syncJobFlow
return nil
}

2 changes: 1 addition & 1 deletion pkg/scheduler/api/devices/nvidia/gpushare/share.go
@@ -211,7 +211,7 @@ func (g *GPUDevice) getUsedGPUMemory() uint {

// isIdleGPU check if the device is idled.
func (g *GPUDevice) isIdleGPU() bool {
return g.PodMap == nil || len(g.PodMap) == 0
return len(g.PodMap) == 0
}

// getGPUMemoryPod returns the GPU memory required by the pod.
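The isIdleGPU change relies on a language guarantee: len of a nil map is zero, so the explicit nil check is redundant and staticcheck suggests dropping it (the S1009 simplification). A standalone sketch, using a hypothetical map type rather than the scheduler's PodMap:

```go
// len() is defined to return 0 for a nil map (and nil slice), so
// "m == nil || len(m) == 0" is equivalent to "len(m) == 0".
package main

import "fmt"

func isIdle(podMap map[string]uint) bool {
	return len(podMap) == 0 // safe even when podMap is nil
}

func main() {
	var nilMap map[string]uint // declared but never initialized: nil
	fmt.Println(isIdle(nilMap))                      // true
	fmt.Println(isIdle(map[string]uint{}))           // true
	fmt.Println(isIdle(map[string]uint{"pod-a": 1})) // false
}
```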
6 changes: 3 additions & 3 deletions pkg/scheduler/api/unschedule_info_test.go
@@ -16,7 +16,7 @@ limitations under the License.
package api

import (
"fmt"
"errors"
"testing"

"github.com/stretchr/testify/assert"
@@ -95,15 +95,15 @@ func TestFitErrors(t *testing.T) {
{
node: "node1",
fitStr: "fit failed",
err: fmt.Errorf(NodePodNumberExceeded),
err: errors.New(NodePodNumberExceeded),
want: "fit failed: 1 node(s) pod number exceeded.",
// no node has UnschedulableAndUnresolvable
filterNodes: map[string]sets.Empty{},
},
{
node: "node1",
fitStr: "NodeResourceFitFailed",
err: fmt.Errorf(NodePodNumberExceeded),
err: errors.New(NodePodNumberExceeded),
fiterr: &FitError{
taskNamespace: "ns1", taskName: "task1", NodeName: "node2",
Status: []*Status{{Reason: nodeAffinity, Code: UnschedulableAndUnresolvable}},
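Switching from fmt.Errorf to errors.New here does more than silence a warning: fmt.Errorf treats its first argument as a format string, so a message that ever contained a % character would be mangled, while errors.New passes the text through verbatim. A small illustration with a made-up message (not one of the scheduler's constants):

```go
// Why errors.New is the right constructor for a fixed message:
// fmt.Errorf interprets '%' sequences even when no arguments are supplied.
package main

import (
	"errors"
	"fmt"
)

const msg = "queue is at 100% capacity" // hypothetical message containing '%'

func main() {
	fmt.Println(fmt.Errorf(msg)) // mangled output, e.g. "queue is at 100%!c(MISSING)apacity"
	fmt.Println(errors.New(msg)) // "queue is at 100% capacity", exactly as written
}
```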
9 changes: 5 additions & 4 deletions pkg/scheduler/cache/cache.go
@@ -18,6 +18,7 @@ package cache

import (
"context"
"errors"
"fmt"
"os"
"strconv"
@@ -173,7 +174,7 @@ type imageState struct {
// Size of the image
size int64
// A set of node names for nodes having this image present
nodes sets.String
nodes sets.Set[string]
}

// DefaultBinder with kube client and event recorder
@@ -370,11 +371,11 @@ func (dvb *defaultVolumeBinder) GetPodVolumes(task *schedulingapi.TaskInfo,
if err != nil {
return nil, err
} else if len(reasons) > 0 {
var errors []string
var errorslice []string
for _, reason := range reasons {
errors = append(errors, string(reason))
errorslice = append(errorslice, string(reason))
}
return nil, fmt.Errorf(strings.Join(errors, ","))
return nil, errors.New(strings.Join(errorslice, ","))
}

return podVolumes, err
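Two separate cleanups meet in cache.go: the deprecated string-set type from k8s.io/apimachinery is replaced with the generic sets.Set[string] (built via sets.New, as also done in event_handlers.go below), and the local slice that used to be named errors is renamed so it no longer shadows the standard errors package, which in turn lets the joined message be returned through errors.New instead of fmt.Errorf. A rough illustrative sketch of both pieces; the function and variable names are invented:

```go
// Illustrative sketch, not code from this PR.
package main

import (
	"errors"
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

// joinReasons mirrors the GetPodVolumes change: the slice is no longer named
// "errors", so the standard errors package is usable in the same scope.
func joinReasons(reasons []string) error {
	var errorslice []string
	errorslice = append(errorslice, reasons...)
	return errors.New(strings.Join(errorslice, ","))
}

func main() {
	// sets.New returns the generic sets.Set[string]; it replaces the
	// deprecated sets.NewString / sets.String pair.
	nodes := sets.New("node-1")
	nodes.Insert("node-2")
	fmt.Println(nodes.Has("node-1")) // true
	fmt.Println(joinReasons([]string{"not enough cpu", "volume node affinity conflict"}))
}
```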
2 changes: 1 addition & 1 deletion pkg/scheduler/cache/event_handlers.go
@@ -458,7 +458,7 @@ func (sc *SchedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *scheduling
if !ok {
state = &imageState{
size: image.SizeBytes,
nodes: sets.NewString(node.Name),
nodes: sets.New(node.Name),
}
sc.imageStates[name] = state
} else {
4 changes: 2 additions & 2 deletions pkg/scheduler/metrics/source/metrics_client.go
@@ -52,8 +52,8 @@ func NewMetricsClient(restConfig *rest.Config, metricsConf map[string]string) (M
} else if metricsType == Metrics_Type_Prometheus_Adaptor {
return NewCustomMetricsClient(restConfig)
} else {
return nil, fmt.Errorf("Data cannot be collected from the %s monitoring system. "+
"The supported monitoring systems are %s, %s, and %s.",
return nil, fmt.Errorf("data cannot be collected from the %s monitoring system. "+
"The supported monitoring systems are %s, %s, and %s",
metricsType, Metrics_Type_Elasticsearch, Metrics_Tpye_Prometheus, Metrics_Type_Prometheus_Adaptor)
}
}
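The rewordings in this file, in predicates.go, and in uthelper/helper.go below all follow the same Go convention, enforced by staticcheck as error-string style (ST1005): messages start in lower case and carry no trailing punctuation, because they usually end up embedded in a larger log line or wrapped error. A small illustration with an invented caller:

```go
// ST1005-style error strings compose cleanly when wrapped or logged.
package main

import (
	"fmt"
	"log"
)

func collect(metricsType string) error {
	// lower-case start, no trailing period
	return fmt.Errorf("data cannot be collected from the %s monitoring system", metricsType)
}

func main() {
	if err := collect("graphite"); err != nil {
		// Reads as one sentence instead of "...: Data cannot be collected ... system."
		log.Printf("metrics client init failed: %v", err)
	}
}
```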
14 changes: 0 additions & 14 deletions pkg/scheduler/plugins/drf/hdrf_test.go
@@ -37,20 +37,6 @@ func mergePods(pods ...[]*v1.Pod) []*v1.Pod {
return ret
}

type queueSpec struct {
name string
hierarchy string
weights string
}

type pgSpec struct {
taskNum int
cpu string
mem string
pg string
queue string
}

func TestHDRF(t *testing.T) {
options.Default()

2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/predicates/predicates.go
@@ -346,7 +346,7 @@ func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) {
// require to run the Pod. So there will be no overbooking. However, to
// avoid the inconsistency in resource calculation between the scheduler
// and the older (before v1.28) kubelet, make the Pod unschedulable.
return fmt.Errorf("Pod has a restartable init container and the SidecarContainers feature is disabled")
return fmt.Errorf("pod has a restartable init container and the SidecarContainers feature is disabled")
}

// InterPodAffinity Predicate
105 changes: 49 additions & 56 deletions pkg/scheduler/plugins/proportion/proportion_test.go
@@ -213,56 +213,53 @@ func TestProportion(t *testing.T) {
// proportion
go func() {
for {
select {
default:
ssn := framework.OpenSession(schedulerCache, []conf.Tier{
{
Plugins: []conf.PluginOption{
{
Name: PluginName,
EnabledPredicate: &trueValue,
},
{
Name: gang.PluginName,
EnabledJobReady: &trueValue,
EnabledJobPipelined: &trueValue,
},
{
Name: priority.PluginName,
EnabledJobOrder: &trueValue,
},
ssn := framework.OpenSession(schedulerCache, []conf.Tier{
{
Plugins: []conf.PluginOption{
{
Name: PluginName,
EnabledPredicate: &trueValue,
},
{
Name: gang.PluginName,
EnabledJobReady: &trueValue,
EnabledJobPipelined: &trueValue,
},
{
Name: priority.PluginName,
EnabledJobOrder: &trueValue,
},
},
}, nil)

allocator := allocate.New()
allocator.Execute(ssn)
framework.CloseSession(ssn)
time.Sleep(time.Second * 3)
if num == 1 {
metrics := getLocalMetrics()
if metrics == 12000 {
t.Logf("init queue_allocated metrics is ok,%v", metrics)
}
schedulerCache.DeletePodGroupV1beta1(pg1)
} else if num == 2 {
metrics := getLocalMetrics()
if metrics == 4000 {
t.Logf("after delete vcjob pg1, queue_allocated metrics is ok,%v", metrics)
}
schedulerCache.DeletePodGroupV1beta1(pg2)
} else {
metrics := getLocalMetrics()
if metrics != 0 {
t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
c <- false
return
}
t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
c <- true
},
}, nil)

allocator := allocate.New()
allocator.Execute(ssn)
framework.CloseSession(ssn)
time.Sleep(time.Second * 3)
if num == 1 {
metrics := getLocalMetrics()
if metrics == 12000 {
t.Logf("init queue_allocated metrics is ok,%v", metrics)
}
num++
schedulerCache.DeletePodGroupV1beta1(pg1)
} else if num == 2 {
metrics := getLocalMetrics()
if metrics == 4000 {
t.Logf("after delete vcjob pg1, queue_allocated metrics is ok,%v", metrics)
}
schedulerCache.DeletePodGroupV1beta1(pg2)
} else {
metrics := getLocalMetrics()
if metrics != 0 {
t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
c <- false
return
}
t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
c <- true
}
num++
}
}()

@@ -274,17 +271,13 @@ func TestProportion(t *testing.T) {
}
}()

for {
select {
case res := <-c:
if !res {
t.Error("TestProportion failed")
} else {
t.Log("TestProportion successful")
}
return
for res := range c {
if !res {
t.Error("TestProportion failed")
} else {
t.Log("TestProportion successful")
}

return
}
}
}
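Two loop simplifications are applied in this test. The worker goroutine's select had only a default case, which runs immediately and therefore adds nothing, and the result-collection loop was a for/select with a single receive case, which is just a range over the channel. A compact sketch with invented channel names:

```go
// Illustrative sketch of both simplifications; not the test's real logic.
package main

import "fmt"

func main() {
	results := make(chan bool, 3)

	go func() {
		defer close(results)
		for i := 0; i < 3; i++ {
			// Before: for { select { default: <body> } } — the select was a no-op wrapper.
			results <- i%2 == 0
		}
	}()

	// Before: for { select { case res := <-results: ... } }
	// After: a plain range loop, which also exits cleanly when the channel closes.
	for res := range results {
		fmt.Println("result:", res)
	}
}
```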
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/util/k8s/snapshot_test.go
@@ -125,7 +125,7 @@ func TestSnapshot(t *testing.T) {
t.Fatalf("unexpected list PodsWithRequiredAntiAffinity nodeInfos value (+got: %s/-want: %s), err: %s", nodeInfoList, tc.expectedNodeInfos, err)
}

sel, err := labels.Parse("test==test")
sel, _ := labels.Parse("test==test")
pods, err := snapshot.Pods().List(sel)
if !reflect.DeepEqual(tc.expectedPods, pods) || err != nil {
t.Fatalf("unexpected list pods value (+got: %s/-want: %s), err: %s", pods, tc.expectedNodeInfos, err)
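In the snapshot test, the error returned by labels.Parse was assigned to err and then overwritten by the following List call before ever being read, which staticcheck reports as an unused assignment (SA4006); discarding it with the blank identifier makes the intent explicit. A tiny sketch of the pattern using strconv instead of the labels package:

```go
// Unused-assignment pattern: the first err is overwritten before being read.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Before:
	//   n, err := strconv.Atoi("10") // err never checked...
	//   m, err := strconv.Atoi("20") // ...and silently overwritten here
	// After: deliberately discard the value that was never going to be checked.
	n, _ := strconv.Atoi("10")
	m, err := strconv.Atoi("20")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(n + m) // 30
}
```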
4 changes: 2 additions & 2 deletions pkg/scheduler/uthelper/helper.go
@@ -169,7 +169,7 @@ func (test *TestCommonStruct) CheckBind(caseIndex int) error {
select {
case <-binder.Channel:
case <-time.After(300 * time.Millisecond):
return fmt.Errorf("Failed to get Bind request in case %d(%s).", caseIndex, test.Name)
return fmt.Errorf("failed to get Bind request in case %d(%s)", caseIndex, test.Name)
}
}

@@ -203,7 +203,7 @@ func (test *TestCommonStruct) CheckEvict(caseIndex int) error {
select {
case <-evictor.Channel:
case <-time.After(300 * time.Millisecond):
return fmt.Errorf("Failed to get Evict request in case %d(%s).", caseIndex, test.Name)
return fmt.Errorf("failed to get Evict request in case %d(%s)", caseIndex, test.Name)
}
}

5 changes: 2 additions & 3 deletions pkg/webhooks/admission/jobs/validate/admit_job.go
@@ -36,7 +36,6 @@ import (

"volcano.sh/apis/pkg/apis/batch/v1alpha1"
schedulingv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
"volcano.sh/volcano/pkg/controllers/job/helpers"
jobhelpers "volcano.sh/volcano/pkg/controllers/job/helpers"
"volcano.sh/volcano/pkg/controllers/job/plugins"
controllerMpi "volcano.sh/volcano/pkg/controllers/job/plugins/distributed-framework/mpi"
@@ -144,8 +143,8 @@ func validateJobCreate(job *v1alpha1.Job, reviewResponse *admissionv1.AdmissionR

if _, ok := job.Spec.Plugins[controllerMpi.MPIPluginName]; ok {
mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
workerIndex := helpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
masterIndex := jobhelpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
workerIndex := jobhelpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
if masterIndex == -1 {
reviewResponse.Allowed = false
return "The specified mpi master task was not found"