fix staticcheck warnings on pkg/scheduler
Signed-off-by: xovoxy <xilovele@gmail.com>
xovoxy committed Oct 16, 2024
1 parent ddaa721 commit e289b0f
Showing 11 changed files with 67 additions and 88 deletions.
2 changes: 1 addition & 1 deletion pkg/scheduler/api/devices/nvidia/gpushare/share.go
@@ -211,7 +211,7 @@ func (g *GPUDevice) getUsedGPUMemory() uint {

// isIdleGPU check if the device is idled.
func (g *GPUDevice) isIdleGPU() bool {
return g.PodMap == nil || len(g.PodMap) == 0
return len(g.PodMap) == 0
}

// getGPUMemoryPod returns the GPU memory required by the pod.
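The gpushare change above is the simplification staticcheck's S1009 check suggests for redundant nil guards: len of a nil map or slice is defined to be zero, so the explicit nil comparison adds nothing. A minimal, self-contained sketch of the idea — the PodMap field is modeled here with a simplified element type, not the scheduler's real one:

package main

import "fmt"

// GPUDevice is a cut-down stand-in for the scheduler's device type; only the
// PodMap field matters for this example.
type GPUDevice struct {
    PodMap map[string]struct{} // pods currently placed on the device
}

// isIdleGPU reports whether no pod occupies the device. len of a nil map is 0,
// so a separate `g.PodMap == nil` check is redundant.
func (g *GPUDevice) isIdleGPU() bool {
    return len(g.PodMap) == 0
}

func main() {
    var g GPUDevice            // PodMap is nil here
    fmt.Println(g.isIdleGPU()) // true: nil map has length 0
}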
6 changes: 3 additions & 3 deletions pkg/scheduler/api/unschedule_info_test.go
@@ -16,7 +16,7 @@ limitations under the License.
package api

import (
"fmt"
"errors"
"testing"

"github.com/stretchr/testify/assert"
@@ -95,15 +95,15 @@ func TestFitErrors(t *testing.T) {
{
node: "node1",
fitStr: "fit failed",
err: fmt.Errorf(NodePodNumberExceeded),
err: errors.New(NodePodNumberExceeded),
want: "fit failed: 1 node(s) pod number exceeded.",
// no node has UnschedulableAndUnresolvable
filterNodes: map[string]sets.Empty{},
},
{
node: "node1",
fitStr: "NodeResourceFitFailed",
err: fmt.Errorf(NodePodNumberExceeded),
err: errors.New(NodePodNumberExceeded),
fiterr: &FitError{
taskNamespace: "ns1", taskName: "task1", NodeName: "node2",
Status: []*Status{{Reason: nodeAffinity, Code: UnschedulableAndUnresolvable}},
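Replacing fmt.Errorf with errors.New when the message is a non-constant string and there are no format arguments is the fix staticcheck usually suggests here (likely its SA1006 check): if the message ever contains a '%', fmt.Errorf would try to interpret it as a formatting directive. A small sketch of the difference, using an invented message rather than the test's constants:

package main

import (
    "errors"
    "fmt"
)

func main() {
    // Invented message for illustration; note the '%' in the middle.
    msg := "node rejected: 100% of pod quota used"

    // fmt.Errorf treats msg as a format string, so the '%' sequence is
    // parsed as a (broken) verb and the output comes out mangled.
    fmt.Println(fmt.Errorf(msg))

    // errors.New stores the message verbatim.
    fmt.Println(errors.New(msg))
}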
9 changes: 5 additions & 4 deletions pkg/scheduler/cache/cache.go
@@ -18,6 +18,7 @@ package cache

import (
"context"
"errors"
"fmt"
"os"
"strconv"
@@ -173,7 +174,7 @@ type imageState struct {
// Size of the image
size int64
// A set of node names for nodes having this image present
nodes sets.String
nodes sets.Set[string]
}

// DefaultBinder with kube client and event recorder
@@ -370,11 +371,11 @@ func (dvb *defaultVolumeBinder) GetPodVolumes(task *schedulingapi.TaskInfo,
if err != nil {
return nil, err
} else if len(reasons) > 0 {
var errors []string
var errorslice []string
for _, reason := range reasons {
errors = append(errors, string(reason))
errorslice = append(errorslice, string(reason))
}
return nil, fmt.Errorf(strings.Join(errors, ","))
return nil, errors.New(strings.Join(errorslice, ","))
}

return podVolumes, err
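Two things happen in the cache.go hunks: the imageState node set moves to the generic sets.Set[string] (an example follows the event_handlers.go hunk below), and the local slice previously named errors is renamed so it no longer shadows the standard errors package, which the file now imports for errors.New. A hedged sketch of the shadowing problem, with an invented joinReasons helper rather than the real GetPodVolumes signature:

package main

import (
    "errors"
    "fmt"
    "strings"
)

// joinReasons is illustrative only. If the slice below were named "errors",
// that identifier would shadow the imported errors package inside this
// function and the errors.New call would not compile.
func joinReasons(reasons []string) error {
    var errorslice []string
    errorslice = append(errorslice, reasons...)
    return errors.New(strings.Join(errorslice, ","))
}

func main() {
    fmt.Println(joinReasons([]string{"node(s) unschedulable", "insufficient cpu"}))
}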
2 changes: 1 addition & 1 deletion pkg/scheduler/cache/event_handlers.go
@@ -458,7 +458,7 @@ func (sc *SchedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *scheduling
if !ok {
state = &imageState{
size: image.SizeBytes,
nodes: sets.NewString(node.Name),
nodes: sets.New(node.Name),
}
sc.imageStates[name] = state
} else {
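sets.String and sets.NewString are the older string-specialized helpers in k8s.io/apimachinery/pkg/util/sets; they are deprecated in favor of the generic Set[T], which is presumably what staticcheck's deprecation check (SA1019) reported. A minimal usage sketch, assuming a module that already depends on a recent k8s.io/apimachinery:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    // Generic replacement for sets.NewString / sets.String.
    nodes := sets.New("node-1")      // sets.Set[string]; element type is inferred
    nodes.Insert("node-2")           // same Insert/Has/Delete surface as before
    fmt.Println(nodes.Has("node-1")) // true
    fmt.Println(sets.List(nodes))    // sorted slice: [node-1 node-2]
}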
4 changes: 2 additions & 2 deletions pkg/scheduler/metrics/source/metrics_client.go
@@ -52,8 +52,8 @@ func NewMetricsClient(restConfig *rest.Config, metricsConf map[string]string) (M
} else if metricsType == Metrics_Type_Prometheus_Adaptor {
return NewCustomMetricsClient(restConfig)
} else {
return nil, fmt.Errorf("Data cannot be collected from the %s monitoring system. "+
"The supported monitoring systems are %s, %s, and %s.",
return nil, fmt.Errorf("data cannot be collected from the %s monitoring system. "+
"The supported monitoring systems are %s, %s, and %s",
metricsType, Metrics_Type_Elasticsearch, Metrics_Tpye_Prometheus, Metrics_Type_Prometheus_Adaptor)
}
}
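The remaining wording changes in this commit (here, and in predicates.go and uthelper/helper.go below) follow the Go convention that error strings start lowercase and carry no trailing period, since they are routinely wrapped into longer messages — the rule staticcheck enforces as ST1005. A short illustration with made-up messages:

package main

import (
    "errors"
    "fmt"
)

func main() {
    bad := errors.New("Data cannot be collected from the monitoring system.")
    good := errors.New("data cannot be collected from the monitoring system")

    // Once wrapped, the capitalized, period-terminated variant reads oddly
    // in the middle of the combined message.
    fmt.Println(fmt.Errorf("open session: %w: retrying", bad))
    fmt.Println(fmt.Errorf("open session: %w: retrying", good))
}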
14 changes: 0 additions & 14 deletions pkg/scheduler/plugins/drf/hdrf_test.go
@@ -37,20 +37,6 @@ func mergePods(pods ...[]*v1.Pod) []*v1.Pod {
return ret
}

type queueSpec struct {
name string
hierarchy string
weights string
}

type pgSpec struct {
taskNum int
cpu string
mem string
pg string
queue string
}

func TestHDRF(t *testing.T) {
options.Default()

2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/predicates/predicates.go
@@ -346,7 +346,7 @@ func (pp *predicatesPlugin) OnSessionOpen(ssn *framework.Session) {
// require to run the Pod. So there will be no overbooking. However, to
// avoid the inconsistency in resource calculation between the scheduler
// and the older (before v1.28) kubelet, make the Pod unschedulable.
return fmt.Errorf("Pod has a restartable init container and the SidecarContainers feature is disabled")
return fmt.Errorf("pod has a restartable init container and the SidecarContainers feature is disabled")
}

// InterPodAffinity Predicate
105 changes: 49 additions & 56 deletions pkg/scheduler/plugins/proportion/proportion_test.go
@@ -213,56 +213,53 @@ func TestProportion(t *testing.T) {
// proportion
go func() {
for {
select {
default:
ssn := framework.OpenSession(schedulerCache, []conf.Tier{
{
Plugins: []conf.PluginOption{
{
Name: PluginName,
EnabledPredicate: &trueValue,
},
{
Name: gang.PluginName,
EnabledJobReady: &trueValue,
EnabledJobPipelined: &trueValue,
},
{
Name: priority.PluginName,
EnabledJobOrder: &trueValue,
},
ssn := framework.OpenSession(schedulerCache, []conf.Tier{
{
Plugins: []conf.PluginOption{
{
Name: PluginName,
EnabledPredicate: &trueValue,
},
{
Name: gang.PluginName,
EnabledJobReady: &trueValue,
EnabledJobPipelined: &trueValue,
},
{
Name: priority.PluginName,
EnabledJobOrder: &trueValue,
},
},
}, nil)

allocator := allocate.New()
allocator.Execute(ssn)
framework.CloseSession(ssn)
time.Sleep(time.Second * 3)
if num == 1 {
metrics := getLocalMetrics()
if metrics == 12000 {
t.Logf("init queue_allocated metrics is ok,%v", metrics)
}
schedulerCache.DeletePodGroupV1beta1(pg1)
} else if num == 2 {
metrics := getLocalMetrics()
if metrics == 4000 {
t.Logf("after delete vcjob pg1, queue_allocated metrics is ok,%v", metrics)
}
schedulerCache.DeletePodGroupV1beta1(pg2)
} else {
metrics := getLocalMetrics()
if metrics != 0 {
t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
c <- false
return
}
t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
c <- true
},
}, nil)

allocator := allocate.New()
allocator.Execute(ssn)
framework.CloseSession(ssn)
time.Sleep(time.Second * 3)
if num == 1 {
metrics := getLocalMetrics()
if metrics == 12000 {
t.Logf("init queue_allocated metrics is ok,%v", metrics)
}
num++
schedulerCache.DeletePodGroupV1beta1(pg1)
} else if num == 2 {
metrics := getLocalMetrics()
if metrics == 4000 {
t.Logf("after delete vcjob pg1, queue_allocated metrics is ok,%v", metrics)
}
schedulerCache.DeletePodGroupV1beta1(pg2)
} else {
metrics := getLocalMetrics()
if metrics != 0 {
t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
c <- false
return
}
t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
c <- true
}
num++
}
}()

@@ -274,17 +271,13 @@ func TestProportion(t *testing.T) {
}
}()

for {
select {
case res := <-c:
if !res {
t.Error("TestProportion failed")
} else {
t.Log("TestProportion successful")
}
return
for res := range c {
if !res {
t.Error("TestProportion failed")
} else {
t.Log("TestProportion successful")
}

return
}
}
}
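The proportion_test changes drop control flow that staticcheck considers redundant: a select containing only a default case adds nothing, so its body now runs directly inside the for loop, and the final single-case receive loop becomes a range over the channel — the shape staticcheck's S1000 check pushes toward. A minimal sketch of the receive-loop simplification (the channel and values are invented):

package main

import "fmt"

func main() {
    results := make(chan bool, 2)
    results <- true
    results <- false
    close(results)

    // Instead of:
    //
    //	for {
    //		select {
    //		case res := <-results:
    //			fmt.Println("result:", res)
    //		}
    //	}
    //
    // a loop that only ever receives from one channel can range over it.
    for res := range results {
        fmt.Println("result:", res)
    }
}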
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/util/k8s/snapshot_test.go
@@ -125,7 +125,7 @@ func TestSnapshot(t *testing.T) {
t.Fatalf("unexpected list PodsWithRequiredAntiAffinity nodeInfos value (+got: %s/-want: %s), err: %s", nodeInfoList, tc.expectedNodeInfos, err)
}

sel, err := labels.Parse("test==test")
sel, _ := labels.Parse("test==test")
pods, err := snapshot.Pods().List(sel)
if !reflect.DeepEqual(tc.expectedPods, pods) || err != nil {
t.Fatalf("unexpected list pods value (+got: %s/-want: %s), err: %s", pods, tc.expectedNodeInfos, err)
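In the snapshot test, the error returned by labels.Parse was immediately overwritten by the next assignment without ever being read (the pattern staticcheck reports as SA4006), so it is now discarded explicitly. A small sketch, assuming the selector literal is known to be valid:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
)

func main() {
    // The previous code captured err here and then overwrote it on the very
    // next line; using the blank identifier makes the intent clear.
    sel, _ := labels.Parse("test==test")
    fmt.Println(sel.String()) // prints the parsed selector
}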
4 changes: 2 additions & 2 deletions pkg/scheduler/uthelper/helper.go
@@ -176,7 +176,7 @@ func (test *TestCommonStruct) CheckBind(caseIndex int) error {
select {
case <-binder.Channel:
case <-time.After(300 * time.Millisecond):
return fmt.Errorf("Failed to get Bind request in case %d(%s).", caseIndex, test.Name)
return fmt.Errorf("failed to get Bind request in case %d(%s)", caseIndex, test.Name)
}
}

@@ -210,7 +210,7 @@ func (test *TestCommonStruct) CheckEvict(caseIndex int) error {
select {
case <-evictor.Channel:
case <-time.After(300 * time.Millisecond):
return fmt.Errorf("Failed to get Evict request in case %d(%s).", caseIndex, test.Name)
return fmt.Errorf("failed to get Evict request in case %d(%s)", caseIndex, test.Name)
}
}

5 changes: 2 additions & 3 deletions pkg/webhooks/admission/jobs/validate/admit_job.go
@@ -36,7 +36,6 @@ import (

"volcano.sh/apis/pkg/apis/batch/v1alpha1"
schedulingv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
"volcano.sh/volcano/pkg/controllers/job/helpers"
jobhelpers "volcano.sh/volcano/pkg/controllers/job/helpers"
"volcano.sh/volcano/pkg/controllers/job/plugins"
controllerMpi "volcano.sh/volcano/pkg/controllers/job/plugins/distributed-framework/mpi"
@@ -144,8 +143,8 @@ func validateJobCreate(job *v1alpha1.Job, reviewResponse *admissionv1.AdmissionR

if _, ok := job.Spec.Plugins[controllerMpi.MPIPluginName]; ok {
mp := controllerMpi.NewInstance(job.Spec.Plugins[controllerMpi.MPIPluginName])
masterIndex := helpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
workerIndex := helpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
masterIndex := jobhelpers.GetTaskIndexUnderJob(mp.GetMasterName(), job)
workerIndex := jobhelpers.GetTaskIndexUnderJob(mp.GetWorkerName(), job)
if masterIndex == -1 {
reviewResponse.Allowed = false
return "The specified mpi master task was not found"
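admit_job.go imported volcano.sh/volcano/pkg/controllers/job/helpers twice, once under its default name and once as jobhelpers — the duplicate-import pattern staticcheck flags (ST1019). Dropping one import and routing every call through the remaining alias is the whole fix; a trivial stand-alone sketch of the convention, using a standard-library package instead of the real helpers package:

package main

import (
    "fmt"
    strhelpers "strings" // keep a single (possibly aliased) import ...
)

func main() {
    // ... and use that one name consistently at every call site.
    fmt.Println(strhelpers.Contains("mpi-master-0", "master"))
}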
