Add WaitForCompletedJobs measurement
alculquicondor committed Jan 25, 2022
1 parent b2f2197 commit 4aee4f5
Showing 4 changed files with 304 additions and 1 deletion.
247 changes: 247 additions & 0 deletions clusterloader2/pkg/measurement/common/wait_for_jobs.go
@@ -0,0 +1,247 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package common

import (
"context"
"fmt"
"sync"
"time"

batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"

"k8s.io/perf-tests/clusterloader2/pkg/framework"
"k8s.io/perf-tests/clusterloader2/pkg/measurement"
measurementutil "k8s.io/perf-tests/clusterloader2/pkg/measurement/util"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/informer"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/runtimeobjects"
"k8s.io/perf-tests/clusterloader2/pkg/measurement/util/workerqueue"
"k8s.io/perf-tests/clusterloader2/pkg/util"
)

const (
defaultWaitForCompletedJobsTimeout = 10 * time.Minute
waitForCompletedJobsName = "WaitForCompletedJobs"
waitForCompletedJobsWorkers = 1
checkCompletedJobsInterval = 5 * time.Second
)

func init() {
if err := measurement.Register(waitForCompletedJobsName, createWaitForCompletedJobsMeasurement); err != nil {
klog.Fatalf("Cannot register %s: %v", waitForCompletedJobsName, err)
}
}

func createWaitForCompletedJobsMeasurement() measurement.Measurement {
return &waitForCompletedJobsMeasurement{
selector: measurementutil.NewObjectSelector(),
queue: workerqueue.NewWorkerQueue(waitForCompletedJobsWorkers),
finishedJobs: make(map[string]batchv1.JobConditionType),
}
}

// waitForCompletedJobsMeasurement tracks Jobs matching the configured
// selector and records which of them have reached a Complete or Failed
// condition.
type waitForCompletedJobsMeasurement struct {
selector *measurementutil.ObjectSelector

queue workerqueue.Interface
isRunning bool
clusterFramework *framework.Framework
cancel context.CancelFunc
lock sync.Mutex
finishedJobs map[string]batchv1.JobConditionType
}

// Execute handles two actions: "start" begins watching Jobs matching the
// configured selector, and "gather" blocks until all matching Jobs have
// finished or the timeout expires.
func (w *waitForCompletedJobsMeasurement) Execute(config *measurement.Config) ([]measurement.Summary, error) {
w.clusterFramework = config.ClusterFramework

action, err := util.GetString(config.Params, "action")
if err != nil {
return nil, err
}

switch action {
case "start":
if err = w.selector.Parse(config.Params); err != nil {
return nil, err
}
return nil, w.start()
case "gather":
timeout, err := util.GetDurationOrDefault(config.Params, "timeout", defaultWaitForCompletedJobsTimeout)
if err != nil {
return nil, err
}
return nil, w.gather(timeout)
default:
return nil, fmt.Errorf("unknown action %v", action)
}
}

func (w *waitForCompletedJobsMeasurement) Dispose() {
if !w.isRunning {
return
}
w.isRunning = false
w.queue.Stop()
w.cancel()
}

func (w *waitForCompletedJobsMeasurement) String() string {
return waitForCompletedJobsName
}

// start runs an informer over Jobs matching the selector and queues every
// event for processing by handleObject.
func (w *waitForCompletedJobsMeasurement) start() error {
if w.isRunning {
klog.V(2).Infof("%v: wait for completed jobs measurement already running", w)
return nil
}
klog.V(2).Infof("%v: starting wait for completed jobs measurement...", w)
w.isRunning = true
ctx, cancel := context.WithCancel(context.Background())
w.cancel = cancel
c := w.clusterFramework.GetClientSets().GetClient()
inf := informer.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
w.selector.ApplySelectors(&options)
return c.BatchV1().Jobs(w.selector.Namespace).List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
w.selector.ApplySelectors(&options)
return c.BatchV1().Jobs(w.selector.Namespace).Watch(ctx, options)
},
},
func(oldObj, newObj interface{}) {
f := func() {
w.handleObject(oldObj, newObj)
}
w.queue.Add(&f)
},
)
return informer.StartAndSync(inf, ctx.Done(), informerSyncTimeout)
}

// gather lists the Jobs matching the selector and polls until every one of
// them has been observed as finished, or the timeout expires.
func (w *waitForCompletedJobsMeasurement) gather(timeout time.Duration) error {
if !w.isRunning {
return fmt.Errorf("%v: wait for completed jobs was not started", w)
}
klog.V(2).Infof("%v: waiting for completed jobs measurement...", w)
jobKeys, err := w.objectKeys()
if err != nil {
return err
}

cond := func() (bool, error) {
w.lock.Lock()
defer w.lock.Unlock()
finishedKeys := make(sets.String, len(w.finishedJobs))
for k := range w.finishedJobs {
finishedKeys.Insert(k)
}
return jobKeys.Equal(finishedKeys), nil
}
if err := wait.Poll(checkCompletedJobsInterval, timeout, cond); err != nil {
klog.V(2).Infof("Timed out waiting for all jobs to complete: %v", err)
}
w.lock.Lock()
completed := 0
failed := 0
for _, cond := range w.finishedJobs {
if cond == batchv1.JobComplete {
completed++
} else if cond == batchv1.JobFailed {
failed++
}
}
w.lock.Unlock()
klog.V(2).Infof("%v: %d/%d Jobs finished, %d completed, %d failed", w, completed+failed, len(jobKeys), completed, failed)
return nil
}

// handleObject records whether the updated Job has finished; Jobs that are
// deleted or not yet finished are removed from the finished set.
func (w *waitForCompletedJobsMeasurement) handleObject(oldObj, newObj interface{}) {
var oldJob, newJob *batchv1.Job
var ok bool
oldJob, ok = oldObj.(*batchv1.Job)
if oldObj != nil && !ok {
klog.Errorf("%v: uncastable old object: %v", w, oldObj)
}
newJob, ok = newObj.(*batchv1.Job)
if newObj != nil && !ok {
klog.Errorf("%v: uncastable new object: %v", w, newObj)
return
}
handleJob := newJob
if newJob == nil {
handleJob = oldJob
}
key, err := runtimeobjects.CreateMetaNamespaceKey(handleJob)
if err != nil {
klog.Errorf("Failed obtaining meta key for Job: %v", err)
return
}
completed, condition := finishedJobCondition(newJob)

w.lock.Lock()
if completed {
w.finishedJobs[key] = condition
} else {
delete(w.finishedJobs, key)
}
w.lock.Unlock()
}

// objectKeys returns the namespace/name keys of all Jobs currently matching
// the selector.
func (w *waitForCompletedJobsMeasurement) objectKeys() (sets.String, error) {
objs, err := w.clusterFramework.GetClientSets().GetClient().BatchV1().Jobs(w.selector.Namespace).List(context.Background(), metav1.ListOptions{
LabelSelector: w.selector.LabelSelector,
FieldSelector: w.selector.FieldSelector,
})
if err != nil {
return nil, fmt.Errorf("listing jobs: %w", err)
}
keys := sets.NewString()
for _, j := range objs.Items {
key, err := runtimeobjects.CreateMetaNamespaceKey(&j)
if err != nil {
return nil, fmt.Errorf("getting key for Job: %w", err)
}
keys.Insert(key)
}
return keys, nil
}

// finishedJobCondition reports whether the Job has a Complete or Failed
// condition with status True, and which one.
func finishedJobCondition(j *batchv1.Job) (bool, batchv1.JobConditionType) {
if j == nil {
return false, ""
}
for _, cond := range j.Status.Conditions {
if cond.Status != corev1.ConditionTrue {
continue
}

if cond.Type == batchv1.JobComplete || cond.Type == batchv1.JobFailed {
return true, cond.Type
}
}
return false, ""
}
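
The terminal-condition check above is self-contained, so it can be exercised in isolation. The following table-driven test is only an illustrative sketch, not part of this commit; the test and file placement (alongside wait_for_jobs.go in package common) are assumptions, and it relies only on the batchv1 and corev1 types the measurement already uses.

package common

import (
	"testing"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// TestFinishedJobCondition checks that only a Complete or Failed condition
// with status True marks a Job as finished.
func TestFinishedJobCondition(t *testing.T) {
	cases := map[string]struct {
		job          *batchv1.Job
		wantFinished bool
		wantType     batchv1.JobConditionType
	}{
		"nil job": {},
		"job without conditions": {
			job: &batchv1.Job{},
		},
		"complete": {
			job: &batchv1.Job{
				Status: batchv1.JobStatus{
					Conditions: []batchv1.JobCondition{
						{Type: batchv1.JobComplete, Status: corev1.ConditionTrue},
					},
				},
			},
			wantFinished: true,
			wantType:     batchv1.JobComplete,
		},
		"failed condition not yet true": {
			job: &batchv1.Job{
				Status: batchv1.JobStatus{
					Conditions: []batchv1.JobCondition{
						{Type: batchv1.JobFailed, Status: corev1.ConditionFalse},
					},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			finished, condType := finishedJobCondition(tc.job)
			if finished != tc.wantFinished || condType != tc.wantType {
				t.Errorf("finishedJobCondition() = (%v, %q), want (%v, %q)", finished, condType, tc.wantFinished, tc.wantType)
			}
		})
	}
}
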
2 changes: 1 addition & 1 deletion clusterloader2/pkg/measurement/util/informer/informer.go
@@ -80,7 +80,7 @@ func addEventHandler(i cache.SharedInformer,
}

// StartAndSync starts informer and waits for it to be synced.
func StartAndSync(i cache.SharedInformer, stopCh chan struct{}, timeout time.Duration) error {
func StartAndSync(i cache.SharedInformer, stopCh <-chan struct{}, timeout time.Duration) error {
go i.Run(stopCh)
timeoutCh := make(chan struct{})
timeoutTimer := time.AfterFunc(timeout, func() {
37 changes: 37 additions & 0 deletions clusterloader2/testing/batch/config.yaml
@@ -0,0 +1,37 @@
name: batch

namespace:
  number: 1

tuningSets:
- name: Uniform1qps
  qpsLoad:
    qps: 1

steps:
- name: Start measurements
  measurements:
  - Identifier: WaitForCompletedJobs
    Method: WaitForCompletedJobs
    Params:
      action: start
      labelSelector: group = test-job
- name: Create job
  phases:
  - namespaceRange:
      min: 1
      max: 1
    replicasPerNamespace: 1
    tuningSet: Uniform1qps
    objectBundle:
    - basename: test-job
      objectTemplatePath: "job.yaml"
      templateFillMap:
        Replicas: 10
- name: Wait for Jobs to complete
  measurements:
  - Identifier: WaitForCompletedJobs
    Method: WaitForCompletedJobs
    Params:
      action: gather
      timeout: 1m
19 changes: 19 additions & 0 deletions clusterloader2/testing/batch/job.yaml
@@ -0,0 +1,19 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: {{.Name}}
  labels:
    group: test-job
spec:
  parallelism: {{.Replicas}}
  completions: {{.Replicas}}
  template:
    metadata:
      labels:
        group: test-pod
    spec:
      containers:
      - name: {{.Name}}
        image: bash
        args: ["-c", "exit"]
      restartPolicy: Never
