#
# Author: Hari Sekhon
# Date: [% DATE # 2019-11-28 18:51:59 +0000 (Thu, 28 Nov 2019) %]
#
# vim:ts=2:sts=2:sw=2:et
# lint: k8s
#
#  https://github.com/HariSekhon/Kubernetes-configs
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve or steer this or other code I publish
#
# https://www.linkedin.com/in/HariSekhon
#
# ============================================================================ #
# C r o n J o b
# ============================================================================ #
# https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/
# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/cron-job-v1/
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: NAME-schedule
  namespace: NAMESPACE
  #annotations:
  #  datree.skip/CONTAINERS_MISSING_READINESSPROBE_KEY: readiness probe is not relevant for this cronjob
  #  datree.skip/CONTAINERS_MISSING_LIVENESSPROBE_KEY: liveness probe is not relevant for this cronjob
spec:
schedule: "0 0 * * *"
  #
  # XXX: setting startingDeadlineSeconds is very important for cronjobs on frequent schedules like * * * * * -
  #      a planned maintenance or outage can otherwise cause more than 100 scheduled runs to be missed,
  #      after which the controller stops starting this cronjob entirely!
  #
  # To detect such stuck cronjobs, look at the LAST SCHEDULE field in this output:
  #
  #   kubectl get cronjobs -A --sort-by=.status.lastScheduleTime
  #
  # See the DevOps Bash tools repo for related Kubernetes scripts such as detecting stuck jobs:
  #
  #   https://github.com/HariSekhon/DevOps-Bash-tools
  #
  # If you run into this issue, see the adjacent fix in:
  #
  #   cronjob-fix-stale.patch.yaml
  #
  # More details here:
  #
  #   https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-job-limitations
  #
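  # To test this cronjob or manually re-run it (eg. after fixing a stale schedule), one option is to
  # create a one-off Job from it - the trailing job name below is arbitrary / illustrative:
  #
  #   kubectl create job --from=cronjob/NAME-schedule NAME-schedule-manual-run -n NAMESPACE
  #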
  startingDeadlineSeconds: 3600  # allow the job to (re)start up to an hour late
  concurrencyPolicy: Forbid  # Allow / Forbid / Replace - whether to start a new job while the previous job is still running (default: Allow, in which case your jobs should be idempotent)
  suspend: false  # set to true to prevent new job executions (suspended executions are counted as missed jobs)
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 3
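  #
  # The retained Job objects and their pods' logs can be inspected later, eg. (the job suffix is the scheduled timestamp):
  #
  #   kubectl get jobs -n NAMESPACE
  #   kubectl logs -n NAMESPACE job/NAME-schedule-<timestamp>
  #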
  jobTemplate:
    spec:
      template:
        metadata:
          name: NAME
        spec:
          restartPolicy: Never
          containers:
            - name: NAME
              image: perl
              # takes about a minute to execute
              command:
                - perl
              args:
                - -Mbignum=bpi
                - -wle
                - print bpi(2000)
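              # NB: the perl image and bpi(2000) args above are just the sample workload from the
              #     Kubernetes CronJob docs linked at the top - replace the image / command / args with your own job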
              readinessProbe:
                exec:
                  command:
                    - pgrep
                    - -f
                    - perl
              livenessProbe:
                exec:
                  command:
                    - pgrep
                    - -f
                    - perl
              resources:
                requests:  # requests must not exceed limits or the pod will be rejected
                  cpu: 300m
                  memory: 300Mi
                limits:
                  cpu: "1"
                  memory: 1Gi
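#
# Example usage once the NAME / NAMESPACE placeholders have been substituted (commands are illustrative):
#
#   kubectl apply -f cronjob.yaml
#   kubectl get cronjobs -n NAMESPACE
#   kubectl get jobs -n NAMESPACE --watch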