diff --git a/deploy/role.yaml b/deploy/role.yaml
index 1d0a841fa..58966b6f7 100644
--- a/deploy/role.yaml
+++ b/deploy/role.yaml
@@ -22,6 +22,7 @@ rules:
   - watch
   - update
  - patch
+  - delete
 - apiGroups:
   - authorization.k8s.io
   resources:
@@ -185,4 +186,4 @@ rules:
   verbs:
   - get
   - list
-  - watch
\ No newline at end of file
+  - watch
diff --git a/roles/servicetelemetry/tasks/component_alertmanager.yml b/roles/servicetelemetry/tasks/component_alertmanager.yml
index bcb63e44f..1e5551311 100644
--- a/roles/servicetelemetry/tasks/component_alertmanager.yml
+++ b/roles/servicetelemetry/tasks/component_alertmanager.yml
@@ -66,7 +66,7 @@
         kind: Route
         name: '{{ ansible_operator_meta.name }}-alertmanager-proxy'

-- name: Add a service account to used by Alertmanager
+- name: Create ServiceAccount/alertmanager-stf with oauth redirect annotation
   k8s:
     definition:
       apiVersion: v1
@@ -77,22 +77,121 @@
         annotations:
           serviceaccounts.openshift.io/oauth-redirectreference.alertmanager: '{{ alertmanager_oauth_redir_ref | to_json }}'

-- name: Bind role
+- name: Create ClusterRole/alertmanager-stf
   k8s:
     definition:
       apiVersion: rbac.authorization.k8s.io/v1
-      kind: ClusterRoleBinding
+      kind: ClusterRole
       metadata:
         name: alertmanager-stf
-        namespace: '{{ ansible_operator_meta.namespace }}'
-      roleRef:
-        apiGroup: rbac.authorization.k8s.io
-        kind: ClusterRole
-        name: alertmanager-main
-      subjects:
-      - kind: ServiceAccount
+      rules:
+      - apiGroups:
+        - authentication.k8s.io
+        resources:
+        - tokenreviews
+        verbs:
+        - create
+      - apiGroups:
+        - authorization.k8s.io
+        resources:
+        - subjectaccessreviews
+        verbs:
+        - create
+
+- name: Setup ClusterRoleBinding for Alertmanager
+  block:
+    - name: Define ClusterRoleBinding/alertmanager-stf
+      set_fact:
+        def_alertmanager_stf_crb: |
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: ClusterRoleBinding
+          metadata:
+            name: alertmanager-stf
+          roleRef:
+            apiGroup: rbac.authorization.k8s.io
+            kind: ClusterRole
+            name: alertmanager-stf
+          subjects:
+          - kind: ServiceAccount
+            name: alertmanager-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+
+    - name: Create ClusterRoleBinding/alertmanager-stf
+      k8s:
+        definition:
+          "{{ def_alertmanager_stf_crb }}"
+  rescue:
+    - name: Remove ClusterRoleBinding/alertmanager-stf on failure to update
+      k8s:
+        state: absent
+        definition:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: ClusterRoleBinding
+          metadata:
+            name: alertmanager-stf
+
+    - name: Create ClusterRoleBinding/alertmanager-stf
+      k8s:
+        definition:
+          "{{ def_alertmanager_stf_crb }}"
+
+- name: Create Role/alertmanager-stf
+  k8s:
+    definition:
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: Role
+      metadata:
         name: alertmanager-stf
         namespace: '{{ ansible_operator_meta.namespace }}'
+      rules:
+      - apiGroups:
+        - security.openshift.io
+        resourceNames:
+        - nonroot
+        resources:
+        - securitycontextconstraints
+        verbs:
+        - use
+
+- name: Setup RoleBinding for Alertmanager
+  block:
+    - name: Define RoleBinding/alertmanager-stf
+      set_fact:
+        def_alertmanager_stf_rb: |
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          metadata:
+            name: alertmanager-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+          roleRef:
+            apiGroup: rbac.authorization.k8s.io
+            kind: Role
+            name: alertmanager-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+          subjects:
+          - kind: ServiceAccount
+            name: alertmanager-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+
+    - name: Create RoleBinding/alertmanager-stf
+      k8s:
+        definition:
+          "{{ def_alertmanager_stf_rb }}"
+  rescue:
+    - name: Remove RoleBinding/alertmanager-stf on failure to update
+      k8s:
+        state: absent
+        definition:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          metadata:
+            name: alertmanager-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+
+    - name: Create RoleBinding/alertmanager-stf
+      k8s:
+        definition:
+          "{{ def_alertmanager_stf_rb }}"

 - name: Set default alertmanager service template
   set_fact:
diff --git a/roles/servicetelemetry/tasks/component_prometheus.yml b/roles/servicetelemetry/tasks/component_prometheus.yml
index eb890c1be..2e865abd2 100644
--- a/roles/servicetelemetry/tasks/component_prometheus.yml
+++ b/roles/servicetelemetry/tasks/component_prometheus.yml
@@ -7,91 +7,171 @@
         kind: Route
         name: '{{ ansible_operator_meta.name }}-prometheus-proxy'

-- name: Add oauth redirect annotation to prometheus-k8s service account
+- name: Create ServiceAccount/prometheus-stf with oauth redirect annotation
   k8s:
     definition:
       apiVersion: v1
       kind: ServiceAccount
       metadata:
-        name: prometheus-k8s
+        name: prometheus-stf
         namespace: '{{ ansible_operator_meta.namespace }}'
         annotations:
           serviceaccounts.openshift.io/oauth-redirectreference.prometheus: '{{ prom_oauth_redir_ref | to_json }}'

-- block:
-    - name: Install RBAC Role for prometheus operations
+- name: Create ClusterRole/prometheus-stf for non-resource URL /metrics access
+  k8s:
+    definition:
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: ClusterRole
+      metadata:
+        name: prometheus-stf
+      rules:
+      - nonResourceURLs:
+        - /metrics
+        verbs:
+        - get
+      - apiGroups:
+        - authentication.k8s.io
+        resources:
+        - tokenreviews
+        verbs:
+        - create
+      - apiGroups:
+        - authorization.k8s.io
+        resources:
+        - subjectaccessreviews
+        verbs:
+        - create
+      - apiGroups:
+        - ""
+        resources:
+        - namespaces
+        verbs:
+        - get
+
+- name: Setup ClusterRoleBinding for Prometheus
+  block:
+    - name: Define ClusterRoleBinding/prometheus-stf
+      set_fact:
+        def_prometheus_stf_crb: |
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: ClusterRoleBinding
+          metadata:
+            name: prometheus-stf
+          roleRef:
+            apiGroup: rbac.authorization.k8s.io
+            kind: ClusterRole
+            name: prometheus-stf
+          subjects:
+          - kind: ServiceAccount
+            name: prometheus-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+
+    - name: Create ClusterRoleBinding/prometheus-stf
+      k8s:
+        definition:
+          "{{ def_prometheus_stf_crb }}"
+  rescue:
+    - name: Remove ClusterRoleBinding/prometheus-stf on failure to update
+      k8s:
+        state: absent
+        definition:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: ClusterRoleBinding
+          metadata:
+            name: prometheus-stf
+
+    - name: Create ClusterRoleBinding/prometheus-stf
+      k8s:
+        definition:
+          "{{ def_prometheus_stf_crb }}"
+
+- name: Create Role/prometheus-stf for Prometheus operations
+  k8s:
+    definition:
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: Role
+      metadata:
+        name: prometheus-stf
+        namespace: '{{ ansible_operator_meta.namespace }}'
+      rules:
+      - apiGroups:
+        - ""
+        resources:
+        - services
+        - endpoints
+        - pods
+        verbs:
+        - get
+        - list
+        - watch
+      - apiGroups:
+        - extensions
+        - networking.k8s.io
+        resources:
+        - ingresses
+        verbs:
+        - get
+        - list
+        - watch
+      - apiGroups:
+        - security.openshift.io
+        resourceNames:
+        - nonroot
+        - nonroot-v2
+        resources:
+        - securitycontextconstraints
+        verbs:
+        - use
+
+- name: Setup RoleBinding for Prometheus
+  block:
+    - name: Define RoleBinding/prometheus-stf
+      set_fact:
+        def_prometheus_stf_rb: |
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          metadata:
+            name: prometheus-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+          roleRef:
+            apiGroup: rbac.authorization.k8s.io
+            kind: Role
+            name: prometheus-stf
+          subjects:
+          - kind: ServiceAccount
+            name: prometheus-stf
+            namespace: '{{ ansible_operator_meta.namespace }}'
+
+    - name: Create RoleBinding/prometheus-stf
+      k8s:
+        definition:
+          "{{ def_prometheus_stf_rb }}"
+  rescue:
+    - name: Remove RoleBinding/prometheus-stf on failure to update
       k8s:
+        state: absent
         definition:
           apiVersion: rbac.authorization.k8s.io/v1
-          kind: Role
+          kind: RoleBinding
           metadata:
             name: prometheus-stf
             namespace: '{{ ansible_operator_meta.namespace }}'
-          rules:
-          - apiGroups:
-            - ""
-            resources:
-            - services
-            - endpoints
-            - pods
-            verbs:
-            - get
-            - list
-            - watch
-          - apiGroups:
-            - extensions
-            - networking.k8s.io
-            resources:
-            - ingresses
-            verbs:
-            - get
-            - list
-            - watch
-          - apiGroups:
-            - security.openshift.io
-            resourceNames:
-            - nonroot
-            - nonroot-v2
-            resources:
-            - securitycontextconstraints
-            verbs:
-            - use
-
-    - name: Bind the local prometheus SA to our new role
+
+    - name: Create RoleBinding/prometheus-stf
       k8s:
         definition:
-          apiVersion: rbac.authorization.k8s.io/v1
-          kind: RoleBinding
-          metadata:
-            name: prometheus-k8s-stf
-            namespace: '{{ ansible_operator_meta.namespace }}'
-          roleRef:
-            apiGroup: rbac.authorization.k8s.io
-            kind: Role
-            name: prometheus-stf
-          subjects:
-          - kind: ServiceAccount
-            name: prometheus-k8s
-            namespace: '{{ ansible_operator_meta.namespace }}'
-  when:
-  - observability_strategy in ['use_redhat', 'use_hybrid']
+          "{{ def_prometheus_stf_rb }}"

-- name: Bind the local prometheus SA to prometheus cluster role (for oauth perms)
+- name: Remove old ClusterRoleBinding for prometheus-k8s using CMO roleRef
   k8s:
+    state: absent
     definition:
       apiVersion: rbac.authorization.k8s.io/v1
       kind: ClusterRoleBinding
       metadata:
         name: prometheus-k8s-{{ ansible_operator_meta.namespace }}
         namespace: '{{ ansible_operator_meta.namespace }}'
-      roleRef:
-        apiGroup: rbac.authorization.k8s.io
-        kind: ClusterRole
-        name: prometheus-k8s
-      subjects:
-      - kind: ServiceAccount
-        name: prometheus-k8s
-        namespace: '{{ ansible_operator_meta.namespace }}'

 - name: Check for existing prometheus htpasswd user secret
   k8s_info:
diff --git a/roles/servicetelemetry/templates/manifest_prometheus.j2 b/roles/servicetelemetry/templates/manifest_prometheus.j2
index 4e8651def..2bdf408b9 100644
--- a/roles/servicetelemetry/templates/manifest_prometheus.j2
+++ b/roles/servicetelemetry/templates/manifest_prometheus.j2
@@ -11,7 +11,7 @@ spec:
   replicas: {{ servicetelemetry_vars.backends.metrics.prometheus.deployment_size }}
   ruleSelector: {}
   securityContext: {}
-  serviceAccountName: prometheus-k8s
+  serviceAccountName: prometheus-stf
   serviceMonitorSelector:
     matchLabels:
       app: smart-gateway
@@ -44,7 +44,7 @@ spec:
     - -upstream=http://localhost:9090/
     - -htpasswd-file=/etc/proxy/htpasswd/auth
     - -cookie-secret-file=/etc/proxy/secrets/session_secret
-    - -openshift-service-account=prometheus-k8s
+    - -openshift-service-account=prometheus-stf
     - '-openshift-sar={"resource": "namespaces", "verb": "get"}'
     ports:
     - containerPort: 9092
diff --git a/tests/smoketest/smoketest.sh b/tests/smoketest/smoketest.sh
index 8a801c004..4204398f2 100755
--- a/tests/smoketest/smoketest.sh
+++ b/tests/smoketest/smoketest.sh
@@ -28,6 +28,7 @@ if [ "${OC_CLIENT_VERSION_Y}" -lt "${OC_CLIENT_VERSION_Y_REQUIRED}" ] || [ "${OC
 fi

 CLEANUP=${CLEANUP:-true}
+SMOKETEST_VERBOSE=${SMOKETEST_VERBOSE:-true}

 for ((i=1; i<=NUMCLOUDS; i++)); do
     NAME="smoke${i}"
alertmanager notification..." # check if the oc client version is less than 4.11 and adjust the token command to match available commands if [ 0${OC_CLIENT_VERSION_Y} -lt 011 ]; then - PROMETHEUS_K8S_TOKEN=$(oc serviceaccounts get-token prometheus-k8s) + PROMETHEUS_K8S_TOKEN=$(oc serviceaccounts get-token prometheus-stf) else - PROMETHEUS_K8S_TOKEN=$(oc create token prometheus-k8s) + PROMETHEUS_K8S_TOKEN=$(oc create token prometheus-stf) fi -oc run curl --restart='Never' --image=quay.io/infrawatch/busyboxplus:curl -- sh -c "curl -k -H \"Content-Type: application/json\" -H \"Authorization: Bearer ${PROMETHEUS_K8S_TOKEN}\" -d '[{\"labels\":{\"alertname\":\"Testalert1\"}}]' https://default-alertmanager-proxy:9095/api/v1/alerts" -# it takes some time to get the alert delivered, continuing with other tests - - -# Trying to find a less brittle test than a timeout -JOB_TIMEOUT=300s -for NAME in "${CLOUDNAMES[@]}"; do - echo "*** [INFO] Waiting on job/stf-smoketest-${NAME}..." - oc wait --for=condition=complete --timeout=${JOB_TIMEOUT} "job/stf-smoketest-${NAME}" - RET=$((RET || $?)) # Accumulate exit codes -done - -echo "*** [INFO] Checking that the qdr certificate has a long expiry" -EXPIRETIME=$(oc get secret default-interconnect-openstack-ca -o json | grep \"tls.crt\"\: | awk -F '": "' '{print $2}' | rev | cut -c3- | rev | base64 -d | openssl x509 -in - -text | grep "Not After" | awk -F " : " '{print $2}') -EXPIRETIME_UNIX=$(date -d "${EXPIRETIME}" "+%s") -TARGET_UNIX=$(date -d "now + 7 years" "+%s") -if [ ${EXPIRETIME_UNIX} -lt ${TARGET_UNIX} ]; then - echo "[FAILURE] Certificate expire time (${EXPIRETIME}) less than 7 years from now" -fi +# create the alert using startsAt which in theory may cause trigger to be faster +echo "*** [INFO] Create alert" +oc delete pod -l run=curl ; oc run curl --wait --restart='Never' --image=quay.io/infrawatch/busyboxplus:curl -- sh -c "curl -v -k -H \"Content-Type: application/json\" -H \"Authorization: Bearer ${PROMETHEUS_K8S_TOKEN}\" -d '[{\"status\":\"firing\",\"labels\":{\"alertname\":\"smoketest\",\"severity\":\"warning\"},\"startsAt\":\"$(date --rfc-3339=seconds | sed 's/ /T/')\"}]' https://default-alertmanager-proxy:9095/api/v1/alerts" +oc wait --for=jsonpath='{.status.phase}'=Succeeded pod/curl +oc logs curl echo "*** [INFO] Waiting to see SNMP trap message in webhook pod" -oc delete pod curl SNMP_WEBHOOK_POD=$(oc get pod -l "app=default-snmp-webhook" -ojsonpath='{.items[0].metadata.name}') SNMP_WEBHOOK_CHECK_MAX_TRIES=5 SNMP_WEBHOOK_CHECK_TIMEOUT=30 @@ -112,74 +98,88 @@ while [ $SNMP_WEBHOOK_CHECK_COUNT -lt $SNMP_WEBHOOK_CHECK_MAX_TRIES ]; do sleep $SNMP_WEBHOOK_CHECK_TIMEOUT done -echo "*** [INFO] Showing oc get all..." -oc get all -echo - -echo "*** [INFO] Showing servicemonitors..." -oc get servicemonitor -o yaml -echo - -echo "*** [INFO] Logs from smoketest containers..." +# Trying to find a less brittle test than a timeout +JOB_TIMEOUT=300s for NAME in "${CLOUDNAMES[@]}"; do - oc logs "$(oc get pod -l "job-name=stf-smoketest-${NAME}" -o jsonpath='{.items[0].metadata.name}')" -c smoketest-collectd - oc logs "$(oc get pod -l "job-name=stf-smoketest-${NAME}" -o jsonpath='{.items[0].metadata.name}')" -c smoketest-ceilometer + echo "*** [INFO] Waiting on job/stf-smoketest-${NAME}..." + oc wait --for=condition=complete --timeout=${JOB_TIMEOUT} "job/stf-smoketest-${NAME}" + RET=$((RET || $?)) # Accumulate exit codes done -echo - -echo "*** [INFO] Logs from qdr..." 
-oc logs "$(oc get pod -l application=default-interconnect -o jsonpath='{.items[0].metadata.name}')" -echo -echo "*** [INFO] Logs from smart gateways..." -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-meter" -o jsonpath='{.items[0].metadata.name}')" -c bridge -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-meter" -o jsonpath='{.items[0].metadata.name}')" -c sg-core -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-event" -o jsonpath='{.items[0].metadata.name}')" -c bridge -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-event" -o jsonpath='{.items[0].metadata.name}')" -c sg-core -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-meter" -o jsonpath='{.items[0].metadata.name}')" -c bridge -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-meter" -o jsonpath='{.items[0].metadata.name}')" -c sg-core -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-event" -o jsonpath='{.items[0].metadata.name}')" -c bridge -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-event" -o jsonpath='{.items[0].metadata.name}')" -c sg-core -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-sens-meter" -o jsonpath='{.items[0].metadata.name}')" -c bridge -oc logs "$(oc get pod -l "smart-gateway=default-cloud1-sens-meter" -o jsonpath='{.items[0].metadata.name}')" -c sg-core -echo - -echo "*** [INFO] Logs from smart gateway operator..." -oc logs "$(oc get pod -l app=smart-gateway-operator -o jsonpath='{.items[0].metadata.name}')" -echo - -echo "*** [INFO] Logs from prometheus..." -oc logs "$(oc get pod -l prometheus=default -o jsonpath='{.items[0].metadata.name}')" -c prometheus -echo +echo "*** [INFO] Checking that the qdr certificate has a long expiry" +EXPIRETIME=$(oc get secret default-interconnect-openstack-ca -o json | grep \"tls.crt\"\: | awk -F '": "' '{print $2}' | rev | cut -c3- | rev | base64 -d | openssl x509 -text | grep "Not After" | awk -F " : " '{print $2}') +EXPIRETIME_UNIX=$(date -d "${EXPIRETIME}" "+%s") +TARGET_UNIX=$(date -d "now + 7 years" "+%s") +if [ ${EXPIRETIME_UNIX} -lt ${TARGET_UNIX} ]; then + echo "[FAILURE] Certificate expire time (${EXPIRETIME}) less than 7 years from now" +fi -echo "*** [INFO] Logs from elasticsearch..." -oc logs "$(oc get pod -l common.k8s.elastic.co/type=elasticsearch -o jsonpath='{.items[0].metadata.name}')" +echo "*** [INFO] Showing oc get all..." +oc get all echo -echo "*** [INFO] Logs from snmp webhook..." -oc logs "$(oc get pod -l app=default-snmp-webhook -o jsonpath='{.items[0].metadata.name}')" +echo "*** [INFO] Showing servicemonitors..." +oc get servicemonitors.monitoring.rhobs -o yaml echo -echo "*** [INFO] Logs from alertmanager..." -oc logs "$(oc get pod -l app.kubernetes.io/name=alertmanager -o jsonpath='{.items[0].metadata.name}')" -c alertmanager -echo +if [ "$SMOKETEST_VERBOSE" = "true" ]; then + echo "*** [INFO] Logs from smoketest containers..." + for NAME in "${CLOUDNAMES[@]}"; do + oc logs "$(oc get pod -l "job-name=stf-smoketest-${NAME}" -o jsonpath='{.items[0].metadata.name}')" -c smoketest-collectd + oc logs "$(oc get pod -l "job-name=stf-smoketest-${NAME}" -o jsonpath='{.items[0].metadata.name}')" -c smoketest-ceilometer + done + echo + + echo "*** [INFO] Logs from qdr..." + oc logs "$(oc get pod -l application=default-interconnect -o jsonpath='{.items[0].metadata.name}')" + echo + + echo "*** [INFO] Logs from smart gateways..." 
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-meter" -o jsonpath='{.items[0].metadata.name}')" -c bridge
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-meter" -o jsonpath='{.items[0].metadata.name}')" -c sg-core
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-event" -o jsonpath='{.items[0].metadata.name}')" -c bridge
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-coll-event" -o jsonpath='{.items[0].metadata.name}')" -c sg-core
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-meter" -o jsonpath='{.items[0].metadata.name}')" -c bridge
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-meter" -o jsonpath='{.items[0].metadata.name}')" -c sg-core
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-event" -o jsonpath='{.items[0].metadata.name}')" -c bridge
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-ceil-event" -o jsonpath='{.items[0].metadata.name}')" -c sg-core
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-sens-meter" -o jsonpath='{.items[0].metadata.name}')" -c bridge
+    oc logs "$(oc get pod -l "smart-gateway=default-cloud1-sens-meter" -o jsonpath='{.items[0].metadata.name}')" -c sg-core
+    echo
+
+    echo "*** [INFO] Logs from smart gateway operator..."
+    oc logs "$(oc get pod -l app=smart-gateway-operator -o jsonpath='{.items[0].metadata.name}')"
+    echo
+
+    echo "*** [INFO] Logs from prometheus..."
+    oc logs "$(oc get pod -l prometheus=default -o jsonpath='{.items[0].metadata.name}')" -c prometheus
+    echo
+
+    echo "*** [INFO] Logs from elasticsearch..."
+    oc logs "$(oc get pod -l common.k8s.elastic.co/type=elasticsearch -o jsonpath='{.items[0].metadata.name}')"
+    echo
+
+    echo "*** [INFO] Logs from snmp webhook..."
+    oc logs "$(oc get pod -l app=default-snmp-webhook -o jsonpath='{.items[0].metadata.name}')"
+    echo
+
+    echo "*** [INFO] Logs from alertmanager..."
+    oc logs "$(oc get pod -l app.kubernetes.io/name=alertmanager -o jsonpath='{.items[0].metadata.name}')" -c alertmanager
+    echo
+fi

 echo "*** [INFO] Cleanup resources..."
 if $CLEANUP; then
     oc delete "job/stf-smoketest-${NAME}"
+    # resolve the alert to clean up the system, otherwise this expires in 5 minutes
+    oc delete pod -l run=curl ; oc run curl --restart='Never' --image=quay.io/infrawatch/busyboxplus:curl -- sh -c "curl -v -k -H \"Content-Type: application/json\" -H \"Authorization: Bearer ${PROMETHEUS_K8S_TOKEN}\" -d '[{\"status\":\"firing\",\"labels\":{\"alertname\":\"smoketest\",\"severity\":\"warning\"},\"startsAt\":\"$(date --rfc-3339=seconds | sed 's/ /T/')\",\"endsAt\":\"$(date --rfc-3339=seconds | sed 's/ /T/')\"}]' https://default-alertmanager-proxy:9095/api/v1/alerts"
 fi
 echo

-if [ $SNMP_WEBHOOK_STATUS -ne 0 ]; then
-    echo "*** [FAILURE] SNMP Webhook failed"
-    exit 1
-fi
-
-if [ $RET -eq 0 ]; then
+if [ $RET -eq 0 ] && [ $SNMP_WEBHOOK_STATUS -eq 0 ]; then
     echo "*** [SUCCESS] Smoke test job completed successfully"
+    exit 0
 else
     echo "*** [FAILURE] Smoke test job still not succeeded after ${JOB_TIMEOUT}"
+    exit 1
 fi
-echo
-
-exit $RET