|
#!/bin/bash
#
# Pre-termination hook for a glusterd pod in a Kubernetes StatefulSet.
# Figures out which gluster pod runs on this node (by matching $HOSTNAME
# against the nodeName of every pod labeled app=glusterfs) and, if that
# pod is still a member of the trusted storage pool (TSP), detaches it
# before the pod is torn down.
#
# Required environment: NAMESPACE, BASE_NAME, SERVICE_NAME, HOSTNAME.
set -euo pipefail
IFS=$'\n\t'

# Fail fast with a clear message if required env vars are missing;
# under `set -u` an unset expansion would otherwise abort mid-script.
: "${NAMESPACE:?NAMESPACE must be set}"
: "${BASE_NAME:?BASE_NAME must be set}"
: "${SERVICE_NAME:?SERVICE_NAME must be set}"
: "${HOSTNAME:?HOSTNAME must be set}"

readonly LOG=/usr/share/bin/gluster-stop.log
readonly API=https://kubernetes.default.svc.cluster.local
readonly SA=/var/run/secrets/kubernetes.io/serviceaccount

# Guard clause: nothing to clean up if glusterd is not running.
if ! systemctl status glusterd | grep -q '(running) since'; then
  echo "glusterd not running...fail" >> "$LOG"
  exit 1
fi

# Authenticated GET against the Kubernetes API.  An argv list (instead
# of `eval` on a flat command string) keeps the bearer-token header
# intact and avoids a second round of shell parsing / injection risk.
TOKEN="$(cat "$SA/token")"
k8s_get() {
  curl -s --cacert "$SA/ca.crt" -H "Authorization: Bearer $TOKEN" -- "$1"
}

# StatefulSet spec -> desired replica count.
# NOTE(review): grep/cut over raw JSON is fragile; use jq if the image
# has it.  Kept here to preserve the original extraction behavior.
REPLICA_COUNT="$(k8s_get "$API/apis/apps/v1beta1/namespaces/$NAMESPACE/statefulsets/$BASE_NAME" \
  | grep 'replicas' | cut -f2 -d ':' | cut -f2 -d ',' | tr -d '[:space:]')"
echo "replica count = $REPLICA_COUNT"

# One API call for the glusterfs pods; the original fetched the pod list
# twice and immediately overwrote the first (unfiltered) result — that
# dead call is removed.  MY_PODS = pod hostnames, MY_HOSTS = node names,
# in matching order.
PODS_JSON="$(k8s_get "$API/api/v1/namespaces/default/pods?labelSelector=app=glusterfs")"
MY_PODS="$(echo "$PODS_JSON" | grep 'pod.beta.kubernetes.io/hostname' \
  | cut -f2 -d ':' | tr -d '[:space:]' | tr -d '"')"
MY_HOSTS="$(echo "$PODS_JSON" | grep 'nodeName' \
  | cut -f2 -d ':' | tr -d '[:space:]' | tr -d '"')"

# Find the 1-based index of this node in MY_HOSTS, then pick the pod at
# the same index in MY_PODS: that is the gluster pod running here.
HOSTCOUNT=0
HOSTPOD=""
idx=0
for host in $(echo "$MY_HOSTS" | tr ',' '\n'); do
  idx=$((idx + 1))
  if [ "$HOSTNAME" == "$host" ]; then
    HOSTCOUNT=$idx
  fi
done

echo " --- NEXT ---"
idx=0
for pod in $(echo "$MY_PODS" | tr ',' '\n'); do
  idx=$((idx + 1))
  if [ "$HOSTCOUNT" -eq "$idx" ]; then
    HOSTPOD=$pod
  fi
done
echo "$HOSTPOD"

# Figure out the state of the cluster from `gluster peer status`
# ("Number of Peers: N").  Only meaningful on an established cluster.
numpeers="$(gluster peer status | grep -oP 'Peers:\s\K\w+')"
ORIGINAL_PEER_COUNT=$numpeers
EXPECTED_REPLICA_COUNT=$((numpeers + 1))   # should match REPLICA_COUNT after this runs
EXPECTED_PEER_COUNT=$((REPLICA_COUNT - 1))
PEER_COUNT=$((REPLICA_COUNT - 1))
INITIAL_RUN="no"

# Start a fresh log for this invocation (first write truncates, the
# grouped redirection keeps the rest appended in one open).
{
  echo "Pre Termination Script Executed"
  echo ""
  echo ""
  echo "****** LOG ******"
  echo "original_peer_count = $ORIGINAL_PEER_COUNT"
  echo "expected_peer_count = $EXPECTED_PEER_COUNT"
  echo "peer_count = $PEER_COUNT"
  echo "expected_replica_count = $EXPECTED_REPLICA_COUNT"
  echo "replica_count = $REPLICA_COUNT"
  echo "initial run? $INITIAL_RUN"
  echo "MY_HOSTS = $MY_HOSTS"
  echo "MY_PODS = $MY_PODS"
  echo "HOSTCOUNT = $HOSTCOUNT"
  echo "HOSTPOD = $HOSTPOD"
} > "$LOG"

if [ "$ORIGINAL_PEER_COUNT" -eq 0 ] && [ "$INITIAL_RUN" == "no" ]; then
  echo "nothing in the pool, probably should do nothing" >> "$LOG"
else
  echo "Someone is terminating our pod" >> "$LOG"

  # Proactively detach this pod from the TSP if it is still a member.
  FQDN="$HOSTPOD.$SERVICE_NAME.$NAMESPACE.svc.cluster.local"
  if gluster peer status | grep -q "Hostname: $FQDN"; then
    # Direct invocation replaces the original `eval`+backticks; the
    # trailing `wait` was a no-op (no background job) and is dropped.
    gluster peer detach "$FQDN" >/dev/null
    echo "... Removed $HOSTPOD from TSP" >> "$LOG"
  else
    echo "...Nothing to do here" >> "$LOG"
  fi
fi
# BUG FIX: the original script had a SECOND `else` branch here
# ("Why did we hit this...") on the same if/fi — a bash syntax error
# that prevented the whole script from parsing.  The unreachable
# branch (which also logged to a mismatched file, gluster.log) has
# been removed.

echo "pre-termination script executed" >> "$LOG"
exit 0
0 commit comments