From d3bf5dde9a05add2e2ebce164e0fef83d189b7b3 Mon Sep 17 00:00:00 2001 From: Daryes Date: Mon, 5 Apr 2021 11:08:22 +0200 Subject: [PATCH] v1.1.0 --- CHANGELOG.md | 29 +++++--- README.md | 127 ++++++++++++++++++-------------- contents/dep_wait_job.sh | 152 ++++++++++++++++++++------------------- plugin.yaml | 4 +- 4 files changed, 173 insertions(+), 139 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 277852b..21ffd19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,13 @@ Change history ====== -v1.10 (2019/06/06) +v1.1.0 (2021/04/04) +------ + +- updated the minimum Rundeck API to 11 +- changed the api token to support a file based location + +v1.0.10 (2019/06/06) ------ - updated the token location when installing the plugin the first time @@ -9,7 +15,7 @@ v1.10 (2019/06/06) - updated the shell command for exiting a waiting/stuck loop to be more copy/paste friendly -v1.9 (2019/01/02) +v1.0.9 (2019/01/02) ------ - new optional Node Filtering option allowing the dependency to adjust when a job was launched using 'Change the Target Nodes' option. 
@@ -19,7 +25,8 @@ v1.9 (2019/01/02) - changelog rewrote in english - max loop duration format changed to HH MM SS -v1.8 (2019/01/02) + +v1.0.8 (2019/01/02) ------ - New REF_TMP_DIR variable @@ -28,7 +35,7 @@ v1.8 (2019/01/02) - Some error messages rewrote -v1.7 (2018/08/21) +v1.0.7 (2018/08/21) ------ - Better visibility of API errors @@ -38,7 +45,7 @@ v1.7 (2018/08/21) - Flow start time changed to 15h00 -v1.6 (2018/05/05) +v1.0.6 (2018/05/05) ------ - project, group and job name parameters trimmed @@ -47,13 +54,13 @@ v1.6 (2018/05/05) - wait expiration when the end of flow is reached -v1.5 (2018/01/06) +v1.0.5 (2018/01/06) ------ - Internal variable API_VERSION in curl calls -v1.4 (2017/12/10) +v1.0.4 (2017/12/10) ------ - rewrite to switch to Rundeck API instead of rd-cli @@ -61,7 +68,7 @@ v1.4 (2017/12/10) - added an information message visible after each hour when waiting -v1.3 (2017/09/17) +v1.0.3 (2017/09/17) ------ - wait_timeout duration set to 16h @@ -69,7 +76,7 @@ v1.3 (2017/09/17) - script PID visible in the console output -v1.2 (2017/09/09) +v1.0.2 (2017/09/09) ------ - rdJob_GetLastExecValue : using correct rd-cli with iso-8601 date format @@ -78,14 +85,14 @@ v1.2 (2017/09/09) - rd-cli commands using nice -v1.1 (2017/08/26) +v1.0.1 (2017/08/26) ------ - Comments - Better handling of the command line -v1.0 (2017/07/09) +v1.0.0 (2017/07/09) ------ first version diff --git a/README.md b/README.md index b914349..a3d660d 100644 --- a/README.md +++ b/README.md @@ -1,41 +1,53 @@ Rundeck plugin : dependencies ====== -This plugin add precise control options to Rundeck job's interaction between them -in a global flow. +This plugin add precise control options to Rundeck job's interaction between them +in a global flow. 
+Support Rundeck 2.x up to 3.x -Internal flow + +Semantics - internal flow ------ -Currently, there is no any easy way to handle the date when switching to the next -day, or the start and end of a global flow, or also to distinguish a specific -launch between multiple schedules of the same job. +Currently, there is no any easy way in Rundeck to handle the date when switching to the +next day, or the start and end of a global flow, or also to distinguish a specific +launch between multiple schedules of the same job. + +Other scheduler might allow to set an execution date to alleviate this, +which is not present in Rundeck. In this regard, the dependencies plugin works with an internal flow definition. -This flow will start at d+0, 14h00, and end at d+1, 13h59. -When looking for a job, only those started in the boundaries of the current flow -will be analyzed, without restriction even if the day change. +This flow will start at d+0, 15h00, and end at d+1, 14h59. +When looking for a job history, only those started in the boundaries of the current flow +will be analyzed, without restriction even if the day change. Installation ------ -The zip archive must be placed in the var/rundeck/lib/rundeck/libext subdirectory. -It can be directly the zip file, or a symlink to the archive. +The zip archive must be placed in the /var/rundeck/lib/rundeck/libext/ subdirectory. +Either the zip file as is, or a symlink to the zip file elsewhere. The curl command must be available in the path. This command must return a message with "API Version not specified" : - curl http(s)://rundeck_server/api/ + curl https:///api/ A token must be created from Rundeck, with at least read access on all project. In rundeck GUI (as admin) => user profile => User API Tokens. -- Leave the role empty. -- If you don't want to handle the date limitation, add / change the following option -in rundeck-config.properties : "rundeck.api.tokens.duration.max=0" +- Set a value to the role or leave it empty. 
+- If you don't want to handle the date limitation, add / change the following option +in rundeck-config.properties : "rundeck.api.tokens.duration.max=0" + +Then, place the token in a dedicated file under /etc/rundeck/ +``` +sudo su - rundeck +echo "" > /etc/rundeck/plugin-dependencies.conf +chmod 600 /etc/rundeck/plugin-dependencies.conf +``` + +It can be set instead in the RD_TOKEN environment variable, but as since Rundeck v3, +the user profile isn't loaded, this use is not recommended anymore. -The token must be placed in the following environment variable: -RD_TOKEN=< API admin token > -In the rundeck user $HOME/.profile, as 'export RD_TOKEN=...' Module: Wait for / Job @@ -47,41 +59,46 @@ complete with a specific status. Available modes ------ + * blocking mode : wait for a job until it is finished, even if it will be started -much after, or if the end of the internal flow is reached. -This mode allows to launch multiples jobs at the same time, the order being handled -by the dependencies. +much later, or if the end of the internal flow is reached. +This mode allows to launch multiples jobs at the same time, with the order handled +by the dependencies themselves. * non-blocking mode: in the case of a conditional job that isn't executed each time, the module will wait only if the job has been started previously and is still running, or already finished in the current flow. Otherwise, skip the dependency step without error. -A job with a step using this mode might require another step in blocking mode, -to ensure the global sequence is respected. +A job with a step using this mode might require an additional step using also the +Wait for module, but this time in blocking mode to a different job, to ensure +the global sequence is respected. 
In both modes, the module will keep waiting if the target job is finished but -without the required status (success or error) +without the required status (success or error) + +Hint: if you have a lot of jobs, a diagram tool might be handy to write down the +expected order. Skipping a dependency ------ -When using this module, 2 ways are usable to skip a dependency : +When using this module, 2 ways are available to skip a dependency : * using an additionnal option/variable in the job declaration : DEPENDENCY_EXTRA_PARAMS : text type, empty, optionnal. Filling the option with the value " -bypass " at launch will set the step to exit immediatly, allowing the next step to run. -Note : the module will use this name as a default value, but can be changed in the -step options. +Note : the module will use this variable name as a default value, but can be changed +in the step options. -* at the execution time, the step will show in the log output an information line to -skip the current waiting loop, starting with "touch /tmp/..." -This command will create an empty file the job is looking for, which is required to -exit immediatly without error, allowing the next step to run. +* at the execution time, the step will show an information line in the log output +explaining how to skip the current waiting loop, starting with "touch /tmp/..." +This command will create an empty file this job is looking for, which will allow +to exit immediately without error, allowing the next step to run. The file method can only be used when the dependency step is running, as the filename -is unique to the execution. On the opposite, the variable used at launch works better -with a scheduled job. +is unique to the execution. +On the opposite, the variable works best on a job that must be launched immediately. 
Customization @@ -92,33 +109,35 @@ in the rundeck profile under /etc or in the user home (a restart is required) : - RD_FLOW_DAILY_STOP="hh:mm:ss" - Study case: managing a sizeable number of jobs in a flow +Study case: managing a sizeable number of jobs in a flow ------ -Let's start with multiple backup jobs running on different servers: +For example, let's start with multiple backup jobs running on different servers: one for a database type A, another one for a db type B, and a file backup. The easiest declaration would be a single job, with differents steps for each backups. -When working with production and large environments, it's more appropriate -to separate such steps on differents jobs, allowing easier actions in case of -error or when on-demand launch is required. -Also, you can have each job targeting a different group of multiple servers. -So you'll end with at least 3 separate jobs. - -Now, add another job requiring all backups to be finished with a success status. -If it's the only job, you can directly add the 3 dependencies on the job. -Right after, create after other jobs with similar requirements. -In such situation, inserting a blank job, also known as a flag, is more suited: +When working in production and large environments, it's more appropriate +to separate such steps on differents jobs, allowing easier actions in case of error +or when on-demand launch is required, or when each job must target a unique server. +So you'll end with at least 3 separate jobs. + +Then, create another job requiring all backups to be finished with a success status. +The fastest way would be to add on this new job 3 dependencies in blocking mode +to the backup jobs. + +Now, add others jobs with similar requirements on those backup jobs. +In such situation, inserting a blank job, also known as a job flag, is more suited: its single purpose is to contains only the dependencies to the backup jobs. 
-You can then link instead to the flag all the jobs waiting for the backups jobs. - -Usually, in very large flow, it's better to aggregate the jobs in distinct groups, -either arbitrary or for application purpose, and enclose them with 2 flags: -a starting flag which will be linked to by all the jobs in the group, -and an ending flag linked to all the said jobs in this group (and the starting flag). -Any job in such group won't start until the starting flag is complete. In the same -manner, any job linked to the ending flag will have to wait for the completion of -all the jobs in the group. +You can then link to this flag job all the other jobs waiting for the backups. + +In very large flows, it's usually more suited to aggregate the jobs in distinct groups, +either arbitrary or for thematic purpose, and enclose them with 2 job flags: +- a starting job flag which will be linked to by all the jobs in the group +- an ending job flag linked to all those jobs in this group (and the starting job flag) + +Any job in such group won't start until the starting job flag is complete. +In the same manner, any job linked to the ending flag will have to wait for the +completion of all the jobs in the group. 
While this increase the complexity with the number of total job, it also gives more flexibility with manual execution, and less work for maintaining and altering diff --git a/contents/dep_wait_job.sh b/contents/dep_wait_job.sh index 3d50b63..0839169 100644 --- a/contents/dep_wait_job.sh +++ b/contents/dep_wait_job.sh @@ -1,15 +1,14 @@ #!/bin/bash # ------------------------------------------------------------------------------------------- -# script d'attente d'un autre job et permettant de simuler une dependance -# requiers un token d'accès en lecture a Rundeck dans la variable RD_TOKEN de l'environnement de Rundeck +# Script for waiting for another job, allowing to create a dependency between them +# A token with read access to the Rundeck API is required # -# aide integree +# integrated self-help # ------------------------------------------------------------------------------------------- # 2017/07/09 AHZ creation -# variables externes -# RD_TOKEN= +# External variables # RD_JOB_SERVERURL= # DEPENDENCY_IGNORE= # RD_FLOW_DAILY_START= @@ -17,6 +16,9 @@ # RD_TMP_DIR= # RD_JOB_FILTER= +# External file +PLUGIN_CONF_FILE=/etc/rundeck/plugin-dependencies.conf +[ -s $PLUGIN_CONF_FILE ] && RD_TOKEN=$( cat $PLUGIN_CONF_FILE 2>&1| tr -d '\n' ) # default values TARGET_PROJECT_NAME="" @@ -58,48 +60,54 @@ VAL_KO=";ko;error;failed;aborted;timedout;timeout" # ---------------------------------------------------------------------------- -# syntaxe d'utilisation +# integrated help function usageSyntax() { echo -e " -syntaxe : $(basename $0) -project '' -group '' -job '' [-state ] - [-force_launch] [-hardlink|-softlink] [-wait ] [-skip] +syntaxe : $(basename $0) -project '' -group '' -job '' [-state ] + [-force_launch] [-hardlink|-softlink] [-wait ] [-skip] [-flow_daily_start hh:mm:ss] [-flow_daily_end hh:mm:ss] [-node_filtering ] [-nodefilter_regex ] - -state : etat attendu du job cible, par defaut : success - -force_launch : force le lancement une fois la limite de periode 
atteinte - -hardlink : (defaut) active la dependance et attend que le job cible soit lance s'il est absent - -softlink : active la dependance uniquement si le job cible est deja lance (en cours, ou termine ok/ko) - -wait : temps d'attente maximal du job cible, par defaut (sec) : $DEP_WAIT_TIMEOUT - -skip : desactive la verification et sort immediatement sans erreur. - -flow_daily_start : indique l'heure de debut du plan, par defaut : $REF_FLOW_DAILY_START - -flow_daily_end : indique l'heure de fin du plan, par defaut : $REF_FLOW_DAILY_END - -node_filtering : valeur entre : adapt, global, regex. Reutilise la notion du noeud d'execution pour etablir la dependance. Par defaut : adapt - -nodefilter_regex : si regex est utilise pour -node_filtering, indiquer ici le filtre de recherche. + -state : expected status for the target job between [ success, error ] (default: success) + -force_launch : force the execution when the waiting time period is reached + -hardlink : (default) activate the dependency and wait until the targed job is started (if absent) and completed + -softlink : activate the dependency only if the target job is already running, or completed (ok or ko) + -wait : maximum wait duration in seconds (default: $DEP_WAIT_TIMEOUT ) + -skip : skip all checks and exit immediatly with a success state + -flow_daily_start : start time of the execution flow (defaut : $REF_FLOW_DAILY_START ) + -flow_daily_end : end time of the execution flow (default : $REF_FLOW_DAILY_END ) + -node_filtering : one of those value [ adapt, global, regex ] to change the behavior when waiting between different nodes (default : adapt) + -nodefilter_regex : specify the regex string if '-node_filtering' is set to 'regex' -Notes : - * $(basename $0) reste en attente jusqu'a expiration du delai ou jusqu'a la fin du plan tant que le job indique n'est pas dans l'etat attendu. 
- * si softlink est present mais que le job cible n'a pas ete lance, il n'y aura pas d'attente - * valeurs possibles pour -state : success error +Worth mentionning : + * $(basename $0) wait until the expiration delay is reached or the end of the execution flow as long as the targeted job isn't in the wanted state + * if '-softlink' is set but the targeted job isn't already launched, there will be no wait. " } -# ---------------------------------------------------------------------------- -# affichage sur stderr +# ----------------------------------------------------------------------------- +# stderr output echoerr() { printf "%s\n" "$*" >&2; } +# check the Rundeck API access to all projects +rdProjects_VerifyAccess() { + CURL_API_VERSION=11 + sTemp=$( ${CURL_API_CMD}/$CURL_API_VERSION/projects 2>&1) + if [ $? -ne 0 ] || ! echo "$sTemp" | grep -i -q "projects count="; then echoerr "Error: cannot contact rundeck through the API nor access the project list"; echoerr "$sTemp"; exit 1; fi +} + # find a job GID from his project, group and job names rdJob_GetIdFromName() { CURL_API_VERSION=17 sData=$( ${CURL_API_CMD}/${CURL_API_VERSION}/project/${TARGET_PROJECT_NAME}/jobs --data-urlencode groupPathExact="$TARGET_GROUP_NAME" --data-urlencode jobExactFilter="$TARGET_JOB_NAME" 2>&1 ) - if [ $? -ne 0 ] || ! echo "$sData"|grep -i -q ""; then echoerr "Error: rdJob_GetIdFromName - more than a single job was returned "; exit 1; fi - # uniquement pour obtenir un message d'etat - echoerr "Notice: JOB '${TARGET_JOB_NAME}' found - extracting id ..." + # on purpose to have a progress status message + echoerr "Notice: JOB definition for '${TARGET_JOB_NAME}' found - extracting id ..." - # format attendu : + # expected format: echo "$sData" | grep -oP -i "job id='\K.*?(?=')" } @@ -107,7 +115,7 @@ rdJob_GetIdFromName() { rdJob_IsRunning() { CURL_API_VERSION=14 sData=$( ${CURL_API_CMD}/${CURL_API_VERSION}/project/${TARGET_PROJECT_NAME}/executions/running 2>&1 ) - if [ $? 
-ne 0 ]; then echoerr "Error: rdJob_IsRunning - bad API query"; echoerr "$sData"; exit 1; fi + if [ $? -ne 0 ]; then echoerr "Error: rdJob_IsRunning - bad API query"; echoerr "API message: $sData"; exit 1; fi # recherche de l'id du job cible sData=$( echo "$sData" | grep "$TARGET_JOB_ID" | head -1 ) @@ -120,9 +128,9 @@ rdJob_IsRunning() { } rdJob_GetLastExecData() { - CURL_API_VERSION=1 + CURL_API_VERSION=11 sData=$( ${CURL_API_CMD}/${CURL_API_VERSION}/job/${TARGET_JOB_ID}/executions --data-urlencode max=1 2>&1 ) - if [ $? -ne 0 ]; then echoerr "Error: rdJob_GetLastExecData - bad API query"; echoerr "$sData"; exit 1; fi + if [ $? -ne 0 ]; then echoerr "Error: rdJob_GetLastExecData - bad API query"; echoerr "API message: $sData"; exit 1; fi echo "$sData" | grep -v '^#' return 0 # grep renvoie rc=1 s'il n'y a pas de donnees @@ -196,23 +204,14 @@ valueRet="" echo "$valueRet" } -# ---------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- echo "RUNDECK DEPENDENCIES WAIT_JOB MODULE" echo "Command line used : $0 $*" echo "" -if [ -z "${RD_TOKEN}" ]; then echoerr "Error: the environment variable RD_TOKEN wasn't found, check your $HOME/.profile file"; exit 1; fi -echo "Rundeck API Token found" - -# test d'acces a l'API via curl -sTemp=$( ${CURL_API_CMD}/1/projects 2>&1) -if [ $? -ne 0 ] || ! 
echo "$sTemp" | grep -i -q "projects count="; then echoerr "Error: cannot contact rundeck through the API"; echoerr "$sTemp"; exit 1; fi - - -# verification de la presence de parametres +# parameters access validation if [ $# -eq 0 ]; then usageSyntax; exit 1; fi -# traitement de la ligne de commande while [ $# -gt 0 ]; do arg="$1" @@ -301,28 +300,35 @@ while [ $# -gt 0 ]; do ;; esac - # argument suivant + # next argument [ $# -gt 0 ] && shift done -# verification valeurs recue +# Cmd line args: mandatory values if [ -z "$TARGET_PROJECT_NAME" ]; then echoerr "Error: the job's project name is required"; exit 1; fi if [ -z "$TARGET_GROUP_NAME" ]; then echoerr "Error: the job's group name is required"; exit 1; fi if [ -z "$TARGET_JOB_NAME" ]; then echoerr "Error: the job name is required"; exit 1; fi -# vérification des filtres de noeuds +# Cmd line args: node filters if [ "$DEP_NODE_MODE" != "global" ]; then - # un job lance avec un filtre aura la variable RD_JOB_FILTER=name: srv1,srv2,... + # a started job with an active filter will have RD_JOB_FILTER=name: srv1,srv2,... if [ ! -z "$RD_JOB_FILTER" ] && [[ ${RD_JOB_FILTER} == "name:"* ]]; then DEP_JOB_NODE_LST=$( echo "$RD_JOB_FILTER" | cut -d ':' -f2 | tr -d '[:blank:]' | tr ',' '|' ) fi fi -# calcul des limites horaires du plan +if [ ! 
-s "$PLUGIN_CONF_FILE" ] && [ -z "${RD_TOKEN}" ]; then echoerr "Error: couldn't find the API token in the $PLUGIN_CONF_FILE file or in the environment"; exit 1; fi +echo "Rundeck API Token found" + +# API access validation +rdProjects_VerifyAccess || exit 1 + + +# Workflow start and end time calculation ------------------------------------- dTodayLimit=$( date "+%Y-%m-%d ${REF_FLOW_DAILY_START}" ) dTodayLimit=$( date -d "${dTodayLimit}" "+%s" ) -# Le plan en cours se termine depuis j-1 +# Current workflow is still in the day-1 => today boundary if [ $TIME_CURRENT -lt $dTodayLimit ]; then TIME_FLOW_DAILY_START=$( date --date='-1 day' "+%Y-%m-%d ${REF_FLOW_DAILY_START}" ) TIME_FLOW_DAILY_START=$( date -d "${TIME_FLOW_DAILY_START}" "+%s" ) @@ -330,7 +336,7 @@ if [ $TIME_CURRENT -lt $dTodayLimit ]; then TIME_FLOW_DAILY_END=$( date "+%Y-%m-%d ${REF_FLOW_DAILY_END}" ) TIME_FLOW_DAILY_END=$( date -d "${TIME_FLOW_DAILY_END}" "+%s" ) -# le plan est celui qui commence jusqu'à j+1 +# Current workflow was started today and will end at day+1 else TIME_FLOW_DAILY_START=$( date "+%Y-%m-%d ${REF_FLOW_DAILY_START}" ) TIME_FLOW_DAILY_START=$( date -d "${TIME_FLOW_DAILY_START}" "+%s" ) @@ -339,7 +345,8 @@ else TIME_FLOW_DAILY_END=$( date -d "${TIME_FLOW_DAILY_END}" "+%s" ) fi -# information banner + +# information banner ---------------------------------------------------------- echo "Current PID:$$" echo "----------------------------------------------" echo "FLOW START: $( date -d @$TIME_FLOW_DAILY_START --rfc-2822 )" @@ -355,16 +362,19 @@ echo "Node filter mode: ${DEP_NODE_MODE}" echo "----------------------------------------------" echo "" -# verification du respect de la dependance + +# check if the dependency was set to skip if [ ! 
-z "$DEPENDENCY_IGNORE" ]; then echo "DEPENDENCY_IGNORE variable or -bypass parameter is set => the script will exit immediately => success" exit 0 fi -# temporisation +# Forced wait to allow other started jobs to set in sleep ${STARTUP_DELAY_SEC}s -# recherche de l'id du job cible + +# Target job waiting sequence ------------------------------------------------- +# lookup for the target job ID TARGET_JOB_ID=$( rdJob_GetIdFromName ) || exit 1 TARGET_JOB_SKIPFILE=${REF_TMP_DIR}/deps_skip.$$.${TARGET_JOB_ID} @@ -374,11 +384,12 @@ echo "" echo "Waiting loop started (for $( date -u -d @${DEP_WAIT_TIMEOUT} +'%Hh%Mm%Ss' ))..." echo "To exit this loop, run this shell command : sudo -u ${USER} touch ${TARGET_JOB_SKIPFILE}" echo "" -# traitement du job jusqu'a la fin du timeout + +# Wait loop nCount=0 while [ $nCount -lt ${DEP_WAIT_TIMEOUT} ]; do - # verification du fichier skip + # search the manual skipfile presence if [ ! -z "$TARGET_JOB_SKIPFILE" ] && [ -f "$TARGET_JOB_SKIPFILE" ]; then echo "Skip file $TARGET_JOB_SKIPFILE present => success" rm "$TARGET_JOB_SKIPFILE" @@ -390,11 +401,11 @@ while [ $nCount -lt ${DEP_WAIT_TIMEOUT} ]; do fi fi - # Etat d'execution du job + # Job execution status TARGET_JOB_ISRUNNING=$( rdJob_IsRunning ) || exit 1 if [ "$TARGET_JOB_ISRUNNING" == "0" ]; then - # recuperation des donnees de la derniere execution, si disponibles + # retrieve last execution information, if available TARGET_JOB_LASTEXEC_DATA=$( rdJob_GetLastExecData ) || exit 1 if [ ! 
-z "$TARGET_JOB_LASTEXEC_DATA" ]; then TARGET_JOB_LASTEXEC_STATUS=$( rdJob_GetLastExecValue -status ) || exit 1 @@ -404,10 +415,9 @@ while [ $nCount -lt ${DEP_WAIT_TIMEOUT} ]; do SEARCH_STATUS="" - # verif si la date de demarrage du job cible correspond au plan du jour + # validate the job was started in the today's workflow time range if [ $TARGET_JOB_LASTEXEC_TIME_START -ge $TIME_FLOW_DAILY_START ]; then - - # le job correspond au plan en cours, verification de son etat + case "$TARGET_JOB_EXPECTED_STATUS" in success|ok) SEARCH_STATUS="$VAL_OK" @@ -420,29 +430,31 @@ while [ $nCount -lt ${DEP_WAIT_TIMEOUT} ]; do ;; esac - # Validation de l'etat du job, si ce n'est pas celui attendu => retour en attente + # Compare the job completion status to the expected state if echo "$SEARCH_STATUS" | grep -q "$TARGET_JOB_LASTEXEC_STATUS"; then - # mode automatique si un filtre de node a ete utilise au lancement + # behavior change depending of the node comparison mode + # adapt mode => will look for having the same node both jobs were executed on if [ "$DEP_NODE_MODE" == "adapt" ] && [ ! -z "$DEP_JOB_NODE_LST" ]; then if echo "$TARGET_JOB_STATUS_NODES" | egrep -i -q "^(${DEP_JOB_NODE_LST})$"; then TARGET_JOB_DEP_RESOLVED=1 echo "Node filter list - node found in : "$TARGET_JOB_STATUS_NODES fi - # mode regex, si un filtre regex a ete indique + # regex mode elif [ "$DEP_NODE_MODE" == "regex" ] && [ ! 
-z "$DEP_JOB_NODE_REGEX" ]; then if echo "$TARGET_JOB_STATUS_NODES" | egrep -i -q "${DEP_JOB_NODE_REGEX}"; then TARGET_JOB_DEP_RESOLVED=1 echo "Node filter regex - match found in : "$TARGET_JOB_STATUS_NODES fi - # mode global ou aucun filtre, tout job dans l'etat attendu est valide + # global mode or no filter: any job in the expected state is valid, without regards for the execution node else TARGET_JOB_DEP_RESOLVED=1 fi - # verification de la resolution de la dependance et sortie + + # exit if the deps resolution state is valid if [ $TARGET_JOB_DEP_RESOLVED -eq 1 ]; then echo "Valid execution found : $( date --iso-8601=seconds -d @$TARGET_JOB_LASTEXEC_TIME_END ) => status: $TARGET_JOB_LASTEXEC_STATUS" break @@ -450,28 +462,24 @@ while [ $nCount -lt ${DEP_WAIT_TIMEOUT} ]; do fi - # plan du job trouve different + # different workflow found else - # verification du type de dependance + # exit if the dependency wasn't mandatory if [ $TARGET_JOB_MANDATORY -eq 0 ]; then echo "No job execution for the current flow AND optional dependency => success" TARGET_JOB_DEP_RESOLVED=1 break fi - - # attente fi # no data else - # dependance optionnelle => job absent + # job is missing and the dependency is not mandatory if [ $TARGET_JOB_MANDATORY -eq 0 ]; then echo "No job execution data found AND optional dependency => success" TARGET_JOB_DEP_RESOLVED=1 break fi - - # toujours pas de job => attente fi fi diff --git a/plugin.yaml b/plugin.yaml index 1095ed7..3e27b54 100644 --- a/plugin.yaml +++ b/plugin.yaml @@ -1,5 +1,5 @@ name: dependencies -version: 1.0.9 +version: 1.1.0 rundeckPluginVersion: 1.2 author: HAL date: Wed Aug 17 19:37:26 CEST 2017 @@ -66,7 +66,7 @@ providers: - type: Select name: node_filtering title: Node filtering - description: "Option to manage launches when the 'Change the target nodes' option is used instead of the default setting. \nAdapt tries to reuse the filtered node list when a job is manually launched using specific nodes (default). 
\nGlobal will search for the first job with the valid status, without regard for the execution node. \nRegex will use the provided filter in the execution node list. When used, add in 'Other Params' this extra parameter : -nodefilter_regex 'regex mask' " + description: "Option to manage launches when the 'Change the target nodes' option is used instead of the default setting. \nAdapt tries to reuse the information from the filtered node list to target the same nodes (default). \nGlobal will search for the first job with a valid status, without regard for the execution node. \nRegex will use the provided filter in the execution node list. When used, add in 'Other Params' this extra parameter : -nodefilter_regex 'regex mask' " values: "adapt,global,regex" default: "adapt" required: false