diff --git a/.gitignore b/.gitignore
index 13bbe5b9..14859fc5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 .idea/
+.project
diff --git a/os-datavirt/module.yaml b/os-datavirt/module.yaml
index 9576349f..a16a2dda 100644
--- a/os-datavirt/module.yaml
+++ b/os-datavirt/module.yaml
@@ -6,6 +6,7 @@ description: Legacy os-datavirt script package.
 modules:
   install:
   - name: os-eap-launch
+  - name: os-partition
   - name: os-eap-migration
 
 execute:
diff --git a/os-eap-migration/added/launch/openshift-migrate-common.sh b/os-eap-migration/added/launch/openshift-migrate-common.sh
index d0867df8..63566bbf 100644
--- a/os-eap-migration/added/launch/openshift-migrate-common.sh
+++ b/os-eap-migration/added/launch/openshift-migrate-common.sh
@@ -7,9 +7,11 @@ source /opt/partition/partitionPV.sh
 
 function runMigration() {
   local instanceDir=$1
+
+  # if a count is provided, the node name should be constructed from it
   local count=$2
-  export NODE_NAME="${NODE_NAME:-node}-${count}"
+  [ "x$count" != "x" ] && export NODE_NAME="${NODE_NAME:-node}-${count}"
 
   cp -f ${STANDALONE_XML_COPY} ${STANDALONE_XML}
 
   source $JBOSS_HOME/bin/launch/configure.sh
@@ -29,9 +31,11 @@ function runMigration() {
   local success=false
   local message="Finished, migration pod has been terminated"
 
   ${JBOSS_HOME}/bin/readinessProbe.sh
+  local probeStatus=$?
 
-  if [ $? -eq 0 ] ; then
+  if [ $probeStatus -eq 0 ] ; then
     echo "$(date): Server started, checking for transactions"
+
     local startTime=$(date +'%s')
     local endTime=$((startTime + ${RECOVERY_TIMEOUT} + 1))
@@ -43,13 +47,23 @@ function runMigration() {
       local recoveryClass="com.arjuna.ats.arjuna.tools.RecoveryMonitor"
       recoveryJar=$(find "${JBOSS_HOME}" -name \*.jar | xargs grep -l "${recoveryClass}")
       if [ -n "${recoveryJar}" ] ; then
-        echo "$(date): Executing synchronous recovery scan"
+        echo "$(date): Executing synchronous recovery scan for the first time"
         java -cp "${recoveryJar}" "${recoveryClass}" -host "${recoveryHost}" -port "${recoveryPort}"
         echo "$(date): Executing synchronous recovery scan for a second time"
         java -cp "${recoveryJar}" "${recoveryClass}" -host "${recoveryHost}" -port "${recoveryPort}"
+        echo "$(date): Synchronous recovery scans finished for the first and second time"
       fi
     fi
+  fi
+
+  # -- check whether the pod log is clean of errors (only if a function of this name exists, provided by the os-partition module)
+  if [ $probeStatus -eq 0 ] && [ "$(type -t probePodLogForRecoveryErrors)" = 'function' ]; then
+    probePodLogForRecoveryErrors
+    probeStatus=$?
+    [ $probeStatus -ne 0 ] && echo "The migration container log contains periodic recovery errors, check it for details."
+  fi
 
+  if [ $probeStatus -eq 0 ] ; then
     while [ $(date +'%s') -lt $endTime -a ! -f "${terminatingFile}" ] ; do
       run_cli_cmd '/subsystem=transactions/log-store=log-store/:probe' > /dev/null 2>&1
       local transactions="$(run_cli_cmd 'ls /subsystem=transactions/log-store=log-store/transactions')"
@@ -66,7 +80,7 @@ function runMigration() {
 
   if [ "${success}" = "true" ] ; then
     message="Finished, recovery terminated successfully"
   else
-    message="Finished, Recovery DID NOT complete, check log for details. Recovery will be reattempted." 
+    message="Finished, Recovery DID NOT complete, check log for details. Recovery will be reattempted."
   fi
 fi
diff --git a/os-eap-migration/module.yaml b/os-eap-migration/module.yaml
index 93eb7d53..e60f9440 100644
--- a/os-eap-migration/module.yaml
+++ b/os-eap-migration/module.yaml
@@ -3,10 +3,6 @@ name: os-eap-migration
 version: '1.0'
 description: EAP common migration scripts
 
-modules:
-  install:
-  - name: os-partition
-
 envs:
 - name: "RECOVERY_TIMEOUT"
   example: "360"
diff --git a/os-eap-probes/added/readinessProbe.sh b/os-eap-probes/added/readinessProbe.sh
index 694b0166..679b2072 100644
--- a/os-eap-probes/added/readinessProbe.sh
+++ b/os-eap-probes/added/readinessProbe.sh
@@ -8,7 +8,7 @@ LOG=/tmp/readiness-log
 
 COUNT=30
 SLEEP=5
-DEBUG=false
+DEBUG=${SCRIPT_DEBUG:-false}
 PROBE_IMPL=probe.eap.dmr.EapProbe
 
 if [ $# -gt 0 ] ; then
diff --git a/os-eap64-launch/added/openshift-launch.sh b/os-eap64-launch/added/openshift-launch.sh
index 6268ad23..430590fd 100755
--- a/os-eap64-launch/added/openshift-launch.sh
+++ b/os-eap64-launch/added/openshift-launch.sh
@@ -7,9 +7,6 @@ source ${JBOSS_HOME}/bin/launch/openshift-common.sh
 
 function runServer() {
   local instanceDir=$1
-  local count=$2
-
-  export NODE_NAME="${NODE_NAME:-node}-${count}"
 
   source $JBOSS_HOME/bin/launch/configure.sh
diff --git a/os-eap64-launch/module.yaml b/os-eap64-launch/module.yaml
index c6217504..b40b850b 100644
--- a/os-eap64-launch/module.yaml
+++ b/os-eap64-launch/module.yaml
@@ -7,6 +7,7 @@ modules:
   install:
   - name: os-eap-launch
   - name: os-eap-node-name
+  - name: os-partition-txnrecovery
   - name: os-eap-migration
 
 execute:
diff --git a/os-eap7-launch/added/openshift-launch.sh b/os-eap7-launch/added/openshift-launch.sh
index 941c235b..cb2f8b34 100755
--- a/os-eap7-launch/added/openshift-launch.sh
+++ b/os-eap7-launch/added/openshift-launch.sh
@@ -7,15 +7,12 @@ source $JBOSS_HOME/bin/launch/logging.sh
 
 # TERM signal handler
 function clean_shutdown() {
   log_error "*** JBossAS wrapper process ($$) received TERM signal ***"
-  $JBOSS_HOME/bin/jboss-cli.sh -c ":shutdown(timeout=60)"
+  $JBOSS_HOME/bin/jboss-cli.sh -c "shutdown --timeout=60"
   wait $!
 }
 
 function runServer() {
   local instanceDir=$1
-  local count=$2
-
-  export NODE_NAME="${NODE_NAME:-node}-${count}"
 
   source $JBOSS_HOME/bin/launch/configure.sh
diff --git a/os-eap7-launch/module.yaml b/os-eap7-launch/module.yaml
index 63bd076a..0b93c66e 100644
--- a/os-eap7-launch/module.yaml
+++ b/os-eap7-launch/module.yaml
@@ -7,6 +7,7 @@ modules:
   install:
   - name: os-eap-launch
   - name: os-eap-node-name
+  - name: os-partition-txnrecovery
   - name: os-eap-migration
 
 execute:
diff --git a/os-jdg-launch/module.yaml b/os-jdg-launch/module.yaml
index d023c7df..d5123002 100644
--- a/os-jdg-launch/module.yaml
+++ b/os-jdg-launch/module.yaml
@@ -6,6 +6,7 @@ description: Legacy os-jdg-launch script package.
 modules:
   install:
   - name: os-eap-launch
+  - name: os-partition
   - name: os-eap-migration
 
 execute:
diff --git a/os-jdg7-launch/module.yaml b/os-jdg7-launch/module.yaml
index e6c2cf0a..ae223225 100644
--- a/os-jdg7-launch/module.yaml
+++ b/os-jdg7-launch/module.yaml
@@ -6,6 +6,7 @@ description: Legacy os-jdg7-launch script package.
 modules:
   install:
   - name: os-eap-launch
+  - name: os-partition
   - name: os-eap-migration
 
 execute:
diff --git a/os-partition-txnrecovery/added/partitionPV.sh b/os-partition-txnrecovery/added/partitionPV.sh
new file mode 100644
index 00000000..02078498
--- /dev/null
+++ b/os-partition-txnrecovery/added/partitionPV.sh
@@ -0,0 +1,256 @@
+#!/bin/sh
+
+[ "x${SCRIPT_DEBUG}" = "xtrue" ] && DEBUG_QUERY_API_PARAM="-l debug"
+
+# parameters
+# - needle to search for in the array
+# - array passed as: "${ARRAY_VAR[@]}"
+function arrContains() {
+  local element match="$1"
+  shift
+  for element; do
+    [[ "$element" == "$match" ]] && return 0
+  done
+  return 1
+}
+
+function init_pod_name() {
+  # when POD_NAME is non-empty, use the given name as-is
+
+  # docker sets up container_uuid
+  [ -z "${POD_NAME}" ] && POD_NAME="${container_uuid}"
+  # openshift sets up the pod name as the host name
+  [ -z "${POD_NAME}" ] && POD_NAME="${HOSTNAME}"
+
+  # having POD_NAME set is crucial, as the processing depends on a unique
+  # pod name being used as the identifier for migration
+  if [ -z "${POD_NAME}" ]; then
+    >&2 echo "Cannot proceed: failed to get a unique POD_NAME identifying the server to be started"
+    exit 1
+  fi
+}
+
+# redefines the starting jboss.node.name used as the identifier of the jboss container;
+# it needs to be restricted to 23 characters (CLOUD-427)
+function truncate_jboss_node_name() {
+  local NODE_NAME_TRUNCATED="$1"
+  if [ ${#1} -gt 23 ]; then
+    NODE_NAME_TRUNCATED=${1: -23}
+  fi
+  NODE_NAME_TRUNCATED=${NODE_NAME_TRUNCATED##-} # the identifier must not start with '-', which causes issues in bash
+  echo "${NODE_NAME_TRUNCATED}"
+}
+
+# parameters
+# - base directory
+function partitionPV() {
+  local podsDir="$1"
+  local applicationPodDir
+
+  mkdir -p "${podsDir}"
+
+  init_pod_name
+  local applicationPodDir="${podsDir}/${POD_NAME}"
+
+  local waitCounter=0
+  # 2) while any matching RECOVERY descriptor exists, sleep
+  while true; do
+    local isRecoveryInProgress=false
+    # an existing RECOVERY descriptor means a recovery is in progress
+    find "${podsDir}" -maxdepth 1 -type f -name "${POD_NAME}-RECOVERY-*" 2>/dev/null | grep -q .
+    [ $? -eq 0 ] && isRecoveryInProgress=true
+
+    # we are free to start the app container
+    if ! $isRecoveryInProgress; then
+      break
+    fi
+
+    if $isRecoveryInProgress; then
+      echo "Waiting to start pod ${POD_NAME} as recovery process '$(echo ${podsDir}/${POD_NAME}-RECOVERY-*)' is currently cleaning the data directory."
+    fi
+
+    sleep 1
+    echo "`date`: waiting for the recovery process to clean the environment for the pod to start"
+  done
+
+  # 3) create /pods/<applicationPodName>
+  SERVER_DATA_DIR="${applicationPodDir}/serverData"
+  mkdir -p "${SERVER_DATA_DIR}"
+
+  if [ ! -f "${SERVER_DATA_DIR}/../data_initialized" ]; then
+    init_data_dir ${SERVER_DATA_DIR}
+    touch "${SERVER_DATA_DIR}/../data_initialized"
+  fi
+
+  # 4) launch the server with the node name set to the pod name (openshift-node-name.sh uses the node name value)
+  NODE_NAME=$(truncate_jboss_node_name "${POD_NAME}") runServer "${SERVER_DATA_DIR}" &
+
+  PID=$!
+
+  trap "echo Received TERM of pid ${PID} of pod name ${POD_NAME}; kill -TERM $PID" TERM
+
+  wait $PID 2>/dev/null
+  STATUS=$?
+  trap - TERM
+  wait $PID 2>/dev/null
+
+  echo "Server terminated with status $STATUS ($(kill -l $STATUS 2>/dev/null))"
+
+  if [ "$STATUS" -eq 255 ] ; then
+    echo "Server returned 255, changing to 254"
+    STATUS=254
+  fi
+
+  exit $STATUS
+}
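+
+# A minimal usage sketch (an illustrative assumption, not part of this script):
+# a launch script is expected to define runServer and init_data_dir, then source
+# this file and call partitionPV with the base directory on the persistent volume, e.g.
+#
+#   source /opt/partition/partitionPV.sh
+#   function init_data_dir() { :; }  # seed the fresh server data directory passed as $1
+#   function runServer()     { :; }  # start the server against the data directory passed as $1
+#   partitionPV "${DATA_DIR}/pods"   # DATA_DIR is a hypothetical base directory variable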
+
+# parameters
+# - base directory
+# - migration pause between cycles
+function migratePV() {
+  local podsDir="$1"
+  local applicationPodDir
+  MIGRATION_PAUSE="${2:-30}"
+  MIGRATED=false
+
+  init_pod_name
+  local recoveryPodName="${POD_NAME}"
+
+  while true ; do
+
+    # 1) periodically, for each /pods/<applicationPodName>
+    for applicationPodDir in "${podsDir}"/*; do
+      # skip anything that is not a directory
+      [ ! -d "$applicationPodDir" ] && continue
+
+      # 1.a) create /pods/<applicationPodName>-RECOVERY-<recoveryPodName>
+      local applicationPodName="$(basename ${applicationPodDir})"
+      touch "${podsDir}/${applicationPodName}-RECOVERY-${recoveryPodName}"
+      sync
+      STATUS=42 # preset value; getting the list of living pods may fail
+
+      # 1.a.i) if <applicationPodName> is not in the cluster
+      echo "examining existence of a living pod for directory: '${applicationPodDir}'"
+      unset LIVING_PODS
+      LIVING_PODS=($($(dirname ${BASH_SOURCE[0]})/query.py -q pods_living -f list_space ${DEBUG_QUERY_API_PARAM}))
+      [ $? -ne 0 ] && echo "ERROR: Can't get the list of living pods" && continue
+      # when an application pod of the same name was started/is living, it will manage recovery on its own
+      local IS_APPLICATION_POD_LIVING=true
+      if ! arrContains ${applicationPodName} "${LIVING_PODS[@]}"; then
+
+        IS_APPLICATION_POD_LIVING=false
+
+        (
+          # 1.a.ii) run recovery until empty (including orphan checks and empty object store hierarchy deletion)
+          MIGRATION_POD_TIMESTAMP=$(getPodLogTimestamp) # record the current pod log timestamp
+          SERVER_DATA_DIR="${applicationPodDir}/serverData"
+          NODE_NAME=$(truncate_jboss_node_name "${applicationPodName}") runMigration "${SERVER_DATA_DIR}" &
+
+          PID=$!
+
+          trap "echo Received TERM ; kill -TERM $PID" TERM
+
+          wait $PID 2>/dev/null
+          STATUS=$?
+          trap - TERM
+          wait $PID 2>/dev/null
+
+          echo "Migration terminated with status $STATUS ($(kill -l $STATUS))"
+
+          if [ "$STATUS" -eq 255 ] ; then
+            echo "Server returned 255, changing to 254"
+            STATUS=254
+          fi
+          exit $STATUS
+        ) &
+
+        PID=$!
+
+        trap "kill -TERM $PID" TERM
+
+        wait $PID 2>/dev/null
+        STATUS=$?
+        trap - TERM
+        wait $PID 2>/dev/null
+
+        if [ $STATUS -eq 0 ]; then
+          # 1.a.iii) delete /pods/<applicationPodName> when recovery was successful
+          echo "`date`: Migration successfully finished for application directory ${applicationPodDir}, removing it (recovery pod: ${recoveryPodName})"
+          rm -rf "${applicationPodDir}"
+        fi
+      fi
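+
+      # STATUS at this point: 0 when the migration subshell finished cleanly,
+      # the failed migration exit code when it did not, and the preset 42 when
+      # no migration ran because the application pod is still alive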
+      # 1.b) delete the recovery marker
+      if [ $STATUS -eq 0 ] || [ "${IS_APPLICATION_POD_LIVING}" = "true" ]; then
+        # STATUS is 0: we are free of in-doubt transactions
+        # IS_APPLICATION_POD_LIVING is true: a running pod of the same name will do the recovery on its own, the recovery pod won't manage it
+        rm -f "${podsDir}/${applicationPodName}-RECOVERY-${recoveryPodName}"
+      fi
+
+      # 2) periodically, for files /pods/<applicationPodName>-RECOVERY-<recoveryPodName>, clean up after failed recovery pods
+      for recoveryPodFilePathToCheck in "${podsDir}/"*-RECOVERY-*; do
+        local recoveryPodFileToCheck="$(basename ${recoveryPodFilePathToCheck})"
+        local recoveryPodNameToCheck=${recoveryPodFileToCheck#*RECOVERY-}
+
+        unset LIVING_PODS
+        LIVING_PODS=($($(dirname ${BASH_SOURCE[0]})/query.py -q pods_living -f list_space ${DEBUG_QUERY_API_PARAM}))
+        [ $? -ne 0 ] && echo "ERROR: Can't get the list of living pods" && continue
+
+        if ! arrContains ${recoveryPodNameToCheck} "${LIVING_PODS[@]}"; then
+          # the recovery pod is dead, garbage-collecting its marker
+          rm -f "${recoveryPodFilePathToCheck}"
+        fi
+      done
+
+    done
+
+    echo "`date`: Finished Migration Check cycle, pausing for ${MIGRATION_PAUSE} seconds before resuming"
+    MIGRATION_POD_TIMESTAMP=$(getPodLogTimestamp)
+    sleep "${MIGRATION_PAUSE}"
+  done
+}
+
+# parameters
+# - pod name (optional)
+function getPodLogTimestamp() {
+  init_pod_name
+  local podNameToProbe=${1:-$POD_NAME}
+
+  local logOutput=$($(dirname ${BASH_SOURCE[0]})/query.py -q log --pod ${podNameToProbe} --tailline 1)
+  # only the last line of the log is returned; take its first field, which is the timestamp
+  echo $logOutput | sed 's/ .*$//'
+}
+
+# parameters
+# - since time (where the pod log listing starts)
+# - pod name (optional)
+function probePodLogForRecoveryErrors() {
+  init_pod_name
+  local sinceTimestampParam=''
+  local sinceTimestamp=${1:-$MIGRATION_POD_TIMESTAMP}
+  [ "x$sinceTimestamp" != "x" ] && sinceTimestampParam="--sincetime ${sinceTimestamp}"
+  local podNameToProbe=${2:-$POD_NAME}
+
+  local logOutput=$($(dirname ${BASH_SOURCE[0]})/query.py -q log --pod ${podNameToProbe} ${sinceTimestampParam})
+  local probeStatus=$?
+
+  if [ $probeStatus -ne 0 ]; then
+    echo "Cannot contact the OpenShift API to get the log of pod ${POD_NAME}"
+    return 1
+  fi
+
+  local isPeriodicRecoveryError=false
+  local patternToCheck="ERROR.*Periodic Recovery"
+  [ "x${SCRIPT_DEBUG}" = "xtrue" ] && set +x # even for debug it's too verbose to trace this pattern check
+  while read line; do
+    [[ $line =~ $patternToCheck ]] && isPeriodicRecoveryError=true && break
+  done <<< "$logOutput"
+  [ "x${SCRIPT_DEBUG}" = "xtrue" ] && set -x
+
+  if $isPeriodicRecoveryError; then # the ERROR string was found in the log output
+    echo "Pod '${POD_NAME}' started with periodic recovery errors: '$line'"
+    return 1
+  fi
+
+  return 0
+}
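+
+# How these functions are divided between pods (a sketch based on the callers
+# visible in this change, not an exhaustive contract): an application pod runs
+# partitionPV to guard its data directory, a dedicated migration pod loops in
+# migratePV, and openshift-migrate-common.sh calls probePodLogForRecoveryErrors
+# once its readiness probe succeeds.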
diff --git a/os-partition-txnrecovery/added/query.py b/os-partition-txnrecovery/added/query.py
new file mode 100644
index 00000000..a52606b9
--- /dev/null
+++ b/os-partition-txnrecovery/added/query.py
@@ -0,0 +1,169 @@
+#!/bin/python
+"""
+Copyright 2018 Red Hat, Inc.
+
+Red Hat licenses this file to you under the Apache License, version
+2.0 (the "License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied.  See the License for the specific language governing
+permissions and limitations under the License.
+"""
+
+import argparse
+import json
+import logging
+import urllib2
+
+from enum import Enum
+
+
+class QueryType(Enum):
+    """
+    Represents what can be queried.
+    PODS: list of pods
+    PODS_LIVING: list of living pods
+    LOG: log of a particular pod
+    """
+
+    PODS = 'pods'
+    PODS_LIVING = 'pods_living'
+    LOG = 'log'
+
+    def __str__(self):
+        return self.value
+
+
+class OutputFormat(Enum):
+    """
+    Represents the output format of this script.
+    RAW: no formatting
+    LIST_SPACE: values are delimited with spaces
+    LIST_COMMA: values are delimited with commas
+    """
+
+    RAW = "raw"
+    LIST_SPACE = "list_space"
+    LIST_COMMA = "list_comma"
+
+    def __str__(self):
+        return self.value
+
+
+class OpenShiftQuery():
+    """
+    Utility class to help query the OpenShift API. Declares constants
+    for the token and the query URI, and methods that run the query.
+    """
+
+    API_URL = 'https://openshift.default.svc'
+    TOKEN_FILE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
+    NAMESPACE_FILE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
+    CERT_FILE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
+    STATUS_LIVING_PODS = ['Pending', 'Running', 'Unknown']
+
+    @staticmethod
+    def __readFile(fileToRead):
+        with open(fileToRead, 'r') as readingfile:
+            return readingfile.read().strip()
+
+    @staticmethod
+    def getToken():
+        return OpenShiftQuery.__readFile(OpenShiftQuery.TOKEN_FILE_PATH)
+
+    @staticmethod
+    def getNameSpace():
+        return OpenShiftQuery.__readFile(OpenShiftQuery.NAMESPACE_FILE_PATH)
+
+    @staticmethod
+    def queryApi(urlSuffix):
+        request = urllib2.Request(OpenShiftQuery.API_URL + urlSuffix,
+            headers = {'Authorization': 'Bearer ' + OpenShiftQuery.getToken(), 'Accept': 'application/json'})
+        logger.debug('query for: "%s"', request.get_full_url())
+        try:
+            return urllib2.urlopen(request, cafile = OpenShiftQuery.CERT_FILE_PATH).read()
+        except:
+            logger.critical('Cannot query OpenShift API for "%s"', request.get_full_url())
+            raise
+
+
+def getPodsJsonData():
+    jsonText = OpenShiftQuery.queryApi('/api/v1/namespaces/{}/pods'.format(OpenShiftQuery.getNameSpace()))
+    return json.loads(jsonText)
+
+def getPods():
+    jsonPodsData = getPodsJsonData()
+    pods = []
+    for pod in jsonPodsData["items"]:
+        logger.debug('queried pod %s with status %s', pod["metadata"]["name"], pod["status"]["phase"])
+        pods.append(pod["metadata"]["name"])
+    return pods
+
+def getLivingPods():
+    jsonPodsData = getPodsJsonData()
+
+    pods = []
+    for pod in jsonPodsData["items"]:
+        logger.debug('queried pod %s with status %s', pod["metadata"]["name"], pod["status"]["phase"])
+        if pod["status"]["phase"] in OpenShiftQuery.STATUS_LIVING_PODS:
+            pods.append(pod["metadata"]["name"])
+    return pods
+
+def getLog(podName, sinceTime, tailLine):
+    sinceTimeParam = '' if sinceTime is None else '&sinceTime=' + sinceTime
+    tailLineParam = '' if tailLine is None else '&tailLines=' + tailLine
+    podLogLines = OpenShiftQuery.queryApi('/api/v1/namespaces/{}/pods/{}/log?timestamps=true{}{}'
+        .format(OpenShiftQuery.getNameSpace(), podName, sinceTimeParam, tailLineParam))
+    return podLogLines
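+
+# Example invocations, mirroring the calls made by partitionPV.sh:
+#   query.py -q pods_living -f list_space
+#   query.py -q log --pod <pod-name> --tailline 1
+#   query.py -q log --pod <pod-name> --sincetime <timestamp>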
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description = "Queries the OpenShift API and parses the returned json to get specific info from it")
+    parser.add_argument("-q", "--query", required = False, type = QueryType, default = QueryType.PODS, choices = list(QueryType),
+        help = "Query type/what to query: either printing the log of a pod, listing all pods in the current namespace, "
+             + "or listing the living pods in the current namespace")
+    parser.add_argument("-f", "--format", required = False, type = OutputFormat, default = OutputFormat.RAW, choices = list(OutputFormat), help = "Output format")
+    parser.add_argument("--pod", required = False, type = str, default = None, help = "Pod name to work with")
+    parser.add_argument("--sincetime", required = False, type = str, default = None,
+        help = "time the log should be shown from (relevant with '--query log')")
+    parser.add_argument("--tailline", required = False, type = str, default = None,
+        help = "how many lines to print from the end of the log (relevant with '--query log')")
+    parser.add_argument("-l", "--loglevel", default = "CRITICAL", help = "Log level",
+        choices = ["debug", "DEBUG", "info", "INFO", "warning", "WARNING", "error", "ERROR", "critical", "CRITICAL"])
+    parser.add_argument("args", nargs = argparse.REMAINDER, help = "Arguments of the query (each query type takes different ones)")
+
+    args = parser.parse_args()
+
+    # don't spam warnings (e.g. when not verifying ssl connections)
+    logging.captureWarnings(True)
+    logging.basicConfig(level = args.loglevel.upper())
+    logger = logging.getLogger(__name__)
+
+    logger.debug("Starting to query the openshift api with args: %s", args)
+
+    if args.query == QueryType.PODS:
+        queryResult = getPods()
+    elif args.query == QueryType.PODS_LIVING:
+        queryResult = getLivingPods()
+    elif args.query == QueryType.LOG:
+        if args.pod is None:
+            logger.critical('query of type "--query log" requires an existing pod name as an argument')
+            exit(1)
+        podName = args.pod
+        sinceTime = args.sincetime
+        tailLine = args.tailline
+        queryResult = getLog(podName, sinceTime, tailLine)
+    else:
+        logger.critical('No handler for query type %s', args.query)
+        exit(1)
+
+    if args.format == OutputFormat.LIST_SPACE:
+        print ' '.join(queryResult)
+    elif args.format == OutputFormat.LIST_COMMA:
+        print ','.join(queryResult)
+    else:  # RAW format
+        print queryResult,
+
+    exit(0)
diff --git a/os-partition-txnrecovery/configure.sh b/os-partition-txnrecovery/configure.sh
new file mode 100755
index 00000000..7bbfb7f4
--- /dev/null
+++ b/os-partition-txnrecovery/configure.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -e
+
+unset DEBUG_QUERY_API_PARAM
+
+SCRIPT_DIR=$(dirname $0)
+ADDED_DIR=${SCRIPT_DIR}/added
+
+mkdir -p ${JBOSS_HOME}/bin/queryapi
+cp -r ${ADDED_DIR}/queryapi/* ${JBOSS_HOME}/bin/queryapi
+chmod -R ugo+x $JBOSS_HOME/bin/queryapi
diff --git a/os-partition-txnrecovery/install_as_root.sh b/os-partition-txnrecovery/install_as_root.sh
new file mode 100755
index 00000000..7fa743f8
--- /dev/null
+++ b/os-partition-txnrecovery/install_as_root.sh
@@ -0,0 +1,11 @@
+set -u
+set -e
+
+SCRIPT_DIR=$(dirname $0)
+ADDED_DIR=${SCRIPT_DIR}/added
+
+test -d /opt/partition || mkdir /opt/partition
+
+cp "$ADDED_DIR"/* /opt/partition/
+
+chmod 755 /opt/partition/*
diff --git a/os-partition-txnrecovery/module.yaml b/os-partition-txnrecovery/module.yaml
new file mode 100644
index 00000000..0bd0a3bd
--- /dev/null
+++ b/os-partition-txnrecovery/module.yaml
@@ -0,0 +1,12 @@
+schema_version: 1
+name: os-partition-txnrecovery
+version: '1.0'
+description: Lock-free os-partition script package.
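+# note: install_as_root.sh copies the added/ scripts to /opt/partition, the
+# location openshift-migrate-common.sh sources partitionPV.sh from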
+execute:
+- script: install_as_root.sh
+packages:
+  repositories:
+  - jboss-os
+  install:
+  - python-enum34
+  - python-requests
diff --git a/os-sso71/module.yaml b/os-sso71/module.yaml
index 458a59be..88928eb0 100644
--- a/os-sso71/module.yaml
+++ b/os-sso71/module.yaml
@@ -7,6 +7,7 @@ modules:
   install:
   - name: os-eap-launch
   - name: os-eap7-launch
+  - name: os-partition
   - name: os-eap-migration
 
 execute:
diff --git a/os-sso72/module.yaml b/os-sso72/module.yaml
index 94bbf5c7..bd36f63d 100644
--- a/os-sso72/module.yaml
+++ b/os-sso72/module.yaml
@@ -7,6 +7,7 @@ modules:
   install:
   - name: os-eap-launch
   - name: os-eap7-launch
+  - name: os-partition
   - name: os-eap-migration
 
 packages:
diff --git a/tests/features/datagrid/datagrid_split.feature b/tests/features/datagrid/datagrid_split.feature
index 6148b55c..ea2c1b5c 100644
--- a/tests/features/datagrid/datagrid_split.feature
+++ b/tests/features/datagrid/datagrid_split.feature
@@ -4,7 +4,7 @@ Feature: Openshift DataGrid SPLIT tests
   Scenario: Ensure split doesn't happen with regular configuration
     When container is ready
     Then container log should match regex .*Data Grid.*started.*
-    And available container log should not contain Attempting to obtain lock for directory:
+    And available container log should contain jboss.server.data.dir = /opt/datagrid/standalone/data
 
   @jboss-datagrid-6 @jboss-datagrid-7
   Scenario: Ensure split happens with SPLIT_DATA
@@ -12,7 +12,7 @@ Feature: Openshift DataGrid SPLIT tests
       | variable | value |
       | SPLIT_DATA | TRUE |
     Then container log should match regex .*Data Grid.*started.*
-    And available container log should contain Attempting to obtain lock for directory:
+    And available container log should contain jboss.server.data.dir = /opt/datagrid/standalone/partitioned_data/
 
   @jboss-datagrid-7
   Scenario: Ensure split happens with DATAGRID_SPLIT
@@ -20,4 +20,4 @@ Feature: Openshift DataGrid SPLIT tests
       | variable | value |
       | DATAGRID_SPLIT | TRUE |
     Then container log should match regex .*Data Grid.*started.*
-    And available container log should contain Attempting to obtain lock for directory:
+    And available container log should contain jboss.server.data.dir = /opt/datagrid/standalone/partitioned_data/