#!/bin/bash
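#
# Run the GlusterFS regression test suite against the build installed
# under /build/install. Any new core files produced during the run are
# backtraced and then archived together with the build and the shared
# libraries the cores reference, so crashes can be analysed offline.
# All arguments are passed straight through to run-tests.sh.
#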
# Set the locations we'll be using
BASE="/build/install"
ARCHIVE_BASE="/archives"
ARCHIVED_BUILDS="archived_builds"
UNIQUE_ID="${JOB_NAME}-${BUILD_ID}"
SERVER=$(hostname)
LIBLIST=${BASE}/cores/liblist.txt
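# JOB_NAME, BUILD_ID and (later) WORKSPACE are standard Jenkins environment
# variables; LOG_KEY is assumed to be an SSH key the job provides for
# uploading archives to the log server.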
# Create the folders if they don't exist
mkdir -p ${BASE}
mkdir -p ${ARCHIVE_BASE}/${ARCHIVED_BUILDS}
# Clean up old archives (find applies its tests in order, so -type, -name
# and -mtime must come before -delete)
find ${ARCHIVE_BASE} -type f -name '*.tgz' -mtime +15 -delete
# Get the list of shared libraries that a core file uses.
# The first argument is the path to the core file.
getliblistfromcore() {
    # Clean up the tmp file for gdb output
    rm -f ${BASE}/cores/gdbout.txt
    # Run gdb to dump the raw shared-library listing to a file
    gdb -c "$1" -q -ex "set pagination off" -ex "info sharedlibrary" -ex q 2>/dev/null > ${BASE}/cores/gdbout.txt
    # Once the header line containing "Shared Object Library" appears in
    # the raw gdb output, start extracting the shared-library path from
    # each subsequent line and append it to the output file.
    set +x
    local STARTPR=0
    while IFS=' ' read -r f1 f2 f3 f4 f5 f6 f7 fdiscard; do
        if [[ $STARTPR == 1 && "$f4" != "" ]]; then
            printf "%s\n" "$f4" >> ${LIBLIST}
        fi
        if [[ "$f5" == "Shared" && "$f6" == "Object" && "$f7" == "Library" ]]; then
            STARTPR=1
        fi
    done < "${BASE}/cores/gdbout.txt"
    set -x
    # Clean up the tmp file for gdb output
    rm -f ${BASE}/cores/gdbout.txt
}
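# For reference, the "info sharedlibrary" output parsed above looks roughly
# like this (format assumed from typical gdb behaviour):
#
#   From                To                  Syms Read   Shared Object Library
#   0x00007f9c3e021000  0x00007f9c3e1b3000  Yes         /lib64/libc.so.6
#
# On the header line the words "Shared Object Library" arrive in $f5/$f6/$f7,
# which flips STARTPR; on every later line the library path arrives in $f4.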
# Determine the python version used by the installed Gluster
PY_NAME=($(ls "${BASE}/lib/" | grep "python"))
if [[ ${#PY_NAME[@]} -ne 1 ]]; then
    echo "Unable to determine python location" >&2
    exit 1
fi
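# e.g. with a single ${BASE}/lib/python3.6 directory, PY_NAME[0] is
# "python3.6" and PYTHONPATH below becomes
# ${BASE}/lib/python3.6/site-packages (the version shown is only an example)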
# Point to the build we're testing
export PATH="${BASE}/sbin${PATH:+:${PATH}}"
export PYTHONPATH="${BASE}/lib/${PY_NAME[0]}/site-packages${PYTHONPATH:+:${PYTHONPATH}}"
export LIBRARY_PATH="${BASE}/lib${LIBRARY_PATH:+:${LIBRARY_PATH}}"
export LD_LIBRARY_PATH="${BASE}/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"
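# After these exports PATH starts with /build/install/sbin, so the freshly
# built gluster binaries and libraries shadow any system-installed copies.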
# Save the current core pattern and redirect cores to / with predictable names
case $(uname -s) in
    'Linux')
        old_core_pattern=$(/sbin/sysctl -n kernel.core_pattern)
        /sbin/sysctl -w kernel.core_pattern="/%e-%p.core"
        ;;
    'NetBSD')
        old_core_pattern=$(/sbin/sysctl -n kern.defcorename)
        /sbin/sysctl -w kern.defcorename="/%n-%p.core"
        ;;
esac
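# With the patterns above, a crash of e.g. glusterfsd with pid 12345 yields
# /glusterfsd-12345.core (%e/%n expand to the executable name, %p to the
# pid), which is what the /*.core globs below rely on.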
# Count the core files already present in / before the test run
core_count=$(ls -l /*.core 2>/dev/null | wc -l)
old_cores=$(ls /*.core 2>/dev/null)
# Run the regression tests
if [ -x ./run-tests.sh ]; then
    # If we're in the root of a GlusterFS source repo, use its tests
    ./run-tests.sh "$@"
    RET=$?
elif [ -x ${BASE}/share/glusterfs/run-tests.sh ]; then
    # Otherwise, use the tests from the installed location
    ${BASE}/share/glusterfs/run-tests.sh "$@"
    RET=$?
else
    echo "run-tests.sh not found; cannot run the regression suite" >&2
    RET=1
fi
# If there are new core files in /, archive this build for later analysis
cur_count=$(ls -l /*.core 2>/dev/null|wc -l);
cur_cores=$(ls /*.core 2>/dev/null);
if [ ${cur_count} -ne ${core_count} ]; then
    # Collect the core files that were not present before the test run
    declare -a corefiles
    x=0
    for word1 in ${cur_cores}; do
        for word2 in ${old_cores}; do
            if [ ${word1} == ${word2} ]; then
                x=1
                break
            fi
        done
        if [[ ${x} -eq 0 ]]; then
            corefiles=("${corefiles[@]}" "${word1}")
        fi
        x=0
    done
    core_count=$(echo "${corefiles[@]}" | wc -w)
    # Dump a backtrace from each newly generated core file
    if [ ${core_count} -gt 0 ]; then
        for corefile in "${corefiles[@]}"
        do
            # gdb is run twice: the first call lets the raw 'info proc exe'
            # output land in the console log, the second is captured so the
            # executable path can be extracted from it
            set -x
            gdb -ex "core-file ${corefile}" -ex \
                'set pagination off' -ex 'info proc exe' -ex q \
                2>/dev/null
            executable_name=$(gdb -ex "core-file ${corefile}" -ex \
                'set pagination off' -ex 'info proc exe' -ex q \
                2>/dev/null | tail -1 | cut -d "'" -f2 | cut -d " " -f1)
            executable_path=$(which "${executable_name}")
            set +x
            echo ""
            echo "========================================================="
            echo " Start printing backtrace"
            echo " program name : ${executable_path}"
            echo " corefile : ${corefile}"
            echo "========================================================="
            gdb -nx --batch --quiet -ex "thread apply all bt full" \
                -ex "quit" --exec=${executable_path} --core=${corefile}
            echo "========================================================="
            echo " Finish backtrace"
            echo " program name : ${executable_path}"
            echo " corefile : ${corefile}"
            echo "========================================================="
            echo ""
        done
    fi
    # Archive the build and any cores
    mkdir -p ${BASE}/cores
    mv /*.core ${BASE}/cores
    filename=${ARCHIVED_BUILDS}/build-install-${UNIQUE_ID}.tar
    # Remove temporary files generated to stash libraries from cores
    rm -f ${LIBLIST}
    rm -f ${LIBLIST}.tmp
    # Generate the library list from all cores
    CORELIST="$(ls ${BASE}/cores/*.core)"
    for corefile in $CORELIST; do
        getliblistfromcore $corefile
    done
    # Get rid of duplicates
    sort ${LIBLIST} 2>/dev/null | uniq > ${LIBLIST}.tmp
    # Drop libraries under BASE, as they are already packaged
    grep -v "${BASE}" ${LIBLIST}.tmp > ${LIBLIST}
    # tar up the build tree and the cores for analysis
    tar -cf ${ARCHIVE_BASE}/${filename} ${BASE}/{sbin,bin,lib,libexec,cores}
    # Append the system libraries that were part of the cores; the 'h'
    # option follows symlinks so the actual contents land in the tarball
    tar -rhf ${ARCHIVE_BASE}/${filename} -T ${LIBLIST}
    bzip2 ${ARCHIVE_BASE}/${filename}
    # Clean up the temporary files
    rm -f ${LIBLIST}
    rm -f ${LIBLIST}.tmp
    # Delete any archived file larger than 1G, including the current
    # archive if it is too large (again, -type and -size must precede
    # -delete)
    find ${ARCHIVE_BASE} -type f -size +1G -delete
    if [[ ${SERVER} == *"aws"* ]]; then
        scp -o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no" -i "$LOG_KEY" /archives/archived_builds/build-install-${UNIQUE_ID}.tar.bz2 "_logs-collector@logs.aws.gluster.org:/var/www/glusterfs-logs/$JOB_NAME-$BUILD_ID.bz2" || true
        echo "Cores and builds archived in https://logs.aws.gluster.org/$JOB_NAME-$BUILD_ID.bz2"
    else
        echo "Cores and builds archived in http://${SERVER}/${filename}.bz2"
    fi
    echo "Open the core with the following command to get a proper stack trace"
    echo "Example: from the root of the extracted tarball"
    echo -e "\t\tgdb -ex 'set sysroot ./' -ex 'core-file ./build/install/cores/xxx.core' <target, say ./build/install/sbin/glusterd>"
    # Forcefully fail the regression run if it has not already failed
    RET=1
fi
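# A minimal sketch of analysing an archived build on another machine
# (exact URL and file names depend on the job; see the echoes above):
#   wget http://<server>/archived_builds/build-install-<job>-<id>.tar.bz2
#   tar -xjf build-install-<job>-<id>.tar.bz2
#   gdb -ex 'set sysroot ./' -ex 'core-file ./build/install/cores/<core>' \
#       ./build/install/sbin/glusterd
# (tar strips the leading '/', so the tree extracts as ./build/install/...)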
# On failure, collect the GlusterFS and system logs into the workspace
if [ ${RET} -ne 0 ]; then
    tar -czf $WORKSPACE/glusterfs-logs.tgz /var/log/glusterfs /var/log/messages*
fi
# Restore the original core pattern
case $(uname -s) in
    'Linux')
        /sbin/sysctl -w kernel.core_pattern="${old_core_pattern}"
        ;;
    'NetBSD')
        /sbin/sysctl -w kern.defcorename="${old_core_pattern}"
        ;;
esac
exit ${RET};