-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy path: submit_tests.py
executable file
·344 lines (315 loc) · 15.5 KB
/
submit_tests.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
#!/usr/bin/env python
#
###################################################################################
# Copyright (c) 2009, Los Alamos National Security, LLC All rights reserved.
# Copyright 2009. Los Alamos National Security, LLC. This software was produced
# under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National
# Laboratory (LANL), which is operated by Los Alamos National Security, LLC for
# the U.S. Department of Energy. The U.S. Government has rights to use,
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
# ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is
# modified to produce derivative works, such modified software should be
# clearly marked, so as not to confuse it with the version available from
# LANL.
#
# Additionally, redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of Los Alamos National Security, LLC, Los Alamos National
# Laboratory, LANL, the U.S. Government, nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
###################################################################################
#
import os,shlex,sys,subprocess,re,time,shutil,pickle,imp
from optparse import OptionParser,OptionGroup
# Parse command line arguments
def parse_args(argv, num_required, num_test_types):
    """Parse the command line arguments.

    Requires two input parameters: the number of required command line
    arguments and the number of test types that are possible to run. The
    test types passed in the command line option will be used to figure out
    which tests in the control file to run.

    Returns (options, args, types_table) where types_table is a boolean
    list indexed directly by test type; index 0 is unused padding so a test
    type number can be used as an index without adjustment.

    Calls parser.error() (which exits) on missing required options, extra
    positional arguments, or an out-of-range test type.
    """
    usage = "\n %prog [options]"
    description = ("This script will submit tests within the tests directory.")
    parser = OptionParser(usage=usage, description=description)
    # lockfile has no matching option here but callers may read the default.
    parser.set_defaults(types="1,2", basedir=".", control_file=None,
        sub_id_file=None, dict_file=None, lockfile=None)
    group = OptionGroup(parser, "Required")
    group.add_option("-c", "--control", dest="control_file", help="Specify "
        "which file to use in the tests directory to obtain a "
        "list of tests to run. Each line should have a number "
        "to specify the test's type and then a test name that "
        "corresponds to a directory in the tests directory "
        "which contains the test.", metavar="FILE")
    group.add_option("-i", "--idfile", dest="sub_id_file", help="Specify "
        "the file to save job ids to. This file can be used "
        "to check the status of running jobs.", metavar="FILE")
    group.add_option("-d", "--dictfile", dest="dict_file", help="Specify "
        "the file to save the python dictionary that keeps "
        "track of submitted tests. This file can be used "
        "to supply a list of tests submitted by this script. "
        "For instance, that list could be used to generate a "
        "list of tests that need their results checked.",
        metavar="FILE")
    parser.add_option_group(group)
    group = OptionGroup(parser, "Optional")
    group.add_option("-t", "--types", dest="types", help="Specify which "
        "types of tests to run. The types are 1 (fuse serial "
        "tests), 2 (fuse parallel tests), and 3 (plfs adio "
        "tests). LIST can be a comma-separated list of any of "
        "these digits:1 or 1,2 or 1,3. Default is %default.",
        metavar="LIST")
    group.add_option("-b", "--basedir", dest="basedir", help="Specify the base "
        "regression directory to be DIR. Only necessary when "
        "this script is run as part of a cron job.",
        metavar="DIR")
    parser.add_option_group(group)
    (options, args) = parser.parse_args(argv)
    if len(args) < num_required:
        parser.error("Required argument not provided. Use -h or --help for help.")
    elif len(args) > num_required:
        # Bug fix: report the arguments beyond the required ones. The
        # original hard-coded args[1:], which is wrong unless num_required
        # happens to be 1.
        parser.error("Unknown extra arguments: " + str(args[num_required:])
            + ". Use -h or --help for help.")
    if options.control_file is None:
        parser.error("Required -c or --control not specified. Use -h or "
            "--help for help.")
    if options.sub_id_file is None:
        parser.error("Required -i or --idfile not specified. Use -h or "
            "--help for help.")
    if options.dict_file is None:
        parser.error("Required -d or --dictfile not specified. Use -h or "
            "--help for help.")
    # Get the types of tests to run into a list.
    types = [int(x) for x in options.types.split(',')]
    # Keep track in a table what types of tests are to be run as well
    # as check the types. 0 is not a valid test type, but index 0 is
    # included so that a test type can be used directly as an index.
    types_table = [False] * (num_test_types + 1)
    for i in types:
        if i < 1 or i > num_test_types:
            parser.error("Invalid test type " + str(i)
                + ". Use -h or --help for help.")
        types_table[i] = True
    return options, args, types_table
def submit_tests(options, types_table):
"""Run tests specified in the control file in the tests directory.
Return values:
Successfully submitted at least one job flag and dictionary of jobs submitted.
0, {non-empty} - At least one test successfully submitted.
1, {} - Problem working with files related to submitting tests.
1, {non-empty} - Problems with submitting tests. Not one test fully submitted.
2, {non-empty} - At least one test successfully submitted, but there were lines
in the test list file that had problems.
"""
test_info = {}
# Test to make sure the id file exits. If it doesn't, let the user know
# and create it. This allows this script to be run outside of the regression
# scripts by itself.
if os.path.isfile(options.sub_id_file):
print ("Using " + str(options.sub_id_file) + " to store "
+ "submitted job ids.")
else:
print ("Id file " + str(options.sub_id_file) + " does not exist. "
+ "Creating it...")
os.system("touch " + str(options.sub_id_file))
print "Opening id file " + str(options.sub_id_file)
try:
f_sub = open(options.sub_id_file, 'a')
except IOError:
print ("Error opening submit id file " + str(test_dir)
+ "/" + str(options.sub_id_file) + ". Exiting...")
return -1, {}
print "Successfully opened " + str(options.sub_id_file)
test_dir = reg_base_dir + "/tests"
print "Entering " + str(test_dir)
try:
os.chdir(test_dir)
except OSError:
print "Error: " + str(test_dir) + " does not exist. Exiting..."
return 1, {}
# Open the control file (will have a list of tests to run and their types)
print "Opening control file " + str(options.control_file)
try:
f_cont = open(options.control_file, 'r')
except IOError:
print ("Error opening control file " + str(test_dir)
+ "/" + str(options.control_file) + ". Exiting...")
return 1, {}
print "Successfully opened " + str(options.control_file)
line_num = 0 # Keep track of the line we're on in the control file
ids = [] # Keep track of the id of the last job submitted
loaded = False # Keep track if import or reload statement is needed.
last_id = -1 # Keep track of the last job successfully submitted.
# Flag when at least one test reports successfully submitted. This will
# be the return value: 0 for at least one submitted, 1 for none submitted.
succ_submitted = 1
# Flag to tell whether there was a problem with one of the lines in the
# test list file.
test_list_line_problem = False
for line in f_cont:
# Parse the control file line by line, looking for valid lines
# (lines with 2 non-commented fields). Use tokens to split up the line
line_num += 1
tokens = shlex.split(line,comments=True)
if tokens == []: # Empty line or a line with only comments in it.
continue
if len(tokens) != 2:
print ("Error in " + str(options.control_file) + " line " + str(line_num)
+ ". Improper number of fields. Skipping...")
test_list_line_problem = True
continue
test_type = tokens[0]
# Check the test type
if (int(test_type)) < 1 or (int(test_type) > (len(types_table) - 1)):
print ("Error in " + str(options.control_file) + " line " + str(line_num)
+ ". Improper test type " + str(test_type)
+ ". Skipping...")
test_list_line_problem = True
continue
# Get the test's directory name (its location)
test_loc = tokens[1]
# We now have where to run the test and its valid test type
if types_table[int(test_type)] == True:
print ("Entering " + str(test_dir) + "/" + str(test_loc))
try:
os.chdir(test_loc)
except OSError:
print ("Error in " + str(options.control_file) + " line " + str(line_num)
+ ". No such directory " + str(test_loc)
+ ". Skipping...")
continue
# Print out a delimiter to make parsing the output easier
print "-" * 50
print "Submitting test in directory " + str(test_loc)
# Load test.py
(fp, path, desc) = imp.find_module('reg_test', ['./'])
reg_test = imp.load_module('reg_test', fp, path, desc)
fp.close()
ids = reg_test.main()
# Check to see if the test was successfully submitted.
if ids == [] or ids == None:
print ("Error: there was a problem getting the last job id "
"from the test; nothing was returned by reg_test.py. "
"Unable to keep track of this test.")
test_info[test_loc] = ['Unable to submit',
'Test passed nothing back when submitted.']
# See if any submittals failed.
if -1 in ids or "-1" in ids:
print "Error: Unable to fully submit test " + str(test_loc)
test_info[test_loc] = ['Unable to submit', '']
for i in range(ids.count("-1")):
ids.remove("-1")
for i in range(ids.count(-1)):
ids.remove(-1)
if len(ids) > 0:
print ("Job ids associated with this test (the regression "
"suite will not wait for these to finish to check results):")
for i in ids:
print i
# See if a test was skipped
elif -2 in ids or "-2" in ids:
print (str(test_loc) + " skipped due to configuration")
test_info[test_loc] = ['Skipped due to configuration', '']
else:
succ_submitted = 0
print ("Submitted test in directory " + str(test_loc))
if (len(ids) == 1) and (ids[0] == 0 or ids[0] == "0"):
# Don't have to wait for this test to finish
print ("This test does not require the regression suite to "
"wait for it.")
else:
print ("Job ids associated with this test:")
for i in ids:
print (i)
f_sub.write(str(i) + "\n")
last_id = ids[-1]
test_info[test_loc] = ['Submitted', '']
ids = []
# Go back to the tests directory and do the next test.
print "Entering " + str(test_dir)
os.chdir(test_dir)
print "-" * 50
print "-" * 50
f_cont.close()
f_sub.close()
print "Entering " + str(reg_base_dir)
os.chdir(reg_base_dir)
# Only return 2 if at least one job was successfully submitted and there
# was a problem with one of the lines in the test list file.
if succ_submitted == 0 and test_list_line_problem == True:
return 2, test_info
else:
return succ_submitted, test_info
# Main routine
def main(argv=None):
    """The main routine for submitting tests inside the regression suite.

    argv defaults to sys.argv[1:] when not supplied.

    Return values:
    0: At least some jobs submitted.
    1: No jobs submitted.
    2: At least some jobs submitted, but there was an error in parsing a line
    in the test list file.
    """
    if argv is None:
        argv = sys.argv[1:]
    required_args = 0
    num_test_types = 3
    options, args, types_table = parse_args(argv=argv,
        num_required=required_args, num_test_types=num_test_types)
    # submit_tests needs the regression base directory; it is shared through
    # a module-level global rather than a parameter.
    global reg_base_dir
    if options.basedir == ".":
        reg_base_dir = os.getcwd()
    else:
        reg_base_dir = options.basedir
    # Submit tests, getting a flag about whether or not at least one test was
    # successfully submitted and a dictionary of jobs that need to be checked
    # and reported on.
    succ_sub, test_info = submit_tests(options=options, types_table=types_table)
    if succ_sub == 1:
        print ("Error: problems with submitting tests. No tests fully "
            "submitted.")
        return 1
    # Write out the test_info dictionary to be used later.
    print ("Writing dictionary containing what tests were submitted to "
        + str(options.dict_file) + ".")
    try:
        # Bug fix: open in binary mode; pickle streams are byte-oriented and
        # text mode corrupts them on some platforms.
        f = open(options.dict_file, 'wb')
        try:
            pickle.dump(test_info, f)
        finally:
            f.close()
    except IOError as detail:
        print ("Error writing dictionary file " + str(options.dict_file)
            + ": " + str(detail) + ".\nExiting without writing.")
        return 1
    print ("Successfully wrote dictionary to " + str(options.dict_file) + ".")
    # succ_sub could be 0 or 2 at this point.
    return succ_sub
# Script entry point: the process exit status is main()'s return value
# (0 = some jobs submitted, 1 = none, 2 = submitted but bad list lines).
if __name__ == "__main__":
    sys.exit(main())