#
# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# This component and the accompanying materials are made available
# under the terms of the License "Eclipse Public License v1.0"
# which accompanies this distribution, and is available
# at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
# Initial Contributors:
# Nokia Corporation - initial contribution.
#
# Contributors:
#
# Description:
#
# Runs the specified suite of raptor tests
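#
# Illustrative usage (the option flags are defined below; "run" is this
# script, whose OptionParser prog is set to "run"):
#   run -s smoke -t annofile    # suites matching /.*smoke.*/, tests matching /.*annofile.*/
#   run --what-failed           # re-run only what failed in the previous run
#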

import os
import sys
import re
import imp
import stat         # needed for stat.S_IRWXU in TestRun.what_failed()
import datetime
import traceback

raptor_tests = imp.load_source("raptor_tests", "common/raptor_tests.py")

# Command line options ########################################################
from optparse import OptionParser

parser = OptionParser(
        prog = "run",
        usage = "%prog [Options]")

parser.add_option("-u", "--upload", action = "store", type = "string",
        dest = "upload", default = None,
        help = "Path for uploading results (can be a UNC path)")
parser.add_option("-b", "--branch", action = "store", type = "choice",
        dest = "branch", choices = ["master", "m", "fix", "f", "wip", "w"],
        help = "string indicating which branch is being tested: " +
                "master, fix or wip. Default is 'fix'")
parser.add_option("-s", "--suite", action = "store", type = "string",
        dest = "suite", help = "regex to use for selecting test suites")
parser.add_option("-t", "--tests", action = "store", type = "string",
        dest = "tests", help = "regex to use for selecting tests")
parser.add_option("-d", "--debug", action = "store_true", dest = "debug_mode",
        default = False, help = "Turns on debug mode")
parser.add_option("--test-home", action = "store", type = "string",
        dest = "test_home",
        help = "Location of custom .sbs_init.xml (name of a directory in " +
                "'custom_options'): test/custom_options/<test_home>/.sbs_init.xml")
parser.add_option("--what-failed", action = "store_true", dest = "what_failed",
        help = "Re-run all the tests that failed in the previous test run")
parser.add_option("--clean", action = "store_true", dest = "clean",
        help = "Clean EPOCROOT after each test is run")


(options, args) = parser.parse_args()

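# The "what_failed" file is written by TestRun.what_failed() below; it holds
# a single line of command-line options, e.g. (illustrative names):
#   -s "smoke_suite" -t "^test_one.py|^test_two.py"
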
# Check for --what-failed and override '-s' and '-t' (including flagless regex)
if options.what_failed:
    try:
        what_failed_file = open("what_failed", "r")
        what_failed = what_failed_file.readline()
        what_failed_file.close()
        print "Running: run " + what_failed

        # Pull the suite regex out of the first pair of double quotes
        first = what_failed.find('"')
        second = what_failed.find('"', (first + 1))
        options.suite = what_failed[(first + 1):second]

        # Pull the test regex out of the second pair of double quotes
        first = what_failed.find('"', (second + 1))
        second = what_failed.find('"', (first + 1))
        options.tests = what_failed[(first + 1):second]
    except IOError:
        # If no file exists, nothing failed, so run as usual
        pass

# Allow a flagless test regex: the last positional argument is used as -t
if (options.tests == None) and (len(args) > 0):
    options.tests = args[-1]
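    # e.g. "run annofile" behaves like "run -t annofile" (illustrative)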

if options.upload != None:
    if options.branch != None:
        # Expand the single-letter abbreviations
        if options.branch == "m":
            branch = "master"
        elif options.branch == "f":
            branch = "fix"
        elif options.branch == "w":
            branch = "wip"
        else:
            branch = options.branch
    else:
        print "Warning: Test branch not set - use " + \
                "'-b [master|fix|wip]'\n  Using default of 'fix'..."
        branch = "fix"

if options.debug_mode:
    raptor_tests.activate_debug()


# Set the $HOME environment variable for finding a custom .sbs_init.xml
if options.test_home != None:
    home_dir = options.test_home
    if home_dir in os.listdir("./custom_options"):
        os.environ["HOME"] = os.environ["SBS_HOME"] + "/test/custom_options/" \
                + home_dir + "/"
    else:
        print "Warning: Path to custom .sbs_init.xml file not found (" + \
                home_dir + ")\nUsing defaults..."
        options.test_home = None


def format_milliseconds(microseconds):
    """ format a microsecond time as a zero-padded millisecond string """
    # datetime gives microseconds; divide by 1000 and pad to three digits
    return "%03d" % (microseconds // 1000)
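
# e.g. format_milliseconds(987654) -> "987", format_milliseconds(5000) -> "005"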


class TestRun(object):
    """Represents any series of tests"""

    def __init__(self):
        self.test_set = []
        self.failed_tests = []
        self.error_tests = []
        self.pass_total = 0
        self.fail_total = 0
        self.skip_total = 0
        self.exception_total = 0
        self.test_total = 0
        # For --what-failed:
        self.suites_failed = []
        self.tests_failed = []

    def aggregate(self, atestrun):
        """ Aggregate other test results into this one """
        self.test_set.append(atestrun)
        self.test_total += len(atestrun.test_set)

    def show(self):
        for test_set in self.test_set:
            print "\n\n" + str(test_set.suite_dir) + ":\n"

            # If a suite has failed/erroneous tests, add it to what_failed
            if (test_set.fail_total + test_set.exception_total) > 0:
                self.suites_failed.append(test_set.suite_dir)

            if len(test_set.test_set) < 1:
                print "No tests run"
            else:
                print "PASSED: " + str(test_set.pass_total)
                print "FAILED: " + str(test_set.fail_total)
                if test_set.skip_total > 0:
                    print "SKIPPED: " + str(test_set.skip_total)
                if test_set.exception_total > 0:
                    print "EXCEPTIONS: " + str(test_set.exception_total)

            if test_set.fail_total > 0:
                print "\nFAILED TESTS:"

                # Add each failed test to what_failed and print it
                for test in test_set.failed_tests:
                    self.tests_failed.append("^" + test + ".py")
                    print "\t", test

            if test_set.exception_total > 0:
                print "\nERRONEOUS TESTS:"

                # Add each erroneous test to what_failed and print it;
                # error_tests holds str(module), e.g. <module 'name' from
                # 'path'>, so extract the name from between the quotes
                for test in test_set.error_tests:
                    first = test.find("'")
                    second = test.find("'", (first + 1))
                    self.tests_failed.append("^" +
                            test[(first + 1):second] + ".py")
                    print "\t", test

    def what_failed(self):
        """Create the file for --what-failed if there were failing tests"""
        if len(self.suites_failed) > 0:
            # Write the failed suites and tests to the file as the '-s' and
            # '-t' command-line options, with regex alternation between items
            what_failed_file = open("what_failed", "w")
            what_failed_file.write('-s "' + "|".join(self.suites_failed) +
                    '" -t "' + "|".join(self.tests_failed) + '"')
            what_failed_file.close()

        else:
            # If there were no failing tests this time, remove any previous file
            try:
                os.remove("what_failed")
            except OSError:
                try:
                    # The file may be read-only: make it writable and retry
                    os.chmod("what_failed", stat.S_IRWXU)
                    os.remove("what_failed")
                except OSError:
                    pass


class Suite(TestRun):
    """A test suite"""

    python_file_regex = re.compile(r"(.*)\.py$", re.I)

    def __init__(self, dir, parent):
        TestRun.__init__(self)
        self.suite_dir = dir

        # Upload directory (if set)
        self.upload_location = parent.upload_location

        # Regex for searching for tests
        self.test_file_regex = parent.test_file_regex
        self.test_pattern = parent.testpattern

    def run(self):
        """run the suite"""

        self.time_stamp = datetime.datetime.now()
        self.results = {}
        self.start_times = {}
        self.end_times = {}

        print "\n\nRunning " + str(self.suite_dir) + "..."

        # Iterate through all files in the specified directory
        for test in os.listdir(self.suite_dir):
            # Only check '*.py' files
            name_match = self.python_file_regex.match(test)
            if name_match is not None:
                if self.test_file_regex is not None:
                    # Only import the files that match the -t regex (if given)
                    name_match = self.test_file_regex.match(test)
                else:
                    name_match = 1
                if name_match is not None:
                    import_name = test[:-3]
                    try:
                        self.test_set.append(imp.load_source(import_name,
                                raptor_tests.ReplaceEnvs(self.suite_dir
                                + "/" + test)))
                    except:
                        # Report the import failure but carry on
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        print "\n" + exc_type.__name__ + ":", exc_value
                        traceback.print_tb(exc_traceback)

        test_number = 0
        test_total = len(self.test_set)
        if test_total < 1:
            print "No tests in suite " + self.suite_dir + \
                    " matched by specification '" + str(self.test_pattern) + \
                    "' (regex: /.*" + str(self.test_pattern) + ".*/)\n"
        # Run each test, capturing all its details and its results
        for test in self.test_set:
            test_number += 1
            # Save the start/end times in dictionaries for TMS
            start_time = datetime.datetime.now()
            try:
                print "\n\nTEST " + str(test_number) + "/" + \
                        str(test_total) + ":\n",
                test_object = test.run()

                end_time = datetime.datetime.now()

                # Add leading 0s
                test_object.id = raptor_tests.fix_id(test_object.id)

                # datetime has no millisecond field, so derive the
                # milliseconds from microseconds/1000 (zero-padded)
                start_milliseconds = \
                        format_milliseconds(start_time.microsecond)
                end_milliseconds = \
                        format_milliseconds(end_time.microsecond)

                self.start_times[test_object.id] = \
                        start_time.strftime("%H:%M:%S:" +
                        start_milliseconds)
                self.end_times[test_object.id] = \
                        end_time.strftime("%H:%M:%S:" +
                        end_milliseconds)

                run_time = (end_time - start_time)

                run_time_seconds = (str(run_time.seconds) + "." +
                        format_milliseconds(run_time.microseconds))
                print ("RunTime: " + run_time_seconds + "s")

                # Add to the pass/fail counts and save the result
                if test_object.result == raptor_tests.SmokeTest.PASS:
                    self.pass_total += 1
                    self.results[test_object.id] = "Passed"
                elif test_object.result == raptor_tests.SmokeTest.FAIL:
                    self.fail_total += 1
                    self.results[test_object.id] = "Failed"
                    self.failed_tests.append(test_object.name)
                elif test_object.result == raptor_tests.SmokeTest.SKIP:
                    self.skip_total += 1

                # Clean epocroot after each test if --clean was specified
                if options.clean:
                    print "\nCLEANING TEST RESULTS..."
                    raptor_tests.clean_epocroot()

            except:
                # Count the test as erroneous and report the exception
                exc_type, exc_value, exc_traceback = sys.exc_info()
                print "\nTEST ERROR:"
                print exc_type.__name__ + ":", exc_value
                traceback.print_tb(exc_traceback)
                self.exception_total += 1
                self.error_tests.append(str(self.test_set[test_number - 1]))

        if self.upload_location != None:
            self.create_csv()

        end_time_stamp = datetime.datetime.now()

        runtime = end_time_stamp - self.time_stamp
        seconds = (str(runtime.seconds) + "." +
                format_milliseconds(runtime.microseconds))
        if options.upload:
            self.create_tri(seconds)

        print ("\n" + str(self.suite_dir) + " RunTime: " + seconds + "s")

    def create_csv(self):
        """
        Create a CSV file of the smoke test results, in the format needed
        to upload them to TMS QC
        """

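        # Each row of the CSV is of the form (illustrative ID and times):
        #   PCT-SBSV2-smoke_suite-0001,10:42:01:123,10:42:05:456,Passed
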
        # Sort the results dictionary by its keys (test IDs); run_tests is
        # the module-level SuiteRun created at the bottom of this file
        id_list = run_tests.sort_dict(self.results)

        self.test_file_name = (self.suite_dir + "_" +
                self.time_stamp.strftime("%Y-%m-%d_%H-%M-%S") + "_" +
                branch + "_results.csv")
        # The path for file-creation on the server (includes the file name)
        self.test_path = (self.upload_location + "/csv/" + self.suite_dir + "/"
                + self.test_file_name)

        try:
            if not os.path.isdir(self.upload_location + "/csv/" +
                    self.suite_dir):
                os.makedirs(self.upload_location + "/csv/" + self.suite_dir)

            csv_file = open(
                    raptor_tests.ReplaceEnvs(os.path.normpath(self.test_path)),
                    "w")
            csv_file.write("TestCaseID,StartTime,EndTime,Result\n")

            for test_id in id_list:
                csv_file.write("PCT-SBSV2-" + self.suite_dir + "-" + test_id +
                        "," + str(self.start_times[test_id]) + "," +
                        str(self.end_times[test_id]) + "," +
                        self.results[test_id] + "\n")
            csv_file.close()

        except (IOError, OSError), e:
            print "SBS_TESTS: Error:", e


    def create_tri(self, overall_seconds):
        """
        Create a TRI (XML) file containing the location of the CSV file,
        as needed to upload the results to TMS QC
        """
        # Path for the TRI file
        tri_path = (self.upload_location + "/new/" + self.suite_dir +
                "_" + self.time_stamp.strftime("%Y-%m-%d_%H-%M-%S") + ".xml")
        run_name_timestamp = self.time_stamp.strftime(self.suite_dir +
                "%Y-%m-%d_%H-%M-%S")
        date_time_timestamp = self.time_stamp.strftime("%d.%m.%Y %H:%M:%S")
        test_set_name = "Root\\Product Creation Tools\\Regression\\" + \
                "SBS v2 (Raptor)\\" + self.suite_dir + "_"
        if sys.platform.startswith("win"):
            test_set_name += ("WinXP_" + branch)
        else:
            test_set_name += ("Linux_" + branch)

        # /mnt/ -> //  fixes the difference between lon-rhdev mount paths
        # and Windows paths
        if not sys.platform.startswith("win"):
            if self.test_path.startswith("/mnt/"):
                self.test_path = self.test_path.replace("mnt", "", 1)

        try:
            tri_file = open(
                    raptor_tests.ReplaceEnvs(os.path.normpath(tri_path)),
                    "w")
            tri_file.write(
                    "<TestRunInfo>\n" +
                    "\t<RunName>\n\t\t" +
                    run_name_timestamp +
                    "\n\t</RunName>\n" +
                    "\t<TestGroup>\n" +
                    "\t\tSBSv2 (Non-SITK)\n" +
                    "\t</TestGroup>\n" +
                    "\t<DateTime>\n\t\t" +
                    date_time_timestamp +
                    "\n\t</DateTime>\n" +
                    "\t<RunDuration>\n\t\t" +
                    overall_seconds +
                    "\n\t</RunDuration>\n" +
                    '\t<TestSet name="' + test_set_name + '">\n' +
                    "\t\t<TestResults>\n\t\t\t" +
                    self.test_path +
                    "\n\t\t</TestResults>\n" +
                    "\t</TestSet>\n" +
                    "</TestRunInfo>")
            tri_file.close()
            print "Tests uploaded to '" + self.upload_location + "' (" + \
                    branch + ")"
        except (IOError, OSError), e:
            print "SBS_TESTS: Error:", e


class SuiteRun(TestRun):
    """ Represents a 'run' of a number of test suites """

    def __init__(self, suitepattern = None, testpattern = None,
            upload_location = None):
        TestRun.__init__(self)

        # Add the common directory to the list of paths searched for modules
        sys.path.append(raptor_tests.ReplaceEnvs("$(SBS_HOME)/test/common"))

        # With no -s option, run every directory whose name ends in "_suite"
        if suitepattern:
            self.suite_regex = re.compile(".*" + suitepattern + ".*", re.I)
        else:
            self.suite_regex = re.compile(r".*_suite$", re.I)

        if testpattern:
            self.test_file_regex = re.compile(".*" + testpattern + ".*",
                    re.I)
        else:
            self.test_file_regex = None

        self.suitepattern = suitepattern
        self.testpattern = testpattern
        self.upload_location = upload_location

    def run_tests(self):
        """
        Run all the tests in each suite (directory) matching the suite regex
        """

        suites = []
        for dir in os.listdir("."):
            name_match = self.suite_regex.match(dir)
            # Look into each entry that matches the suite pattern, checking
            # that it is actually a directory
            if name_match is not None and os.path.isdir(dir):
                s = Suite(dir, self)
                s.run()
                self.aggregate(s)
                suites.append(dir)

        # Print which options were used
        if options.test_home == None:
            options_dir = "defaults)"
        else:
            options_dir = "'" + options.test_home + "' options file)"
        print "\n(Tests run using %s" % options_dir

        # Summarise the entire test run
        if self.suitepattern and (self.test_total < 1):
            print "\nNo suites matched specification '" + self.suitepattern + \
                    "'\n"
        else:
            print "Overall summary (%d suites, %d tests):" \
                    % (len(suites), self.test_total)
            self.show()
            self.what_failed()

    def sort_dict(self, input_dict):
        """
        Return the dictionary's keys as a sorted list
        """
        keys = input_dict.keys()
        keys.sort()
        return keys


# Clean epocroot before running the tests
raptor_tests.clean_epocroot()
run_tests = SuiteRun(suitepattern = options.suite, testpattern = options.tests,
        upload_location = options.upload)
run_tests.run_tests()