author      timothy.murphy@nokia.com
date        Fri, 26 Feb 2010 17:07:56 +0000
branch      fix
changeset   278:c38bfd29ee57
parent      135:0092642f198e
permissions -rw-r--r--
#
# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# This component and the accompanying materials are made available
# under the terms of the License "Eclipse Public License v1.0"
# which accompanies this distribution, and is available
# at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
# Initial Contributors:
# Nokia Corporation - initial contribution.
#
# Contributors:
#
# Description:
#
# Runs the specified suite of raptor tests
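#
# Typical invocations (suite/test names and paths below are illustrative only):
#   run -s smoke_suite -t annofile    # run tests matching "annofile" in suites matching "smoke_suite"
#   run -b fix -u /path/to/results    # tag results as branch "fix" and upload them to the given location
#   run --what-failed --clean         # re-run the tests that failed last time, cleaning EPOCROOT after each
#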
import os
import sys
import re
import imp
import stat        # needed for stat.S_IRWXU in TestRun.what_failed()
import datetime
import traceback
raptor_tests = imp.load_source("raptor_tests", "common/raptor_tests.py")

# Command line options ########################################################
from optparse import OptionParser

parser = OptionParser(
		prog = "run",
		usage = "%prog [Options]")

parser.add_option("-u", "--upload", action = "store", type = "string",
		dest = "upload", default = None,
		help = "Path for uploading results (Can be UNC path)")
parser.add_option("-b", "--branch", action = "store", type = "choice",
		dest = "branch", choices = ["master", "m", "fix", "f", "wip", "w"],
		help = "string indicating which branch is being tested:\n" + \
		"master, fix or wip. Default is 'fix'")
parser.add_option("-s", "--suite", action = "store", type = "string",
		dest = "suite", help = "regex to use for selecting test suites")
parser.add_option("-t", "--tests", action = "store", type = "string",
		dest = "tests", help = "regex to use for selecting tests")
parser.add_option("-d", "--debug", action = "store_true", dest = "debug_mode",
		default = False, help = "Turns on debug-mode")
parser.add_option("--test-home", action = "store", type = "string",
		dest = "test_home",
		help = "Location of custom .sbs_init.xml (name of directory in " +
		"'custom_options'): test/custom_options/<test_home>/.sbs_init.xml")
parser.add_option("--what-failed", action = "store_true", dest = "what_failed",
		help = "Re-run all the tests that failed in the previous test run")
parser.add_option("--clean", action = "store_true", dest = "clean",
		help = "Clean EPOCROOT after each test is run")


(options, args) = parser.parse_args()

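# The "what_failed" file read below is written by TestRun.what_failed() as a
# single line of command-line options, e.g. (illustrative names):
#   -s "smoke_suite" -t "^test_one.py|^test_two.py"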
# Check for --what-failed and override '-s' and '-t' (including flagless regex)
if options.what_failed:
	try:
		what_failed_file = open("what_failed", "r")
		what_failed = what_failed_file.readline()
		what_failed_file.close()
		print "Running: run " + what_failed

		first = what_failed.find('"')
		second = what_failed.find('"', (first + 1))
		options.suite = what_failed[(first + 1):second]

		first = what_failed.find('"', (second + 1))
		second = what_failed.find('"', (first + 1))
		options.tests = what_failed[(first + 1):second]
	except:
		# If no file exists, nothing failed, so run as usual
		pass

# Allow flagless test regex
if (options.tests == None) and (len(args) > 0):
	options.tests = args[len(args) - 1]

if options.upload != None:
	if options.branch != None:
		if options.branch == "m":
			branch = "master"
		elif options.branch == "f":
			branch = "fix"
		elif options.branch == "w":
			branch = "wip"
		else:
			branch = options.branch
	else:
		print "Warning: Test branch not set - Use " + \
				"'-b [master|fix|wip]'\n Using default of 'fix'..."
		branch = "fix"

if options.debug_mode:
	raptor_tests.activate_debug()

# Set $HOME environment variable for finding a custom .sbs_init.xml
if options.test_home != None:
	home_dir = options.test_home
	if home_dir in os.listdir("./custom_options"):
		os.environ["HOME"] = os.environ["SBS_HOME"] + "/test/custom_options/" \
				+ home_dir + "/"
	else:
		print "Warning: Path to custom .sbs_init.xml file not found (" + \
				home_dir + ")\nUsing defaults..."
		options.test_home = None


def format_milliseconds(microseconds):
	""" format a microsecond time in milliseconds """
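	# Illustrative behaviour: 7000 -> "007", 56000 -> "056", 0 -> "000";
	# values of 100 milliseconds or more come back unpadded as plain ints
	# (callers wrap the result in str()).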
	milliseconds = (microseconds / 1000)
	if milliseconds == 0:
		return "000"
	elif milliseconds < 10:
		return "00" + str(milliseconds)
	elif milliseconds < 100:
		return "0" + str(milliseconds)
	return milliseconds

class TestRun(object):
	"""Represents any series of tests"""
	def __init__(self):
		self.test_set = []
		self.failed_tests = []
		self.error_tests = []
		self.pass_total = 0
		self.fail_total = 0
		self.skip_total = 0
		self.exception_total = 0
		self.test_total = 0
		# For --what-failed:
		self.suites_failed = []
		self.tests_failed = []

	def aggregate(self, atestrun):
		""" Aggregate other test results into this one """
		self.test_set.append(atestrun)
		self.test_total += len(atestrun.test_set)

	def show(self):
		for test_set in self.test_set:
			print "\n\n" + str(test_set.suite_dir) + ":\n"

			# If a suite has failed/erroneous tests, add it to what_failed
			if (test_set.fail_total + test_set.exception_total) > 0:
				self.suites_failed.append(test_set.suite_dir)

			if len(test_set.test_set) < 1:
				print "No tests run"
			else:
				print "PASSED: " + str(test_set.pass_total)
				print "FAILED: " + str(test_set.fail_total)
				if test_set.skip_total > 0:
					print "SKIPPED: " + str(test_set.skip_total)
				if test_set.exception_total > 0:
					print "EXCEPTIONS: " + str(test_set.exception_total)

			if test_set.fail_total > 0:
				print "\nFAILED TESTS:"

				# Add each failed test to what_failed and print it
				for test in test_set.failed_tests:
					self.tests_failed.append("^" + test + ".py")
					print "\t", test

			if test_set.exception_total > 0:
				print "\nERRONEOUS TESTS:"

				# Add each erroneous test to what_failed and print it
				for test in test_set.error_tests:
					first = test.find("'")
					second = test.find("'", (first + 1))
					self.tests_failed.append("^" +
							test[(first + 1):second] + ".py")
					print "\t", test

	def what_failed(self):
		"Create the file for --what-failed if there were failing tests"
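		# Note: the file handle below is assigned to self.what_failed, shadowing
		# this method's name on the instance once it has been called.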
		if len(self.suites_failed) > 0:
			self.what_failed = open("what_failed", "w")
			# Add the suites and tests to the file as command-line options
			self.what_failed.write('-s "')
			loop_number = 0
			for suite in self.suites_failed:
				loop_number += 1
				self.what_failed.write(suite)

				# If this is not the last suite, prepare to add another
				if loop_number < len(self.suites_failed):
					self.what_failed.write("|")

			self.what_failed.write('" -t "')
			loop_number = 0
			for test in self.tests_failed:
				loop_number += 1
				self.what_failed.write(test)

				# If this is not the last test, prepare to add another
				if loop_number < len(self.tests_failed):
					self.what_failed.write("|")
			self.what_failed.write('"')
			self.what_failed.close()

		else:
			# If there were no failing tests this time, remove any previous file
			try:
				os.remove("what_failed")
			except:
				try:
					os.chmod("what_failed", stat.S_IRWXU)
					os.remove("what_failed")
				except:
					pass

class Suite(TestRun):
	"""A test suite"""

	python_file_regex = re.compile("(.*)\.py$", re.I)

	def __init__(self, dir, parent):
		TestRun.__init__(self)
		self.suite_dir = dir

		# Upload directory (if set)
		self.upload_location = parent.upload_location

		# Regex for searching for tests
		self.test_file_regex = parent.test_file_regex
		self.test_pattern = parent.testpattern


	def run(self):
		"""run the suite"""

		self.time_stamp = datetime.datetime.now()
		self.results = {}
		self.start_times = {}
		self.end_times = {}

		print "\n\nRunning " + str(self.suite_dir) + "..."

		# Iterate through all files in specified directory
		for test in os.listdir(self.suite_dir):
			# Only check '*.py' files
			name_match = self.python_file_regex.match(test)
			if name_match is not None:
				if self.test_file_regex is not None:
					# Each file that matches -t input is imported if any
					name_match = self.test_file_regex.match(test)
				else:
					name_match = 1
				if name_match is not None:
					import_name = test[:-3]
					try:
						self.test_set.append(imp.load_source(import_name,
								(raptor_tests.ReplaceEnvs(self.suite_dir
								+ "/" + test))))
					except:
						print "\n", (sys.exc_type.__name__ + ":"), \
								sys.exc_value, "\n", \
								traceback.print_tb(sys.exc_traceback)

		test_number = 0
		test_total = len(self.test_set)
		if test_total < 1:
			print "No tests in suite " + self.suite_dir + \
					" matched by specification '" + self.test_pattern + \
					"' (regex: /.*" + self.test_pattern + ".*/)\n"
		# Run each test, capturing all its details and its results
		for test in self.test_set:
			test_number += 1
			# Save start/end times and save in dictionary for TMS
			start_time = datetime.datetime.now()
			try:
				test_number_text = "\n\nTEST " + str(test_number) + "/" + \
						str(test_total) + ":"

				if self.fail_total > 0:
					test_number_text += " So far " + str(self.fail_total) + \
							" FAILED"
				if self.exception_total > 0:
					test_number_text += " So far " + str(self.exception_total) + \
							" ERRONEOUS"

				print test_number_text

				test_object = test.run()

				end_time = datetime.datetime.now()

				# Add leading 0s
				test_object.id = raptor_tests.fix_id(test_object.id)

				# No millisecond function, so need to use microseconds/1000
				start_milliseconds = start_time.microsecond
				end_milliseconds = end_time.microsecond

				# Add trailing 0's if required
				start_milliseconds = \
						format_milliseconds(start_milliseconds)
				end_milliseconds = \
						format_milliseconds(end_milliseconds)

				self.start_times[test_object.id] = \
						start_time.strftime("%H:%M:%S:" +
						str(start_milliseconds))
				self.end_times[test_object.id] = \
						end_time.strftime("%H:%M:%S:" + \
						str(end_milliseconds))

				run_time = (end_time - start_time)

				run_time_seconds = (str(run_time.seconds) + "." + \
						str(format_milliseconds(run_time.microseconds)))
				print ("RunTime: " + run_time_seconds + "s")
				# Add to pass/fail count and save result to dictionary
				if test_object.result == raptor_tests.SmokeTest.PASS:
					self.pass_total += 1
					self.results[test_object.id] = "Passed"
				elif test_object.result == raptor_tests.SmokeTest.FAIL:
					self.fail_total += 1
					self.results[test_object.id] = "Failed"
					self.failed_tests.append(test_object.name)
				elif test_object.result == raptor_tests.SmokeTest.SKIP:
					self.skip_total += 1
				# Clean epocroot after running each test if --clean option is specified
				if options.clean:
					print "\nCLEANING TEST RESULTS..."
					raptor_tests.clean_epocroot()

			except:
				print "\nTEST ERROR:"
				print (sys.exc_type.__name__ + ":"), \
						sys.exc_value, "\n", \
						traceback.print_tb(sys.exc_traceback)
				self.exception_total += 1
				self.error_tests.append(str(self.test_set[test_number - 1]))

		if self.upload_location != None:
			self.create_csv()

		end_time_stamp = datetime.datetime.now()

		runtime = end_time_stamp - self.time_stamp
		seconds = (str(runtime.seconds) + "." + \
				str(format_milliseconds(runtime.microseconds)))
		if options.upload:
			self.create_tri(seconds)

		print ("\n" + str(self.suite_dir) + " RunTime: " + seconds + "s")

	def create_csv(self):
		"""
		This method will create a CSV file with the smoke test's output
		in order to successfully upload results to TMS QC
		"""

		# This sorts the dictionaries by their key values (Test IDs)
		id_list = run_tests.sort_dict(self.results)

		self.test_file_name = (self.suite_dir + "_" + \
				self.time_stamp.strftime("%Y-%m-%d_%H-%M-%S") + "_" +
				branch + "_results.csv")
		# This is the path for file-creation on the server. Includes
		self.test_path = (self.upload_location + "/csv/" + self.suite_dir + "/"
				+ self.test_file_name)

		try:

			if not os.path.isdir(self.upload_location + "/csv/" +
					self.suite_dir):
				os.makedirs(self.upload_location + "/csv/" + self.suite_dir)

			csv_file = \
					open(raptor_tests.ReplaceEnvs(os.path.normpath(self.test_path)),
					"w")
			csv_file.write("TestCaseID,StartTime,EndTime,Result\n")

			for test_id in id_list:
				csv_file.write("PCT-SBSV2-" + self.suite_dir + "-" + test_id + \
						"," + str(self.start_times[test_id]) + "," + \
						str(self.end_times[test_id]) + "," + \
						self.results[test_id] + "\n")
			csv_file.close()

		except OSError, e:
			print "SBS_TESTS: Error:", e


	def create_tri(self, overall_seconds):
		"""
		This method will create a TRI (xml) file containing the location of the
		CSV file in order to successfully upload results to TMS QC
		"""
		# Path for the tri file
		tri_path = (self.upload_location + "/new/" + self.suite_dir + \
				"_" + self.time_stamp.strftime("%Y-%m-%d_%H-%M-%S") + ".xml")
		run_name_timestamp = self.time_stamp.strftime(self.suite_dir + \
				"%Y-%m-%d_%H-%M-%S")
		date_time_timestamp = self.time_stamp.strftime("%d.%m.%Y %H:%M:%S")
		test_set_name = "Root\\Product Creation Tools\\Regression\\" + \
				"SBS v2 (Raptor)\\" + self.suite_dir + "_"
		if sys.platform.startswith("win"):
			test_set_name += ("WinXP_" + branch)
		else:
			test_set_name += ("Linux_" + branch)

		# /mnt/ -> // Fixes the difference in paths for lon-rhdev mounts vs. win
		if not sys.platform.startswith("win"):
			if self.test_path.startswith("/mnt/"):
				self.test_path = self.test_path.replace("mnt", "", 1)

		try:
			tri_file = \
					open(raptor_tests.ReplaceEnvs(os.path.normpath(tri_path)), \
					"w")
			tri_file.write(
					"<TestRunInfo>\n" + \
					"\t<RunName>\n\t\t" + \
					run_name_timestamp + \
					"\n\t</RunName>\n" + \
					"\t<TestGroup>\n" + \
					"\t\tSBSv2 (Non-SITK)\n" + \
					"\t</TestGroup>\n" + \
					"\t<DateTime>\n\t\t" + \
					date_time_timestamp + \
					"\n\t</DateTime>\n" + \
					"\t<RunDuration>\n\t\t" + \
					overall_seconds + \
					"\n\t</RunDuration>\n" + \
					'\t<TestSet name="' + test_set_name + '">\n' + \
					"\t\t<TestResults>\n\t\t\t" + \
					self.test_path + \
					"\n\t\t</TestResults>\n" + \
					"\t</TestSet>\n" + \
					"</TestRunInfo>")
			tri_file.close()
			print "Tests uploaded to '" + self.upload_location + "' (" + \
					branch + ")"
		except OSError, e:
			print "SBS_TESTS: Error:", e

class SuiteRun(TestRun):
	""" Represents a 'run' of a number of test suites """

	def __init__(self, suitepattern = None, testpattern = None,
			upload_location = None):
		TestRun.__init__(self)

		# Add common directory to list of paths to search for modules
		sys.path.append(raptor_tests.ReplaceEnvs("$(SBS_HOME)/test/common"))

		if suitepattern:
			self.suite_regex = re.compile(".*" + suitepattern + ".*", re.I)
		else:
			self.suite_regex = re.compile(".*\_suite$", re.I)

		if testpattern:
			self.test_file_regex = re.compile(".*" + testpattern + ".*",
					re.I)
		else:
			self.test_file_regex = None

		self.suitepattern = suitepattern
		self.testpattern = testpattern
		self.upload_location = upload_location

	def run_tests(self):
		"""
		Run all the tests in the specified suite (directory)
		"""

		suites = []
		for dir in os.listdir("."):
			name_match = self.suite_regex.match(dir)
			# Each folder that matches the suite pattern will be looked into
			# Also checks to make sure the found entry is actually a directory
			if name_match is not None and os.path.isdir(dir):
				s = Suite(dir, self)
				s.run()
				self.aggregate(s)
				suites.append(dir)

		# Print which options were used
		if options.test_home == None:
			options_dir = "defaults)"
		else:
			options_dir = "'" + options.test_home + "' options file)"
		print "\n(Tests run using %s" %options_dir

		# Summarise the entire test run
		if self.suitepattern and (len(suites) < 1):
			print "\nNo suites matched specification '" + self.suitepattern + \
					"'\n"
		else:
			print "Overall summary (%d suites, %d tests):" \
					%(len(suites), self.test_total)
			self.show()
			self.what_failed()


	def sort_dict(self, input_dict):
		"""
		This method sorts values in a dictionary
		"""
		keys = input_dict.keys()
		keys.sort()
		return keys

# Make SBS_HOME, EPOCROOT have uppercase drive letters to match os.getcwd() and
# thus stop all those insane test problems which result from one being uppercase
# and the other lowercase

if sys.platform.startswith("win"):
	sh = os.environ['SBS_HOME']
	if sh[1] == ':':
		os.environ['SBS_HOME'] = sh[0].upper() + sh[1:]
	er = os.environ['EPOCROOT']
	if er[1] == ':':
		os.environ['EPOCROOT'] = er[0].upper() + er[1:]

# Clean epocroot before running tests
raptor_tests.clean_epocroot()
run_tests = SuiteRun(suitepattern = options.suite, testpattern = options.tests,
		upload_location = options.upload)
run_tests.run_tests()