|
1 #!/usr/bin/env python |
|
2 # -*- coding: utf-8 -*- |
|
3 |
|
4 #Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies) |
|
5 |
|
6 #This library is free software; you can redistribute it and/or |
|
7 #modify it under the terms of the GNU Library General Public |
|
8 #License as published by the Free Software Foundation; either |
|
9 #version 2 of the License, or (at your option) any later version. |
|
10 |
|
11 #This library is distributed in the hope that it will be useful, |
|
12 #but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
13 #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
14 #Library General Public License for more details. |
|
15 |
|
16 #You should have received a copy of the GNU Library General Public License |
|
17 #along with this library; see the file COPYING.LIB. If not, write to |
|
18 #the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, |
|
19 #Boston, MA 02110-1301, USA. |
|
20 |
|
21 from __future__ import with_statement |
|
22 |
|
23 import sys |
|
24 import os |
|
25 import os.path |
|
26 import re |
|
27 import logging |
|
28 from subprocess import Popen, PIPE, STDOUT |
|
29 from optparse import OptionParser |
|
30 |
|
31 |
|
class Log(object):
    """ Thin convenience wrapper around logging.Logger.

    Exposes the logger's reporting methods as plain attributes
    (self.debug, self.info, self.warn, self.error, self.exception) so
    subclasses can report without going through self._log every time.
    """

    def __init__(self, name):
        # One named logger per subsystem; all share the global logging config.
        self._log = logging.getLogger(name)
        self.debug = self._log.debug
        # Logger.warn is a deprecated alias of Logger.warning; bind the
        # canonical method but keep the 'warn' attribute name so existing
        # callers (e.g. Main.__init__) keep working unchanged.
        self.warn = self._log.warning
        self.error = self._log.error
        self.exception = self._log.exception
        self.info = self._log.info
|
40 |
|
41 |
|
class Options(Log):
    """ Option manager. It parses and checks script's parameters, sets an internal variable. """

    def __init__(self, args):
        """ Parses args (normally sys.argv[1:]), configures the logging level
        from --verbose, and validates the single positional search path.

        Exits the whole script on bad usage: status 1 when the positional
        argument is missing or duplicated, status 2 when the path does not
        exist. In developer mode parallel_level is forced to 1 and the
        browser pop-up is disabled.
        """
        Log.__init__(self, "Options")
        log = self._log
        opt = OptionParser("%prog [options] PathToSearch.\nTry -h or --help.")
        opt.add_option("-j", "--parallel-level", action="store", type="int",
            dest="parallel_level", default=None,
            help="Number of parallel processes executing the Qt's tests. Default: cpu count.")
        opt.add_option("-v", "--verbose", action="store", type="int",
            dest="verbose", default=2,
            help="Verbose level (0 - quiet, 1 - errors only, 2 - infos and warnings, 3 - debug information). Default: %default.")
        opt.add_option("", "--tests-options", action="store", type="string",
            dest="tests_options", default="",
            help="Parameters passed to Qt's tests (for example '-eventdelay 123').")
        opt.add_option("-o", "--output-file", action="store", type="string",
            dest="output_file", default="/tmp/qtwebkittests.html",
            help="File where results will be stored. The file will be overwritten. Default: %default.")
        opt.add_option("-b", "--browser", action="store", dest="browser",
            default="xdg-open",
            help="Browser in which results will be opened. Default %default.")
        opt.add_option("", "--do-not-open-results", action="store_false",
            dest="open_results", default=True,
            help="The results shouldn't pop-up in a browser automatically")
        opt.add_option("-d", "--developer-mode", action="store_true",
            dest="developer", default=False,
            help="Special mode for debugging. In general it simulates human behavior, running all autotests. In the mode everything is executed synchronously, no html output will be generated, no changes or transformation will be applied to stderr or stdout. In this mode options; parallel-level, output-file, browser and do-not-open-results will be ignored.")

        self._o, self._a = opt.parse_args(args)
        # Map the numeric --verbose level onto the logging module's levels;
        # anything out of range falls back to INFO with a warning.
        verbose = self._o.verbose
        if verbose == 0:
            logging.basicConfig(level=logging.CRITICAL,)
        elif verbose == 1:
            logging.basicConfig(level=logging.ERROR,)
        elif verbose == 2:
            logging.basicConfig(level=logging.INFO,)
        elif verbose == 3:
            logging.basicConfig(level=logging.DEBUG,)
        else:
            logging.basicConfig(level=logging.INFO,)
            log.warning("Bad verbose level, switching to default.")
        try:
            # self._a[0] raises IndexError when no positional argument was
            # given; a non-existing path is reported via the broad handler.
            if not os.path.exists(self._a[0]):
                raise Exception("Given path doesn't exist.")
            if len(self._a) > 1:
                raise IndexError("Only one directory could be provided.")
            self._o.path = self._a[0]
        except IndexError:
            log.error("Bad usage. Please try -h or --help.")
            sys.exit(1)
        except Exception:
            log.error("Path '%s' doesn't exist", self._a[0])
            sys.exit(2)
        if self._o.developer:
            # Idiom fix: 'is not None' instead of 'not ... is None'.
            if self._o.parallel_level is not None:
                log.warning("Developer mode sets parallel-level option to one.")
            self._o.parallel_level = 1
            self._o.open_results = False

    def __getattr__(self, attr):
        """ Maps all options properties into this object (remove one level of indirection). """
        return getattr(self._o, attr)
|
105 |
|
106 |
|
107 def run_test(args): |
|
108 """ Runs one given test. |
|
109 args should contain a tuple with 3 elements; |
|
110 TestSuiteResult containing full file name of an autotest executable. |
|
111 str with options that should be passed to the autotest executable |
|
112 bool if true then the stdout will be buffered and separated from the stderr, if it is false |
|
113 then the stdout and the stderr will be merged together and left unbuffered (the TestSuiteResult output will be None). |
|
114 """ |
|
115 log = logging.getLogger("Exec") |
|
116 test_suite, options, buffered = args |
|
117 try: |
|
118 log.info("Running... %s", test_suite.test_file_name()) |
|
119 if buffered: |
|
120 tst = Popen(test_suite.test_file_name() + options, stdout=PIPE, stderr=None, shell=True) |
|
121 else: |
|
122 tst = Popen(test_suite.test_file_name() + options, stdout=None, stderr=STDOUT, shell=True) |
|
123 except OSError, e: |
|
124 log.exception("Can't open an autotest file: '%s'. Skipping the test...", e.filename) |
|
125 else: |
|
126 test_suite.set_output(tst.communicate()[0]) # takes stdout only, in developer mode it would be None. |
|
127 log.info("Finished %s", test_suite.test_file_name()) |
|
128 return test_suite |
|
129 |
|
130 |
|
class TestSuiteResult(object):
    """ Value object pairing one autotest executable with its captured output. """

    def __init__(self):
        # Both fields remain None until the corresponding setter is called.
        self._output = None
        self._test_file_name = None

    def set_output(self, xml):
        # Empty or None output (developer mode captures nothing) is ignored,
        # leaving the previous value untouched.
        if not xml:
            return
        self._output = xml.strip()

    def output(self):
        # Stripped captured stdout of the test, or None.
        return self._output

    def set_test_file_name(self, file_name):
        # Full path of the autotest executable.
        self._test_file_name = file_name

    def test_file_name(self):
        return self._test_file_name
|
150 |
|
151 |
|
class Main(Log):
    """ The main script. All real work is done in run() method.

    Finds Qt autotest executables under a directory, runs them through a
    worker pool, and (outside developer mode) converts the collected output
    into a stdout summary and an HTML report.

    NOTE(review): this script relies on Python 2 semantics throughout
    (list-returning map/filter, builtin reduce, file()) — confirm the
    target interpreter before porting.
    """

    def __init__(self, options):
        # Pick a pool implementation: the real multiprocessing.Pool when more
        # than one worker was requested (None means "cpu count" default), or a
        # synchronous stand-in class when running with a single worker.
        Log.__init__(self, "Main")
        self._options = options
        if options.parallel_level > 1 or options.parallel_level is None:
            try:
                from multiprocessing import Pool
            except ImportError:
                # Fall back to sequential execution on old interpreters
                # without the multiprocessing module.
                self.warn("Import Error: the multiprocessing module couldn't be loaded (may be lack of python-multiprocessing package?). The Qt autotests will be executed one by one.")
                options.parallel_level = 1
        if options.parallel_level == 1:

            class Pool(object):
                """ A hack, created to avoid problems with multiprocessing module, this class is single thread replacement for the multiprocessing.Pool class. """
                def __init__(self, processes):
                    # 'processes' accepted only for interface compatibility.
                    pass

                def imap_unordered(self, func, files):
                    return map(func, files)

                def map(self, func, files):
                    return map(func, files)

        # NOTE(review): if parallel_level is 0 or negative, neither branch
        # above binds 'Pool' and this line raises NameError — confirm whether
        # such values should be rejected in Options instead.
        self._Pool = Pool

    def run(self):
        """ Find && execute && publish results of all test. "All in one" function. """
        self.debug("Searching executables...")
        tests_executables = self.find_tests_paths(self._options.path)
        self.debug("Found: %s", len(tests_executables))
        self.debug("Executing tests...")
        results = self.run_tests(tests_executables)
        # Developer mode streams raw output directly; nothing to transform
        # or publish in that case.
        if not self._options.developer:
            self.debug("Transforming...")
            transformed_results = self.transform(results)
            self.debug("Publishing...")
            self.announce_results(transformed_results)

    def find_tests_paths(self, path):
        """ Finds all tests executables inside the given path.

        Returns a list of TestSuiteResult objects, one per executable file
        whose name starts with 'tst_'.
        """
        executables = []
        for root, dirs, files in os.walk(path):
            # Check only for a file that name starts from 'tst_' and that we can execute.
            filtered_path = filter(lambda w: w.startswith('tst_') and os.access(os.path.join(root, w), os.X_OK), files)
            filtered_path = map(lambda w: os.path.join(root, w), filtered_path)
            for file_name in filtered_path:
                r = TestSuiteResult()
                r.set_test_file_name(file_name)
                executables.append(r)
        return executables

    def run_tests(self, files):
        """ Executes given files by using a pool of workers.

        Each work package is [TestSuiteResult, tests_options, buffered] —
        the exact argument shape expected by the module-level run_test().
        """
        workers = self._Pool(processes=self._options.parallel_level)
        # to each file add options.
        # NOTE(review): '%i' with parallel_level=None makes the logging module
        # report a formatting error (message is dropped, script continues) —
        # confirm whether this should log the resolved worker count instead.
        self.debug("Using %s the workers pool, number of workers %i", repr(workers), self._options.parallel_level)
        package = map(lambda w: [w, self._options.tests_options, not self._options.developer], files)
        self.debug("Generated packages for workers: %s", repr(package))
        results = workers.map(run_test, package)  # Collects results.
        return results

    def transform(self, results):
        """ Transforms list of the results to specialized versions.

        Returns a dict with two renderings of the same results:
        'stdout' (plain text) and 'html' (full HTML page).
        """
        stdout = self.convert_to_stdout(results)
        html = self.convert_to_html(results)
        return {"stdout": stdout, "html": html}

    def announce_results(self, results):
        """ Shows the results. """
        self.announce_results_stdout(results['stdout'])
        self.announce_results_html(results['html'])

    def announce_results_stdout(self, results):
        """ Show the results by printing to the stdout."""
        print(results)

    def announce_results_html(self, results):
        """ Shows the result by creating a html file and calling a web browser to render it. """
        # file() is the Python 2 built-in alias of open().
        with file(self._options.output_file, 'w') as f:
            f.write(results)
        if self._options.open_results:
            # Fire-and-forget: the browser process is never waited on.
            Popen(self._options.browser + " " + self._options.output_file, stdout=None, stderr=None, shell=True)

    def convert_to_stdout(self, results):
        """ Converts results, that they could be nicely presented in the stdout. """
        # Join all results into one piece.
        txt = "\n\n".join(map(lambda w: w.output(), results))
        # Find total count of failed, skipped and passed tests.
        totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
        # NOTE(review): reduce() raises TypeError if no test printed a
        # 'Totals:' line (empty match list) — confirm intended behavior.
        totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
        totals = map(str, totals)
        totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped"
        # Add a summary.
        txt += '\n\n\n' + '*' * 70
        txt += "\n**" + ("TOTALS: " + totals).center(66) + '**'
        txt += '\n' + '*' * 70 + '\n'
        return txt

    def convert_to_html(self, results):
        """ Converts results, that they could showed as a html page.

        Wraps each QTestLib output line (PASS/FAIL!/XPASS/XFAIL/SKIP/QWARN/
        RESULT/QFATAL/Totals) in <case>/<status> markup that the embedded
        stylesheet and show/hide script operate on.
        """
        # Join results into one piece.
        txt = "\n\n".join(map(lambda w: w.output(), results))
        # NOTE(review): this line appears mangled by extraction — as written
        # each replace() is a no-op; the original presumably escaped HTML
        # metacharacters ('&amp;', '&lt;', '&gt;'). Confirm against upstream.
        txt = txt.replace('&', '&').replace('<', "<").replace('>', ">")
        # Add a color and a style.
        # Drop the '********* Finished ... *********' banner lines entirely.
        txt = re.sub(r"([* ]+(Finished)[ a-z_A-Z0-9]+[*]+)",
                lambda w: r"",
                txt)
        # Remaining '****...****' banners open a new 'good' case in bold.
        txt = re.sub(r"([*]+[ a-z_A-Z0-9]+[*]+)",
                lambda w: "<case class='good'><br><br><b>" + w.group(0) + r"</b></case>",
                txt)
        txt = re.sub(r"(Config: Using QTest library)((.)+)",
                lambda w: "\n<case class='good'><br><i>" + w.group(0) + r"</i> ",
                txt)
        txt = re.sub(r"\n(PASS)((.)+)",
                lambda w: "</case>\n<case class='good'><br><status class='pass'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(FAIL!)((.)+)",
                lambda w: "</case>\n<case class='bad'><br><status class='fail'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(XPASS)((.)+)",
                lambda w: "</case>\n<case class='bad'><br><status class='xpass'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(XFAIL)((.)+)",
                lambda w: "</case>\n<case class='good'><br><status class='xfail'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        # NOTE(review): SKIP reuses the 'xfail' status class — confirm
        # whether a dedicated 'skip' style was intended.
        txt = re.sub(r"\n(SKIP)((.)+)",
                lambda w: "</case>\n<case class='good'><br><status class='xfail'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(QWARN)((.)+)",
                lambda w: "</case>\n<case class='bad'><br><status class='warn'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(RESULT)((.)+)",
                lambda w: "</case>\n<case class='good'><br><status class='benchmark'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(QFATAL)((.)+)",
                lambda w: "</case>\n<case class='bad'><br><status class='crash'>" + w.group(1) + r"</status>" + w.group(2),
                txt)
        txt = re.sub(r"\n(Totals:)([0-9', a-z]*)",
                lambda w: "</case>\n<case class='good'><br><b>" + w.group(1) + r"</b>" + w.group(2) + "</case>",
                txt)
        # Find total count of failed, skipped and passed tests.
        totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
        # NOTE(review): same empty-sequence reduce() caveat as in
        # convert_to_stdout.
        totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
        totals = map(str, totals)
        totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped."
        # Create a header of the html source.
        txt = """
<html>
<head>
<script>
function init() {
    // Try to find the right styleSheet (this document could be embedded in an other html doc)
    for (i = document.styleSheets.length - 1; i >= 0; --i) {
        if (document.styleSheets[i].cssRules[0].selectorText == "case.good") {
            resultStyleSheet = i;
            return;
        }
    }
    // The styleSheet hasn't been found, but it should be the last one.
    resultStyleSheet = document.styleSheets.length - 1;
}

function hide() {
    document.styleSheets[resultStyleSheet].cssRules[0].style.display='none';
}

function show() {
    document.styleSheets[resultStyleSheet].cssRules[0].style.display='';
}

</script>
<style type="text/css">
case.good {color:black}
case.bad {color:black}
status.pass {color:green}
status.crash {color:red}
status.fail {color:red}
status.xpass {color:663300}
status.xfail {color:004500}
status.benchmark {color:000088}
status.warn {color:orange}
status.crash {color:red; text-decoration:blink; background-color:black}
</style>
</head>
<body onload="init()">
<center>
<h1>Qt's autotests results</h1>%(totals)s<br>
<hr>
<form>
<input type="button" value="Show failures only" onclick="hide()"/>

<input type="button" value="Show all" onclick="show()"/>
</form>
</center>
<hr>
%(results)s
</body>
</html>""" % {"totals": totals, "results": txt}
        return txt
|
353 |
|
354 |
|
if __name__ == '__main__':
    # Script entry point: parse the command line, then find, execute and
    # publish all Qt autotests under the given path.
    options = Options(sys.argv[1:])
    main = Main(options)
    main.run()