# Copyright (c) 2008 Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script parses the output of the regrtest.py script and
# transforms it into the XML format suitable for the CruiseControl web
# interface.
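#
# Usage (the script name below is illustrative; pass the regrtest log
# file name as the only argument):
#   python parse_regrtest_log.py regrtest_aino_3_1.log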

import re
import sys
import os
import pprint

expected_skip_list = ['test_aepack',
                      'test_al',
                      'test_applesingle',
                      'test_cd',
                      'test_cl',
                      'test_cmd_line',
                      'test_commands',
                      'test_crypt',
                      'test_ctypes',
                      'test_hotshot',
                      'test_plistlib',
                      'test_sundry',
                      'test_bsddb',
                      'test_bsddb185',
                      'test_bsddb3',
                      'test_bz2',
                      'test_dbm',
                      'test_gdbm',
                      'test_gl',
                      'test_imageop',
                      'test_rgbimg',
                      'test_audioop',
                      'test_gettext',
                      'test_curses',
                      'test_dl',
                      'test_fork1',
                      'test_grp',
                      'test_imgfile',
                      'test_ioctl',
                      'test_largefile',
                      'test_linuxaudiodev',
                      'test_macfs',
                      'test_macostools',
                      'test_macpath',
                      'test_mhlib',
                      'test_mmap',
                      'test_nis',
                      'test_openpty',
                      'test_ossaudiodev',
                      'test_pep277',
                      'test_poll',
                      'test_popen',
                      'test_popen2',
                      'test_pty',
                      'test_pwd',
                      'test_resource',
                      'test_scriptpackages',
                      'test_signal',
                      'test_startfile',
                      'test_sqlite',
                      'test_subprocess',
                      'test_sunaudiodev',
                      'test_tcl',
                      'test_threadsignals',
                      'test_wait3',
                      'test_wait4',
                      'test_winreg',
                      'test_winsound',
                      'test_zipfile64']


def replaceXMLentities(s):
    # '&' must be escaped first so that the other entities are not
    # double-escaped.
    s = s.replace('&', '&amp;')
    s = s.replace('<', '&lt;')
    s = s.replace('>', '&gt;')
    s = s.replace("'", '&apos;')
    s = s.replace('"', '&quot;')

    # Blank out anything non-printable so that the XML stays valid.
    def replace_non_printable(obj):
        if ord(obj.group(0)) > 31 and ord(obj.group(0)) < 126:
            return obj.group(0)
        else:
            return '?'
    return re.sub('[^ a-zA-Z0-9\n]', replace_non_printable, s)
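# e.g. replaceXMLentities("a <b> & 'c'") == "a &lt;b&gt; &amp; &apos;c&apos;"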

# This regexp matches both the plain test start header "test_foo" and
# the "Re-running test 'test_foo' in verbose mode" header.
re_run = re.compile("^(Re-running test '|)(test_[^ \n]+)(' in verbose mode|)$")
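# e.g. both of these log lines start a test case:
#   test_socket
#   Re-running test 'test_socket' in verbose mode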
# Matches the end of test outputs and beginning of reporting
re_finish = re.compile("^[0-9]+ tests (OK.|skipped:)$")
re_unexpected = re.compile("test (test_.*) produced unexpected output:(.*)")
re_skipped = re.compile("(test_.*) skipped --")
re_crashed = re.compile("test (test_.*) crashed --")
re_failed = re.compile("test (test_.*) failed --")
re_time = re.compile("Ran ([0-9]+) test[s]{0,1} in (.*)s")
re_unexp_skips = re.compile("(.*) skip[s]{0,1} unexpected on")
re_sis_build_time = re.compile("Sis build time :(.*)")
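# e.g. "Sis build time : 120" (the value is illustrative; group(1) captures
# everything after the colon verbatim, including any leading space)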

# Build information that should be displayed on CC has to appear in the
# regrtest_emu log in this format:
re_build_info = re.compile("Build Info -- Name : <(.*)>, Value : <(.*)>")
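# e.g. "Build Info -- Name : <SDK version>, Value : <3.1>" (values
# illustrative)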

# A metric should be printed to the regrtest log in this format:
# "Measurement -- Name : <>, Value : <>, Unit : <>, Threshold : <>,
# higher_is_better : <>"
# Note: all special characters in the above string are compulsory.
# Threshold should always be a positive number.
# higher_is_better should be either 'yes' or 'no'.
re_measurement = re.compile("Measurement -- Name : <(.*)>, Value : <(.*)>," +
            " Unit : <(.*)>, Threshold : <(.*)>, higher_is_better : <(.*)>")

new_state = {'passed': [],
             'failed': [],
             'skipped_expected': [],
             'skipped_unexpected': []}
# old_state must be a separate dict, not an alias of new_state, so that
# filling new_state below cannot mutate the previous-build baseline. It
# is overwritten with the saved state if a state file exists.
old_state = {'passed': [],
             'failed': [],
             'skipped_expected': [],
             'skipped_unexpected': []}
changeset = {}
results = {}
current_case = None
unexp_skips = None
unexp_skip_list = []
measurements = {}
sis_build_time = ''
dev_metrics_log = None
build_info = {}

# Remove the 'regrtest_' prefix and the '_x_y.log' suffix from the log name.
# Some sample target names: aalto, emulator, merlin
target_name = sys.argv[1][9:-8]
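# e.g. 'regrtest_merlin_3_2.log'[9:-8] == 'merlin'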

# regrtest_xxx.log and the corresponding XML file are placed here.
base_dir = 'build\\test'

log_directory = 'C:\\Program Files\\CruiseControl\\logs'
if not os.path.exists(log_directory):
    os.makedirs(log_directory)
dev_metrics_log_file = os.path.join(log_directory, 'ngopy_metrics.log')

# sys.argv[1] is the log filename passed when calling this script,
# e.g. regrtest_aino_3_1.log or regrtest_merlin_3_2.log
regrtest_log = open(os.path.join(base_dir, sys.argv[1]), 'rt')
# XML filenames will be aalto_results.xml, merlin_results.xml etc.
regrtest_xml = open(os.path.join(base_dir, target_name + '_results.xml'), 'w')

# state_file contains the state (passed/failed/skipped) of each test case
# in the previous build
try:
    state_file = open(os.path.join(log_directory, 'state_' +
                                   target_name + '.txt'), 'rt')
except IOError:
    state_file = None

# Logging of metrics is not required for emulator and linux builds
if target_name not in ['linux', 'emulator']:
    dev_metrics_log = open(dev_metrics_log_file, 'a+')
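
# For reference, the regrtest log being parsed looks roughly like this
# (contents illustrative):
#   test_foo
#   ...output produced by test_foo...
#   test_bar
#   test_bar skipped -- no relevant module
#   220 tests OK.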

# First split the output file based on the case names
for line in regrtest_log:
    if re_finish.match(line):
        current_case = None
        continue
    m = re_run.match(line)
    if m:
        # beginning of processing for a new case
        case_name = m.group(2)
        assert case_name
        if case_name != 'test_pystone':
            current_case = {'time': "1", 'output': []}
            results[case_name] = current_case
    else:
        x1 = re_time.match(line)
        if x1:
            current_case['time'] = x1.group(2)
        if current_case:
            current_case['output'].append(line)

    sis_build_time_match = re_sis_build_time.match(line)
    if sis_build_time_match:
        sis_build_time = sis_build_time_match.group(1)
        continue

    re_build_info_match = re_build_info.match(line)
    if re_build_info_match:
        build_info[re_build_info_match.group(1)] = \
            re_build_info_match.group(2)
        continue

    re_measurement_match = re_measurement.match(line)
    if re_measurement_match:
        name = re_measurement_match.group(1)
        measurements[name] = {
            'value': float(re_measurement_match.group(2)),
            'unit': re_measurement_match.group(3),
            'threshold': float(re_measurement_match.group(4)),
            'higher_is_better': re_measurement_match.group(5),
            'direction': 'Neutral',
            'delta': 0}
        continue
    # For linux we believe regrtest, but for emu and devices we decide
    # ourselves which skips are expected and which are unexpected
    # (see expected_skip_list).
    if target_name == 'linux':
        # The line following an "... skips unexpected on ..." header
        # lists the unexpectedly skipped test names.
        if unexp_skips:
            unexp_skip_list.extend(line.strip().rsplit(' '))
            unexp_skips = None
        unexp_skips = re_unexp_skips.match(line)
regrtest_log.close()

# Analyze further to determine the result of each test
count_unexpected = 0
count_skipped = 0
count_crashed = 0
count_failed = 0
count_passed = 0
count_skipped_unexpected = 0
count_skipped_expected = 0

for case_name, result in results.items():
    out = ''.join(result['output'])
    result['output'] = out
    # if nothing else matches, the case is assumed to have passed
    if re_unexpected.search(out):
        result['state'] = 'unexpected_output'
        count_unexpected += 1
    elif re_skipped.search(out):
        result['state'] = 'skipped'
        count_skipped += 1
    elif re_crashed.search(out):
        result['state'] = 'crashed'
        count_crashed += 1
    elif re_failed.search(out):
        result['state'] = 'failed'
        count_failed += 1
    else:
        result['state'] = 'passed'
        count_passed += 1

# Report results
print "Full results:"
pprint.pprint(results)
states = set(results[x]['state'] for x in results)
print "Summary:"
for state in states:
    cases = [x for x in results if results[x]['state'] == state]
    print "%d %s: %s" % (len(cases), state, ' '.join(cases))

total_cases = count_unexpected + count_skipped + count_crashed + \
              count_failed + count_passed
failed_testcases = count_unexpected + count_crashed + count_failed

testcase_metric_names = ['Number of Failed Test Cases',
                         'Number of Skipped Test Cases - Unexpected',
                         'Number of Successful Test Cases',
                         'Number of Skipped Test Cases - Expected']

# Initialize the test case count entries of the 'measurements' dictionary
# and then fill in the values. The skipped_expected and skipped_unexpected
# counts are assigned later, because our expected/unexpected split is not
# the same as the one regrtest reports in the log.
for item in testcase_metric_names:
    measurements[item] = {}
    measurements[item]['value'] = 0
    measurements[item]['threshold'] = 1
    if item == 'Number of Successful Test Cases':
        measurements[item]['higher_is_better'] = 'yes'
    else:
        measurements[item]['higher_is_better'] = 'no'
    measurements[item]['delta'] = 0
    measurements[item]['direction'] = 'Neutral'
    measurements[item]['unit'] = ''

measurements['Number of Failed Test Cases']['value'] = failed_testcases
measurements['Number of Successful Test Cases']['value'] = count_passed

regrtest_xml.write('<xml>\n')
regrtest_xml.write('<testsuites>\n')
regrtest_xml.write(' <testsuite_%(target)s name="testcases_%(target)s"'
                   ' tests="%(total_cases)s" time="%(time)s" >\n' %
                   {'target': target_name, 'total_cases': total_cases,
                    'time': sis_build_time})
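# The opening tag comes out as, e.g. (values illustrative):
#  <testsuite_merlin name="testcases_merlin" tests="321" time="120" >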

if state_file:
    old_state = eval(state_file.read())
    state_file.close()
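# (eval() on the state file is acceptable only because the file is
# written by this same script, via repr(new_state), at the end; it is
# not meant for untrusted input.)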


def check_state_change(testcase, state):
    # 'yes' means the testcase was not in this state in the previous build
    if testcase not in old_state[state]:
        return 'yes'
    else:
        return 'no'
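# e.g. a test that fails now but was in old_state['passed'] is reported
# with new="yes" in the XML below.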

# Each testcase has a 'new' attribute to indicate whether it moved to this
# state in this build
for i in sorted(results.keys()):
    if ((results[i]['state'] == 'failed') or
        (results[i]['state'] == 'unexpected_output') or
        (results[i]['state'] == 'crashed')):
        state_changed = check_state_change(i, 'failed')
        regrtest_xml.write(' <testcase name="' + i + '" time="1" ' +
                           'new="' + state_changed + '"> <failure>' +
                           replaceXMLentities(results[i]['output']) +
                           '</failure></testcase>\n')
        new_state['failed'].append(i)

for i in sorted(results.keys()):
    if (results[i]['state'] == 'skipped'):
        if i not in expected_skip_list and target_name != 'linux':
            unexp_skip_list.append(i)
        if i in unexp_skip_list:
            state_changed = check_state_change(i, 'skipped_unexpected')
            regrtest_xml.write(' <testcase name="' + i + '" time="1" ' +
                               'new="' + state_changed +
                               '"> <skipped_unexpected>' +
                               replaceXMLentities(results[i]['output']) +
                               '</skipped_unexpected></testcase>\n')
            new_state['skipped_unexpected'].append(i)
            count_skipped_unexpected += 1

for i in sorted(results.keys()):
    if (results[i]['state'] == 'passed'):
        state_changed = check_state_change(i, 'passed')
        regrtest_xml.write(' <testcase name="' + i + '" time="1" ' +
                           'new="' + state_changed + '"> <success>' +
                           replaceXMLentities(results[i]['output']) +
                           '</success></testcase>\n')
        new_state['passed'].append(i)

for i in sorted(results.keys()):
    if (results[i]['state'] == 'skipped'):
        if i not in unexp_skip_list:
            state_changed = check_state_change(i, 'skipped_expected')
            regrtest_xml.write(' <testcase name="' + i + '" time="1" ' +
                               'new="' + state_changed +
                               '"> <skipped_expected>' +
                               replaceXMLentities(results[i]['output']) +
                               '</skipped_expected></testcase>\n')
            new_state['skipped_expected'].append(i)
            count_skipped_expected += 1

measurements['Number of Skipped Test Cases - Expected']['value'] = \
    count_skipped_expected
measurements['Number of Skipped Test Cases - Unexpected']['value'] = \
    count_skipped_unexpected

# Each measurement's 'delta' holds the difference in the metric w.r.t.
# the previous run; it can be positive or negative.
for item in measurements.keys():
    new_state[item] = measurements[item]['value']
    if item in old_state:
        measurements[item]['delta'] = new_state[item] - old_state[item]

# Identify the test cases that have changed state (e.g. failed -> passed)
for state in ['failed', 'passed', 'skipped_expected', 'skipped_unexpected']:
    s1 = set(new_state[state])
    s2 = set(old_state[state])
    s1.difference_update(s2)
    changeset[state] = list(s1)

# Add the test cases that have changed state both as a measurement and
# inside the state_changes tag. 'threshold' and 'direction' are omitted
# because color coding in CC is not necessary here.
regrtest_xml.write(' <state_changes>\n')
for state in ['failed', 'passed', 'skipped_expected', 'skipped_unexpected']:
    regrtest_xml.write(' <' + state + ' count="' +
                       str(len(changeset[state])) + '">\n')
    regrtest_xml.write(repr(changeset[state]))
    regrtest_xml.write(' </' + state + '>\n')
    if changeset[state]:
        measurements[state.capitalize() + ' - New'] = {}
        measurements[state.capitalize() + ' - New']['value'] = \
            repr(changeset[state])
        measurements[state.capitalize() + ' - New']['delta'] = 0
        measurements[state.capitalize() + ' - New']['unit'] = ''
regrtest_xml.write(' </state_changes>\n')

regrtest_xml.write(' <measurements>\n')
# 'direction' dictates the color coding in CruiseControl. It is changed
# from 'Neutral' only when the delta crosses the threshold; it can be
# 'Good', 'Bad' or 'Neutral'.
for item in measurements.keys():
    if measurements[item]['delta'] == 0:
        measurements[item]['direction'] = 'Neutral'
    elif measurements[item]['higher_is_better'] == 'yes':
        if measurements[item]['delta'] >= measurements[item]['threshold']:
            measurements[item]['direction'] = 'Good'
        elif measurements[item]['delta'] <= -(measurements[item]['threshold']):
            measurements[item]['direction'] = 'Bad'
    else:
        if measurements[item]['delta'] >= measurements[item]['threshold']:
            measurements[item]['direction'] = 'Bad'
        elif measurements[item]['delta'] <= -(measurements[item]['threshold']):
            measurements[item]['direction'] = 'Good'


def write_measurement(item, log_metrics=False):
    regrtest_xml.write(' <measurement>\n')
    regrtest_xml.write(' <name>' + item + '</name>\n')
    regrtest_xml.write(' <value>' +
                       str(measurements[item]['value']) + ' ' +
                       str(measurements[item]['unit']) + '</value>\n')
    regrtest_xml.write(' <direction>' +
                       measurements[item]['direction'] + '</direction>\n')
    if measurements[item]['delta']:
        regrtest_xml.write(' <delta>' +
                           str(measurements[item]['delta']) + '</delta>\n')
    regrtest_xml.write(' </measurement>\n')

    # Update the device specific log file, which is used to draw the
    # metrics graph
    if dev_metrics_log and log_metrics:
        dev_metrics_log.write(',%s=%s' %
                              (item, str(measurements[item]['value'])))
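# write_measurement emits, e.g. (values illustrative):
#  <measurement>
#  <name>Number of Failed Test Cases</name>
#  <value>2 </value>
#  <direction>Bad</direction>
#  <delta>1</delta>
#  </measurement>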

# Write the testcase related metrics first and then the memory/time benchmark
# related data, so that CC also displays them in that order
s = set(measurements.keys())
for item in testcase_metric_names:
    write_measurement(item)
s.difference_update(set(testcase_metric_names))

if dev_metrics_log:
    dev_metrics_log.write('Device=' + target_name)
    dev_metrics_log.write(',Time=' + sis_build_time)

for item in s:
    write_measurement(item, True)
regrtest_xml.write(' </measurements>\n')

regrtest_xml.write(' <build_info>\n')
for item in build_info.keys():
    regrtest_xml.write(' <item>\n')
    regrtest_xml.write(' <name>\n')
    regrtest_xml.write(item)
    regrtest_xml.write(' </name>\n')
    regrtest_xml.write(' <value>\n')
    regrtest_xml.write(str(build_info[item]) + '\n')
    regrtest_xml.write(' </value>\n')
    regrtest_xml.write(' </item>\n')
regrtest_xml.write(' </build_info>\n')

if dev_metrics_log:
    dev_metrics_log.write('\n')
    dev_metrics_log.close()

regrtest_xml.write(' </testsuite_' + target_name + '>\n')
# Persist this build's state so that the next run can compute the
# 'new' attributes and the deltas.
state_file = open(os.path.join(log_directory, 'state_' +
                               target_name + '.txt'), 'wt')
state_file.write(repr(new_state))
state_file.close()
regrtest_xml.write('</testsuites>\n')
regrtest_xml.write('</xml>')
regrtest_xml.close()