|
1 """Script to generate reports on translator classes from Doxygen sources. |
|
2 |
|
3 The main purpose of the script is to extract the information from sources |
|
4 related to internationalization (the translator classes). It uses the |
|
5 information to generate documentation (language.doc, |
|
6 translator_report.txt) from templates (language.tpl, maintainers.txt). |
|
7 |
|
8 Simply run the script without parameters to get the reports and |
|
9 documentation for all supported languages. If you want to generate the |
|
10 translator report only for some languages, pass their codes as arguments |
|
11 to the script. In that case, the language.doc will not be generated. |
|
12 Example: |
|
13 |
|
14 python translator.py en nl cz |
|
15 |
|
16 Originally, the script was written in Perl and was known as translator.pl. |
|
17 The last Perl version was dated 2002/05/21 (plus some later corrections) |
|
18 |
|
19 $Id: translator.py 708 2009-10-04 20:20:24Z dimitri $ |
|
20 |
|
21 Petr Prikryl (prikrylp@skil.cz) |
|
22 |
|
23 History: |
|
24 -------- |
|
25 2002/05/21 - This was the last Perl version. |
|
26 2003/05/16 - List of language marks can be passed as arguments. |
|
27 2004/01/24 - Total reimplementation started: classes TrManager, and Transl. |
|
28 2004/02/05 - First version that produces translator report. No language.doc yet. |
|
29 2004/02/10 - First fully functional version that generates both the translator |
|
30 report and the documentation. It is a bit slower than the |
|
31 Perl version, but is much less tricky and much more flexible. |
|
32 It also solves some problems that were not solved by the Perl |
|
33 version. The translator report content should be more useful |
|
34 for developers. |
|
35 2004/02/11 - Some tuning-up to provide more useful information. |
|
36 2004/04/16 - Added new tokens to the tokenizer (to remove some warnings). |
|
37 2004/05/25 - Added from __future__ import generators not to force Python 2.3. |
|
38 2004/06/03 - Removed dependency on textwrap module. |
|
39 2004/07/07 - Fixed the bug in the fill() function. |
|
40 2004/07/21 - Better e-mail mangling for HTML part of language.doc. |
|
41 - Plural not used for reporting a single missing method. |
|
42 - Removal of not used translator adapters is suggested only |
|
43 when the report is not restricted to selected languages |
|
44 explicitly via script arguments. |
|
45 2004/07/26 - Better reporting of not-needed adapters. |
|
46 2004/10/04 - Reporting of not called translator methods added. |
|
47 2004/10/05 - Modified to check only doxygen/src sources for the previous report. |
|
48 2005/02/28 - Slight modification to generate "mailto.txt" auxiliary file. |
|
49 2005/08/15 - Doxygen's root directory determined primarily from DOXYGEN |
|
50 environment variable. When not found, then relatively to the script. |
|
51 2007/03/20 - The "translate me!" searched in comments and reported if found. |
|
52 2008/06/09 - Warning when the MAX_DOT_GRAPH_HEIGHT is still part of trLegendDocs(). |
|
53 2009/05/09 - Changed HTML output to fit it with XHTML DTD |
|
54 2009/09/02 - Added percentage info to the report (implemented / to be implemented). |
|
55 """ |
|
56 |
|
57 from __future__ import generators |
|
58 import os, re, sys |
|
59 |
|
60 |
|
def fill(s):
    """Return the string reformatted as a wrapped multi-line paragraph.

    All runs of whitespace in `s` are collapsed into single spaces and the
    words are then re-broken into lines shorter than 70 characters.  (The
    function intentionally does not use textwrap.fill() -- the dependency
    on the textwrap module was removed on 2004/06/03, see the history.)
    """

    # Replace all whitespace by spaces, remove whitespaces that are not
    # necessary, strip the left and right whitespaces, and break the string
    # to list of words.
    rexWS = re.compile(r'\s+')
    lst = rexWS.sub(' ', s).strip().split()

    # If the list is not empty, put the words together and form the lines
    # of maximum 70 characters. Build the list of lines.
    lines = []
    if lst:
        line = lst.pop(0)   # no separation space in front of the first word
        for word in lst:
            if len(line) + len(word) < 70:
                line += ' ' + word
            else:
                lines.append(line)  # another full line formed
                line = word         # next line started
        lines.append(line)          # the last line
    return '\n'.join(lines)
|
85 |
|
86 |
|
87 # The following function dedent() is the verbatim copy from the textwrap.py |
|
88 # module. The textwrap.py was introduced in Python 2.3. To make this script |
|
89 # working also in older Python versions, I have decided to copy it. |
|
90 # Notice that the textwrap.py is copyrighted: |
|
91 # |
|
92 # Copyright (C) 1999-2001 Gregory P. Ward. |
|
93 # Copyright (C) 2002, 2003 Python Software Foundation. |
|
94 # Written by Greg Ward <gward@python.net> |
|
95 # |
|
96 # The explicit permission to use the code here was sent by Guido van Rossum |
|
97 # (4th June, 2004). |
|
98 # |
|
def dedent(text):
    """dedent(text : string) -> string

    Strip the whitespace margin that every line of `text` shares.

    Tabs are expanded first; the smallest indentation found on any
    non-blank line is then removed from all lines.  Useful for making
    indented triple-quoted strings line up with the left edge of the
    screen while keeping the source nicely indented.
    """
    lines = text.expandtabs().split('\n')

    # Collect the indentation widths of all non-blank lines.
    indents = []
    for ln in lines:
        body = ln.lstrip()
        if body:
            indents.append(len(ln) - len(body))

    # Strip the common margin, if there is one, from every line.
    if indents:
        margin = min(indents)
        if margin > 0:
            lines = [ln[margin:] for ln in lines]

    return '\n'.join(lines)
|
137 |
|
138 |
|
139 class Transl: |
|
    """One instance is built for each translator.
|
141 |
|
142 The abbreviation of the source file--part after 'translator_'--is used as |
|
143 the identification of the object. The empty string is used for the |
|
144 abstract Translator class from translator.h. The other information is |
|
145 extracted from inside the source file.""" |
|
146 |
|
147 def __init__(self, fname, manager): |
|
148 """Bind to the manager and initialize.""" |
|
149 |
|
150 # Store the filename and the reference to the manager object. |
|
151 self.fname = fname |
|
152 self.manager = manager |
|
153 |
|
154 # The instance is responsible for loading the source file, so it checks |
|
155 # for its existence and quits if something goes wrong. |
|
156 if not os.path.isfile(fname): |
|
157 sys.stderr.write("\a\nFile '%s' not found!\n" % fname) |
|
158 sys.exit(1) |
|
159 |
|
160 # Initialize the other collected information. |
|
161 self.classId = None |
|
162 self.baseClassId = None |
|
163 self.readableStatus = None # 'up-to-date', '1.2.3', '1.3', etc. |
|
164 self.status = None # '', '1.2.03', '1.3.00', etc. |
|
165 self.lang = None # like 'Brasilian' |
|
166 self.langReadable = None # like 'Brasilian Portuguese' |
|
167 self.note = None # like 'should be cleaned up' |
|
168 self.prototypeDic = {} # uniPrototype -> prototype |
|
169 self.translateMeText = 'translate me!' |
|
170 self.translateMeFlag = False # comments with "translate me!" found |
|
171 self.txtMAX_DOT_GRAPH_HEIGHT_flag = False # found in string in trLegendDocs() |
|
172 self.obsoleteMethods = None # list of prototypes to be removed |
|
173 self.missingMethods = None # list of prototypes to be implemented |
|
174 self.implementedMethods = None # list of implemented required methods |
|
175 self.adaptMinClass = None # The newest adapter class that can be used |
|
176 |
|
177 |
|
178 def __tokenGenerator(self): |
|
179 """Generator that reads the file and yields tokens as 4-tuples. |
|
180 |
|
181 The tokens have the form (tokenId, tokenString, lineNo). The |
|
182 last returned token has the form ('eof', None, None). When trying |
|
183 to access next token afer that, the exception would be raised.""" |
|
184 |
|
185 # Set the dictionary for recognizing tokenId for keywords, separators |
|
186 # and the similar categories. The key is the string to be recognized, |
|
187 # the value says its token identification. |
|
188 tokenDic = { 'class': 'class', |
|
189 'const': 'const', |
|
190 'public': 'public', |
|
191 'protected': 'protected', |
|
192 'private': 'private', |
|
193 'static': 'static', |
|
194 'virtual': 'virtual', |
|
195 ':': 'colon', |
|
196 ';': 'semic', |
|
197 ',': 'comma', |
|
198 '[': 'lsqbra', |
|
199 ']': 'rsqbra', |
|
200 '(': 'lpar', |
|
201 ')': 'rpar', |
|
202 '{': 'lcurly', |
|
203 '}': 'rcurly', |
|
204 '=': 'assign', |
|
205 '*': 'star', |
|
206 '&': 'amp', |
|
207 '+': 'plus', |
|
208 '-': 'minus', |
|
209 '!': 'excl', |
|
210 '?': 'qmark', |
|
211 '<': 'lt', |
|
212 '>': 'gt', |
|
213 "'": 'quot', |
|
214 '"': 'dquot', |
|
215 '.': 'dot', |
|
216 '%': 'perc', |
|
217 '~': 'tilde', |
|
218 '^': 'caret', |
|
219 } |
|
220 |
|
221 # Regular expression for recognizing identifiers. |
|
222 rexId = re.compile(r'^[a-zA-Z]\w*$') |
|
223 |
|
224 # Open the file for reading and extracting tokens until the eof. |
|
225 # Initialize the finite automaton. |
|
226 f = file(self.fname) |
|
227 lineNo = 0 |
|
228 line = '' # init -- see the pos initialization below |
|
229 linelen = 0 # init |
|
230 pos = 100 # init -- pos after the end of line |
|
231 status = 0 |
|
232 |
|
233 tokenId = None # init |
|
234 tokenStr = '' # init -- the characters will be appended. |
|
235 tokenLineNo = 0 |
|
236 |
|
237 while status != 777: |
|
238 |
|
239 # Get the next character. Read next line first, if necessary. |
|
240 if pos < linelen: |
|
241 c = line[pos] |
|
242 else: |
|
243 lineNo += 1 |
|
244 line = f.readline() |
|
245 linelen = len(line) |
|
246 pos = 0 |
|
247 if line == '': # eof |
|
248 status = 777 |
|
249 else: |
|
250 c = line[pos] |
|
251 |
|
252 # Consume the character based on the status |
|
253 |
|
254 if status == 0: # basic status |
|
255 |
|
256 # This is the initial status. If tokenId is set, yield the |
|
257 # token here and only here (except when eof is found). |
|
258 # Initialize the token variables after the yield. |
|
259 if tokenId: |
|
260 # If it is an unknown item, it can still be recognized |
|
261 # here. Keywords and separators are the example. |
|
262 if tokenId == 'unknown': |
|
263 if tokenDic.has_key(tokenStr): |
|
264 tokenId = tokenDic[tokenStr] |
|
265 elif tokenStr.isdigit(): |
|
266 tokenId = 'num' |
|
267 elif rexId.match(tokenStr): |
|
268 tokenId = 'id' |
|
269 else: |
|
270 msg = '\aWarning: unknown token "' + tokenStr + '"' |
|
271 msg += '\tfound on line %d' % tokenLineNo |
|
272 msg += ' in "' + self.fname + '".\n' |
|
273 sys.stderr.write(msg) |
|
274 |
|
275 yield (tokenId, tokenStr, tokenLineNo) |
|
276 |
|
277 # If it is a comment that contains the self.translateMeText |
|
278 # string, set the flag -- the situation will be reported. |
|
279 if tokenId == 'comment' and tokenStr.find(self.translateMeText) >= 0: |
|
280 self.translateMeFlag = True |
|
281 |
|
282 tokenId = None |
|
283 tokenStr = '' |
|
284 tokenLineNo = 0 |
|
285 |
|
286 # Now process the character. When we just skip it (spaces), |
|
287 # stay in this status. All characters that will be part of |
|
288 # some token cause moving to the specific status. And only |
|
289 # when moving to the status == 0 (or the final state 777), |
|
290 # the token is yielded. With respect to that the automaton |
|
291 # behaves as Moore's one (output bound to status). When |
|
292 # collecting tokens, the automaton is the Mealy's one |
|
293 # (actions bound to transitions). |
|
294 if c.isspace(): |
|
295 pass # just skip whitespace characters |
|
296 elif c == '/': # Possibly comment starts here, but |
|
297 tokenId = 'unknown' # it could be only a slash in code. |
|
298 tokenStr = c |
|
299 tokenLineNo = lineNo |
|
300 status = 1 |
|
301 elif c == '#': |
|
302 tokenId = 'preproc' # preprocessor directive |
|
303 tokenStr = c |
|
304 tokenLineNo = lineNo |
|
305 status = 5 |
|
306 elif c == '"': # string starts here |
|
307 tokenId = 'string' |
|
308 tokenStr = c |
|
309 tokenLineNo = lineNo |
|
310 status = 6 |
|
311 elif c == "'": # char literal starts here |
|
312 tokenId = 'charlit' |
|
313 tokenStr = c |
|
314 tokenLineNo = lineNo |
|
315 status = 8 |
|
316 elif tokenDic.has_key(c): # known one-char token |
|
317 tokenId = tokenDic[c] |
|
318 tokenStr = c |
|
319 tokenLineNo = lineNo |
|
320 # stay in this state to yield token immediately |
|
321 else: |
|
322 tokenId = 'unknown' # totally unknown |
|
323 tokenStr = c |
|
324 tokenLineNo = lineNo |
|
325 status = 333 |
|
326 |
|
327 pos += 1 # move position in any case |
|
328 |
|
329 elif status == 1: # possibly a comment |
|
330 if c == '/': # ... definitely the C++ comment |
|
331 tokenId = 'comment' |
|
332 tokenStr += c |
|
333 pos += 1 |
|
334 status = 2 |
|
335 elif c == '*': # ... definitely the C comment |
|
336 tokenId = 'comment' |
|
337 tokenStr += c |
|
338 pos += 1 |
|
339 status = 3 |
|
340 else: |
|
341 status = 0 # unrecognized, don't move pos |
|
342 |
|
343 elif status == 2: # inside the C++ comment |
|
344 if c == '\n': # the end of C++ comment |
|
345 status = 0 # yield the token |
|
346 else: |
|
347 tokenStr += c # collect the C++ comment |
|
348 pos += 1 |
|
349 |
|
350 elif status == 3: # inside the C comment |
|
351 if c == '*': # possibly the end of the C comment |
|
352 tokenStr += c |
|
353 status = 4 |
|
354 else: |
|
355 tokenStr += c # collect the C comment |
|
356 pos += 1 |
|
357 |
|
358 elif status == 4: # possibly the end of the C comment |
|
359 if c == '/': # definitely the end of the C comment |
|
360 tokenStr += c |
|
361 status = 0 # yield the token |
|
362 elif c == '*': # more stars inside the comment |
|
363 tokenStr += c |
|
364 else: |
|
365 tokenStr += c # this cannot be the end of comment |
|
366 status = 3 |
|
367 pos += 1 |
|
368 |
|
369 elif status == 5: # inside the preprocessor directive |
|
370 if c == '\n': # the end of the preproc. command |
|
371 status = 0 # yield the token |
|
372 else: |
|
373 tokenStr += c # collect the preproc |
|
374 pos += 1 |
|
375 |
|
376 elif status == 6: # inside the string |
|
377 if c == '\\': # escaped char inside the string |
|
378 tokenStr += c |
|
379 status = 7 |
|
380 elif c == '"': # end of the string |
|
381 tokenStr += c |
|
382 status = 0 |
|
383 else: |
|
384 tokenStr += c # collect the chars of the string |
|
385 pos += 1 |
|
386 |
|
387 elif status == 7: # escaped char inside the string |
|
388 tokenStr += c # collect the char of the string |
|
389 status = 6 |
|
390 pos += 1 |
|
391 |
|
392 elif status == 8: # inside the char literal |
|
393 tokenStr += c # collect the char of the literal |
|
394 status = 9 |
|
395 pos += 1 |
|
396 |
|
397 elif status == 9: # end of char literal expected |
|
398 if c == "'": # ... and found |
|
399 tokenStr += c |
|
400 status = 0 |
|
401 pos += 1 |
|
402 else: |
|
403 tokenId = 'error' # end of literal was expected |
|
404 tokenStr += c |
|
405 status = 0 |
|
406 |
|
407 elif status == 333: # start of the unknown token |
|
408 if c.isspace(): |
|
409 pos += 1 |
|
410 status = 0 # tokenId may be determined later |
|
411 elif tokenDic.has_key(c): # separator, don't move pos |
|
412 status = 0 |
|
413 else: |
|
414 tokenStr += c # collect |
|
415 pos += 1 |
|
416 |
|
417 # We should have finished in the final status. If some token |
|
418 # have been extracted, yield it first. |
|
419 assert(status == 777) |
|
420 if tokenId: |
|
421 yield (tokenId, tokenStr, tokenLineNo) |
|
422 tokenId = None |
|
423 tokenStr = '' |
|
424 tokenLineNo = 0 |
|
425 |
|
426 # The file content is processed. Close the file. Then always yield |
|
427 # the eof token. |
|
428 f.close() |
|
429 yield ('eof', None, None) |
|
430 |
|
431 |
|
432 def __collectClassInfo(self, tokenIterator): |
|
433 """Collect the information about the class and base class. |
|
434 |
|
435 The tokens including the opening left curly brace of the class are |
|
436 consumed.""" |
|
437 |
|
438 status = 0 # initial state |
|
439 |
|
440 while status != 777: # final state |
|
441 |
|
442 # Always assume that the previous tokens were processed. Get |
|
443 # the next one. |
|
444 tokenId, tokenStr, tokenLineNo = tokenIterator.next() |
|
445 |
|
446 # Process the token and never return back. |
|
447 if status == 0: # waiting for the 'class' keyword. |
|
448 if tokenId == 'class': |
|
449 status = 1 |
|
450 |
|
451 elif status == 1: # expecting the class identification |
|
452 if tokenId == 'id': |
|
453 self.classId = tokenStr |
|
454 status = 2 |
|
455 else: |
|
456 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
457 |
|
458 elif status == 2: # expecting the curly brace or base class info |
|
459 if tokenId == 'lcurly': |
|
460 status = 777 # correctly finished |
|
461 elif tokenId == 'colon': |
|
462 status = 3 |
|
463 else: |
|
464 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
465 |
|
466 elif status == 3: # expecting the 'public' in front of base class id |
|
467 if tokenId == 'public': |
|
468 status = 4 |
|
469 else: |
|
470 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
471 |
|
472 elif status == 4: # expecting the base class id |
|
473 if tokenId == 'id': |
|
474 self.baseClassId = tokenStr |
|
475 status = 5 |
|
476 else: |
|
477 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
478 |
|
479 elif status == 5: # expecting the curly brace and quitting |
|
480 if tokenId == 'lcurly': |
|
481 status = 777 # correctly finished |
|
482 elif tokenId == 'comment': |
|
483 pass |
|
484 else: |
|
485 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
486 |
|
487 # Extract the status of the TranslatorXxxx class. The readable form |
|
488 # will be used in reports the status form is a string that can be |
|
489 # compared lexically (unified length, padding with zeros, etc.). |
|
490 if self.baseClassId: |
|
491 lst = self.baseClassId.split('_') |
|
492 if lst[0] == 'Translator': |
|
493 self.readableStatus = 'up-to-date' |
|
494 self.status = '' |
|
495 elif lst[0] == 'TranslatorAdapter': |
|
496 self.status = lst[1] + '.' + lst[2] |
|
497 self.readableStatus = self.status |
|
498 if len(lst) > 3: # add the last part of the number |
|
499 self.status += '.' + ('%02d' % int(lst[3])) |
|
500 self.readableStatus += '.' + lst[3] |
|
501 else: |
|
502 self.status += '.00' |
|
503 elif lst[0] == 'TranslatorEnglish': |
|
504 # Obsolete or Based on English. |
|
505 if self.classId[-2:] == 'En': |
|
506 self.readableStatus = 'English based' |
|
507 self.status = 'En' |
|
508 else: |
|
509 self.readableStatus = 'obsolete' |
|
510 self.status = '0.0.00' |
|
511 |
|
512 # Check whether status was set, or set 'strange'. |
|
513 if self.status == None: |
|
514 self.status = 'strange' |
|
515 if not self.readableStatus: |
|
516 self.readableStatus = 'strange' |
|
517 |
|
518 # Extract the name of the language and the readable form. |
|
519 self.lang = self.classId[10:] # without 'Translator' |
|
520 if self.lang == 'Brazilian': |
|
521 self.langReadable = 'Brazilian Portuguese' |
|
522 elif self.lang == 'Chinesetraditional': |
|
523 self.langReadable = 'Chinese Traditional' |
|
524 else: |
|
525 self.langReadable = self.lang |
|
526 |
|
527 |
|
528 |
|
529 def __unexpectedToken(self, status, tokenId, tokenLineNo): |
|
530 """Reports unexpected token and quits with exit code 1.""" |
|
531 |
|
532 import inspect |
|
533 calledFrom = inspect.stack()[1][3] |
|
534 msg = "\a\nUnexpected token '%s' on the line %d in '%s'.\n" |
|
535 msg = msg % (tokenId, tokenLineNo, self.fname) |
|
536 msg += 'status = %d in %s()\n' % (status, calledFrom) |
|
537 sys.stderr.write(msg) |
|
538 sys.exit(1) |
|
539 |
|
540 |
|
541 def collectPureVirtualPrototypes(self): |
|
542 """Returns dictionary 'unified prototype' -> 'full prototype'. |
|
543 |
|
544 The method is expected to be called only for the translator.h. It |
|
545 extracts only the pure virtual method and build the dictionary where |
|
546 key is the unified prototype without argument identifiers.""" |
|
547 |
|
548 # Prepare empty dictionary that will be returned. |
|
549 resultDic = {} |
|
550 |
|
551 # Start the token generator which parses the class source file. |
|
552 tokenIterator = self.__tokenGenerator() |
|
553 |
|
554 # Collect the class and the base class identifiers. |
|
555 self.__collectClassInfo(tokenIterator) |
|
556 assert(self.classId == 'Translator') |
|
557 |
|
558 # Let's collect readable form of the public virtual pure method |
|
559 # prototypes in the readable form -- as defined in translator.h. |
|
560 # Let's collect also unified form of the same prototype that omits |
|
561 # everything that can be omitted, namely 'virtual' and argument |
|
562 # identifiers. |
|
563 prototype = '' # readable prototype (with everything) |
|
564 uniPrototype = '' # unified prototype (without arg. identifiers) |
|
565 |
|
566 # Collect the pure virtual method prototypes. Stop on the closing |
|
567 # curly brace followed by the semicolon (end of class). |
|
568 status = 0 |
|
569 curlyCnt = 0 # counter for the level of curly braces |
|
570 |
|
571 # Loop until the final state 777 is reached. The errors are processed |
|
572 # immediately. In this implementation, it always quits the application. |
|
573 while status != 777: |
|
574 |
|
575 # Get the next token. |
|
576 tokenId, tokenStr, tokenLineNo = tokenIterator.next() |
|
577 |
|
578 if status == 0: # waiting for 'public:' |
|
579 if tokenId == 'public': |
|
580 status = 1 |
|
581 |
|
582 elif status == 1: # colon after the 'public' |
|
583 if tokenId == 'colon': |
|
584 status = 2 |
|
585 else: |
|
586 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
587 |
|
588 elif status == 2: # waiting for 'virtual' |
|
589 if tokenId == 'virtual': |
|
590 prototype = tokenStr # but not to unified prototype |
|
591 status = 3 |
|
592 elif tokenId == 'comment': |
|
593 pass |
|
594 elif tokenId == 'rcurly': |
|
595 status = 11 # expected end of class |
|
596 else: |
|
597 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
598 |
|
599 elif status == 3: # return type of the method expected |
|
600 if tokenId == 'id': |
|
601 prototype += ' ' + tokenStr |
|
602 uniPrototype = tokenStr # start collecting the unified prototype |
|
603 status = 4 |
|
604 elif tokenId == 'tilde': |
|
605 status = 4 |
|
606 else: |
|
607 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
608 |
|
609 elif status == 4: # method identifier expected |
|
610 if tokenId == 'id': |
|
611 prototype += ' ' + tokenStr |
|
612 uniPrototype += ' ' + tokenStr |
|
613 status = 5 |
|
614 else: |
|
615 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
616 |
|
617 elif status == 5: # left bracket of the argument list expected |
|
618 if tokenId == 'lpar': |
|
619 prototype += tokenStr |
|
620 uniPrototype += tokenStr |
|
621 status = 6 |
|
622 else: |
|
623 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
624 |
|
625 elif status == 6: # collecting arguments of the method |
|
626 if tokenId == 'rpar': |
|
627 prototype += tokenStr |
|
628 uniPrototype += tokenStr |
|
629 status = 7 |
|
630 elif tokenId == 'const': |
|
631 prototype += tokenStr |
|
632 uniPrototype += tokenStr |
|
633 status = 12 |
|
634 elif tokenId == 'id': # type identifier |
|
635 prototype += tokenStr |
|
636 uniPrototype += tokenStr |
|
637 status = 13 |
|
638 else: |
|
639 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
640 |
|
641 elif status == 7: # assignment expected or left curly brace |
|
642 if tokenId == 'assign': |
|
643 status = 8 |
|
644 elif tokenId == 'lcurly': |
|
645 curlyCnt = 1 # method body entered |
|
646 status = 10 |
|
647 else: |
|
648 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
649 |
|
650 elif status == 8: # zero expected |
|
651 if tokenId == 'num' and tokenStr == '0': |
|
652 status = 9 |
|
653 else: |
|
654 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
655 |
|
656 elif status == 9: # after semicolon, produce the dic item |
|
657 if tokenId == 'semic': |
|
658 assert(not resultDic.has_key(uniPrototype)) |
|
659 resultDic[uniPrototype] = prototype |
|
660 status = 2 |
|
661 else: |
|
662 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
663 |
|
664 elif status == 10: # consuming the body of the method |
|
665 if tokenId == 'rcurly': |
|
666 curlyCnt -= 1 |
|
667 if curlyCnt == 0: |
|
668 status = 2 # body consumed |
|
669 elif tokenId == 'lcurly': |
|
670 curlyCnt += 1 |
|
671 |
|
672 elif status == 11: # probably the end of class |
|
673 if tokenId == 'semic': |
|
674 status = 777 |
|
675 else: |
|
676 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
677 |
|
678 elif status == 12: # type id for argument expected |
|
679 if tokenId == 'id': |
|
680 prototype += ' ' + tokenStr |
|
681 uniPrototype += ' ' + tokenStr |
|
682 status = 13 |
|
683 else: |
|
684 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
685 |
|
686 elif status == 13: # namespace qualification or * or & expected |
|
687 if tokenId == 'colon': # was namespace id |
|
688 prototype += tokenStr |
|
689 uniPrototype += tokenStr |
|
690 status = 14 |
|
691 elif tokenId == 'star' or tokenId == 'amp': # pointer or reference |
|
692 prototype += ' ' + tokenStr |
|
693 uniPrototype += ' ' + tokenStr |
|
694 status = 16 |
|
695 elif tokenId == 'id': # argument identifier |
|
696 prototype += ' ' + tokenStr |
|
697 # don't put this into unified prototype |
|
698 status = 17 |
|
699 else: |
|
700 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
701 |
|
702 elif status == 14: # second colon for namespace:: expected |
|
703 if tokenId == 'colon': |
|
704 prototype += tokenStr |
|
705 uniPrototype += tokenStr |
|
706 status = 15 |
|
707 else: |
|
708 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
709 |
|
710 elif status == 15: # type after namespace:: expected |
|
711 if tokenId == 'id': |
|
712 prototype += tokenStr |
|
713 uniPrototype += tokenStr |
|
714 status = 13 |
|
715 else: |
|
716 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
717 |
|
718 elif status == 16: # argument identifier expected |
|
719 if tokenId == 'id': |
|
720 prototype += ' ' + tokenStr |
|
721 # don't put this into unified prototype |
|
722 status = 17 |
|
723 else: |
|
724 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
725 |
|
726 elif status == 17: # comma or ')' after argument identifier expected |
|
727 if tokenId == 'comma': |
|
728 prototype += ', ' |
|
729 uniPrototype += ', ' |
|
730 status = 6 |
|
731 elif tokenId == 'rpar': |
|
732 prototype += tokenStr |
|
733 uniPrototype += tokenStr |
|
734 status = 7 |
|
735 else: |
|
736 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
737 |
|
738 # Eat the rest of the source to cause closing the file. |
|
739 while tokenId != 'eof': |
|
740 tokenId, tokenStr, tokenLineNo = tokenIterator.next() |
|
741 |
|
742 # Return the resulting dictionary with 'uniPrototype -> prototype'. |
|
743 return resultDic |
|
744 |
|
745 |
|
746 def __collectPublicMethodPrototypes(self, tokenIterator): |
|
747 """Collects prototypes of public methods and fills self.prototypeDic. |
|
748 |
|
749 The dictionary is filled by items: uniPrototype -> prototype. |
|
750 The method is expected to be called only for TranslatorXxxx classes, |
|
751 i.e. for the classes that implement translation to some language. |
|
752 It assumes that the openning curly brace of the class was already |
|
753 consumed. The source is consumed until the end of the class. |
|
754 The caller should consume the source until the eof to cause closing |
|
755 the source file.""" |
|
756 |
|
757 assert(self.classId != 'Translator') |
|
758 assert(self.baseClassId != None) |
|
759 |
|
760 # The following finite automaton slightly differs from the one |
|
761 # inside self.collectPureVirtualPrototypes(). It produces the |
|
762 # dictionary item just after consuming the body of the method |
|
763 # (transition from from state 10 to state 2). It also does not allow |
|
764 # definitions of public pure virtual methods, except for |
|
765 # TranslatorAdapterBase (states 8 and 9). Argument identifier inside |
|
766 # method argument lists can be omitted or commented. |
|
767 # |
|
768 # Let's collect readable form of all public method prototypes in |
|
769 # the readable form -- as defined in the source file. |
|
770 # Let's collect also unified form of the same prototype that omits |
|
771 # everything that can be omitted, namely 'virtual' and argument |
|
772 # identifiers. |
|
773 prototype = '' # readable prototype (with everything) |
|
774 uniPrototype = '' # unified prototype (without arg. identifiers) |
|
775 warning = '' # warning message -- if something special detected |
|
776 methodId = None # processed method id |
|
777 |
|
778 # Collect the method prototypes. Stop on the closing |
|
779 # curly brace followed by the semicolon (end of class). |
|
780 status = 0 |
|
781 curlyCnt = 0 # counter for the level of curly braces |
|
782 |
|
783 # Loop until the final state 777 is reached. The errors are processed |
|
784 # immediately. In this implementation, it always quits the application. |
|
785 while status != 777: |
|
786 |
|
787 # Get the next token. |
|
788 tokenId, tokenStr, tokenLineNo = tokenIterator.next() |
|
789 |
|
790 if status == 0: # waiting for 'public:' |
|
791 if tokenId == 'public': |
|
792 status = 1 |
|
793 elif tokenId == 'eof': # non-public things until the eof |
|
794 status = 777 |
|
795 |
|
796 elif status == 1: # colon after the 'public' |
|
797 if tokenId == 'colon': |
|
798 status = 2 |
|
799 else: |
|
800 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
801 |
|
802 elif status == 2: # waiting for 'virtual' (can be omitted) |
|
803 if tokenId == 'virtual': |
|
804 prototype = tokenStr # but not to unified prototype |
|
805 status = 3 |
|
806 elif tokenId == 'id': # 'virtual' was omitted |
|
807 prototype = tokenStr |
|
808 uniPrototype = tokenStr # start collecting the unified prototype |
|
809 status = 4 |
|
810 elif tokenId == 'comment': |
|
811 pass |
|
812 elif tokenId == 'protected' or tokenId == 'private': |
|
813 status = 0 |
|
814 elif tokenId == 'rcurly': |
|
815 status = 11 # expected end of class |
|
816 else: |
|
817 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
818 |
|
819 elif status == 3: # return type of the method expected |
|
820 if tokenId == 'id': |
|
821 prototype += ' ' + tokenStr |
|
822 uniPrototype = tokenStr # start collecting the unified prototype |
|
823 status = 4 |
|
824 else: |
|
825 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
826 |
|
827 elif status == 4: # method identifier expected |
|
828 if tokenId == 'id': |
|
829 prototype += ' ' + tokenStr |
|
830 uniPrototype += ' ' + tokenStr |
|
831 methodId = tokenStr # for reporting |
|
832 status = 5 |
|
833 else: |
|
834 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
835 |
|
836 elif status == 5: # left bracket of the argument list expected |
|
837 if tokenId == 'lpar': |
|
838 prototype += tokenStr |
|
839 uniPrototype += tokenStr |
|
840 status = 6 |
|
841 else: |
|
842 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
843 |
|
844 elif status == 6: # collecting arguments of the method |
|
845 if tokenId == 'rpar': |
|
846 prototype += tokenStr |
|
847 uniPrototype += tokenStr |
|
848 status = 7 |
|
849 elif tokenId == 'const': |
|
850 prototype += tokenStr |
|
851 uniPrototype += tokenStr |
|
852 status = 12 |
|
853 elif tokenId == 'id': # type identifier |
|
854 prototype += tokenStr |
|
855 uniPrototype += tokenStr |
|
856 status = 13 |
|
857 else: |
|
858 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
859 |
|
860 elif status == 7: # left curly brace expected |
|
861 if tokenId == 'lcurly': |
|
862 curlyCnt = 1 # method body entered |
|
863 status = 10 |
|
864 elif tokenId == 'comment': |
|
865 pass |
|
866 elif tokenId == 'assign': # allowed only for TranslatorAdapterBase |
|
867 assert(self.classId == 'TranslatorAdapterBase') |
|
868 status = 8 |
|
869 else: |
|
870 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
871 |
|
872 elif status == 8: # zero expected (TranslatorAdapterBase) |
|
873 assert(self.classId == 'TranslatorAdapterBase') |
|
874 if tokenId == 'num' and tokenStr == '0': |
|
875 status = 9 |
|
876 else: |
|
877 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
878 |
|
879 elif status == 9: # after semicolon (TranslatorAdapterBase) |
|
880 assert(self.classId == 'TranslatorAdapterBase') |
|
881 if tokenId == 'semic': |
|
882 status = 2 |
|
883 else: |
|
884 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
885 |
|
886 elif status == 10: # consuming the body of the method, then dic item |
|
887 if tokenId == 'rcurly': |
|
888 curlyCnt -= 1 |
|
889 if curlyCnt == 0: |
|
890 # Insert new dictionary item. |
|
891 assert(not self.prototypeDic.has_key(uniPrototype)) |
|
892 self.prototypeDic[uniPrototype] = prototype |
|
893 status = 2 # body consumed |
|
894 methodId = None # outside of any method |
|
895 elif tokenId == 'lcurly': |
|
896 curlyCnt += 1 |
|
897 |
|
898 # Warn in special case. |
|
899 elif methodId == 'trLegendDocs' and tokenId == 'string' \ |
|
900 and tokenStr.find('MAX_DOT_GRAPH_HEIGHT') >= 0: |
|
901 self.txtMAX_DOT_GRAPH_HEIGHT_flag = True |
|
902 |
|
903 |
|
904 elif status == 11: # probably the end of class |
|
905 if tokenId == 'semic': |
|
906 status = 777 |
|
907 else: |
|
908 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
909 |
|
910 elif status == 12: # type id for argument expected |
|
911 if tokenId == 'id': |
|
912 prototype += ' ' + tokenStr |
|
913 uniPrototype += ' ' + tokenStr |
|
914 status = 13 |
|
915 else: |
|
916 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
917 |
|
918 elif status == 13: # :: or * or & or id or ) expected |
|
919 if tokenId == 'colon': # was namespace id |
|
920 prototype += tokenStr |
|
921 uniPrototype += tokenStr |
|
922 status = 14 |
|
923 elif tokenId == 'star' or tokenId == 'amp': # pointer or reference |
|
924 prototype += ' ' + tokenStr |
|
925 uniPrototype += ' ' + tokenStr |
|
926 status = 16 |
|
927 elif tokenId == 'id': # argument identifier |
|
928 prototype += ' ' + tokenStr |
|
929 # don't put this into unified prototype |
|
930 status = 17 |
|
931 elif tokenId == 'comment': # probably commented-out identifier |
|
932 prototype += tokenStr |
|
933 elif tokenId == 'rpar': |
|
934 prototype += tokenStr |
|
935 uniPrototype += tokenStr |
|
936 status = 7 |
|
937 elif tokenId == 'comma': |
|
938 prototype += ', ' |
|
939 uniPrototype += ', ' |
|
940 status = 6 |
|
941 else: |
|
942 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
943 |
|
944 elif status == 14: # second colon for namespace:: expected |
|
945 if tokenId == 'colon': |
|
946 prototype += tokenStr |
|
947 uniPrototype += tokenStr |
|
948 status = 15 |
|
949 else: |
|
950 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
951 |
|
952 elif status == 15: # type after namespace:: expected |
|
953 if tokenId == 'id': |
|
954 prototype += tokenStr |
|
955 uniPrototype += tokenStr |
|
956 status = 13 |
|
957 else: |
|
958 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
959 |
|
960 elif status == 16: # argument identifier or ) expected |
|
961 if tokenId == 'id': |
|
962 prototype += ' ' + tokenStr |
|
963 # don't put this into unified prototype |
|
964 status = 17 |
|
965 elif tokenId == 'rpar': |
|
966 prototype += tokenStr |
|
967 uniPrototype += tokenStr |
|
968 status = 7 |
|
969 elif tokenId == 'comment': |
|
970 prototype += tokenStr |
|
971 else: |
|
972 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
973 |
|
974 elif status == 17: # comma or ')' after argument identifier expected |
|
975 if tokenId == 'comma': |
|
976 prototype += ', ' |
|
977 uniPrototype += ', ' |
|
978 status = 6 |
|
979 elif tokenId == 'rpar': |
|
980 prototype += tokenStr |
|
981 uniPrototype += tokenStr |
|
982 status = 7 |
|
983 else: |
|
984 self.__unexpectedToken(status, tokenId, tokenLineNo) |
|
985 |
|
986 |
|
987 |
|
988 def collectAdapterPrototypes(self): |
|
989 """Returns the dictionary of prototypes implemented by adapters. |
|
990 |
|
991 It is created to process the translator_adapter.h. The returned |
|
992 dictionary has the form: unifiedPrototype -> (version, classId) |
|
993 thus by looking for the prototype, we get the information what is |
|
994 the newest (least adapting) adapter that is sufficient for |
|
995 implementing the method.""" |
|
996 |
|
997 # Start the token generator which parses the class source file. |
|
998 assert(os.path.split(self.fname)[1] == 'translator_adapter.h') |
|
999 tokenIterator = self.__tokenGenerator() |
|
1000 |
|
1001 # Get the references to the involved dictionaries. |
|
1002 reqDic = self.manager.requiredMethodsDic |
|
1003 |
|
1004 # Create the empty dictionary that will be returned. |
|
1005 adaptDic = {} |
|
1006 |
|
1007 |
|
1008 # Loop through the source of the adapter file until no other adapter |
|
1009 # class is found. |
|
1010 while True: |
|
1011 try: |
|
1012 # Collect the class and the base class identifiers. |
|
1013 self.__collectClassInfo(tokenIterator) |
|
1014 |
|
1015 # Extract the comparable version of the adapter class. |
|
1016 # Note: The self.status as set by self.__collectClassInfo() |
|
1017 # contains similar version, but is related to the base class, |
|
1018 # not to the class itself. |
|
1019 lst = self.classId.split('_') |
|
1020 version = '' |
|
1021 if lst[0] == 'TranslatorAdapter': # TranslatorAdapterBase otherwise |
|
1022 version = lst[1] + '.' + lst[2] |
|
1023 if len(lst) > 3: # add the last part of the number |
|
1024 version += '.' + ('%02d' % int(lst[3])) |
|
1025 else: |
|
1026 version += '.00' |
|
1027 |
|
1028 # Collect the prototypes of implemented public methods. |
|
1029 self.__collectPublicMethodPrototypes(tokenIterator) |
|
1030 |
|
1031 # For the required methods, update the dictionary of methods |
|
1032 # implemented by the adapter. |
|
1033 for protoUni in self.prototypeDic: |
|
1034 if reqDic.has_key(protoUni): |
|
1035 # This required method will be marked as implemented |
|
1036 # by this adapter class. This implementation assumes |
|
1037 # that newer adapters do not reimplement any required |
|
1038 # methods already implemented by older adapters. |
|
1039 assert(not adaptDic.has_key(protoUni)) |
|
1040 adaptDic[protoUni] = (version, self.classId) |
|
1041 |
|
1042 # Clear the dictionary object and the information related |
|
1043 # to the class as the next adapter class is to be processed. |
|
1044 self.prototypeDic.clear() |
|
1045 self.classId = None |
|
1046 self.baseClassId = None |
|
1047 |
|
1048 except StopIteration: |
|
1049 break |
|
1050 |
|
1051 # Return the result dictionary. |
|
1052 return adaptDic |
|
1053 |
|
1054 |
|
    def processing(self):
        """Processing of the source file -- only for TranslatorXxxx classes.

        Collects the prototypes of the implemented public methods and
        compares them against the required methods (self.manager
        .requiredMethodsDic) and the adapter-implemented methods
        (self.manager.adaptMethodsDic). Fills self.obsoleteMethods,
        self.missingMethods, self.implementedMethods, and possibly updates
        self.status, self.readableStatus, and self.note with a suggestion
        for the maintainer."""

        # Start the token generator which parses the class source file.
        tokenIterator = self.__tokenGenerator()

        # Collect the class and the base class identifiers. This method
        # must not be used for the Translator base or the adapter classes.
        self.__collectClassInfo(tokenIterator)
        assert(self.classId != 'Translator')
        assert(self.classId[:17] != 'TranslatorAdapter')

        # Collect the prototypes of implemented public methods.
        self.__collectPublicMethodPrototypes(tokenIterator)

        # Eat the rest of the source to cause closing the file.
        # (Python 2 iterator protocol: .next() raises StopIteration at end.)
        while True:
            try:
                t = tokenIterator.next()
            except StopIteration:
                break

        # Shorthands for the used dictionaries.
        reqDic = self.manager.requiredMethodsDic
        adaptDic = self.manager.adaptMethodsDic
        myDic = self.prototypeDic

        # Build the list of obsolete methods -- implemented here but not
        # required any more.
        self.obsoleteMethods = []
        for p in myDic:
            if not reqDic.has_key(p):
                self.obsoleteMethods.append(p)

        # Build the list of missing methods and the list of implemented
        # required methods.
        self.missingMethods = []
        self.implementedMethods = []
        for p in reqDic:
            if myDic.has_key(p):
                self.implementedMethods.append(p)
            else:
                self.missingMethods.append(p)

        # Check whether adapter must be used or suggest the newest one.
        # Change the status and set the note accordingly.
        if self.baseClassId != 'Translator':
            if not self.missingMethods:
                self.note = 'Change the base class to Translator.'
                self.status = ''
                self.readableStatus = 'up-to-date'
            elif self.baseClassId != 'TranslatorEnglish':
                # The translator uses some of the adapters.
                # Look at the missing methods and check what adapter
                # implements them. Remember the one with the lowest version.
                # NOTE: versions are compared lexically as strings; this
                # works because collectAdapterPrototypes() builds them
                # zero-padded as 'major.minor.NN'.
                adaptMinVersion = '9.9.99'
                adaptMinClass = 'TranslatorAdapter_9_9_99'
                for uniProto in self.missingMethods:
                    if adaptDic.has_key(uniProto):
                        version, cls = adaptDic[uniProto]
                        if version < adaptMinVersion:
                            adaptMinVersion = version
                            adaptMinClass = cls

                # Test against the current status -- preserve the self.status.
                # Possibly, the translator implements enough methods to
                # use some newer adapter.
                status = self.status

                # If the version of the used adapter is smaller than
                # the required, set the note and update the status as if
                # the newer adapter was used.
                if adaptMinVersion > status:
                    self.note = 'Change the base class to %s.' % adaptMinClass
                    self.status = adaptMinVersion
                    self.adaptMinClass = adaptMinClass
                    self.readableStatus = adaptMinVersion # simplified

        # If everything seems OK, some explicit warning flags still could
        # be set (flags were collected earlier while parsing the source).
        if not self.note and self.status == '' and \
           (self.translateMeFlag or self.txtMAX_DOT_GRAPH_HEIGHT_flag):
            self.note = ''
            if self.translateMeFlag:
                self.note += 'The "%s" found in a comment.' % self.translateMeText
            if self.note != '':
                self.note += '\n\t\t'
            if self.txtMAX_DOT_GRAPH_HEIGHT_flag:
                self.note += 'The MAX_DOT_GRAPH_HEIGHT found in trLegendDocs()'

        # If everything seems OK, but there are obsolete methods, set
        # the note to clean-up source. This note will be used only when
        # the previous code did not set another note (priority).
        if not self.note and self.status == '' and self.obsoleteMethods:
            self.note = 'Remove the obsolete methods (never used).'
|
    def report(self, fout):
        """Writes the report part for this translator to the fout file object.

        Nothing is written (the method returns immediately) for up-to-date
        translators without a note. The method returns None; all output
        goes through fout.write()."""

        # If there is nothing to report, return immediately.
        if self.status == '' and not self.note:
            return

        # Report the number of not implemented methods.
        fout.write('\n\n\n')
        fout.write(self.classId + ' (' + self.baseClassId + ')')
        percentImplemented = 100 # init
        allNum = len(self.manager.requiredMethodsDic)
        if self.missingMethods:
            num = len(self.missingMethods)
            # Integer arithmetic (Python 2 '/' truncates) -- whole percents.
            percentImplemented = 100 * (allNum - num) / allNum
            fout.write(' %d' % num)
            fout.write(' method')
            if num > 1:
                fout.write('s')
            fout.write(' to implement (%d %%)' % (100 * num / allNum))
        fout.write('\n' + '-' * len(self.classId))

        # Write the info about the implemented required methods.
        fout.write('\n\n Implements %d' % len(self.implementedMethods))
        fout.write(' of the required methods (%d %%).' % percentImplemented)

        # Report the missing method, but only when it is not English-based
        # translator.
        if self.missingMethods and self.status != 'En':
            fout.write('\n\n Missing methods (should be implemented):\n')
            reqDic = self.manager.requiredMethodsDic
            for p in self.missingMethods:
                fout.write('\n ' + reqDic[p])

        # Always report obsolete methods.
        if self.obsoleteMethods:
            fout.write('\n\n Obsolete methods (should be removed, never used):\n')
            myDic = self.prototypeDic
            for p in self.obsoleteMethods:
                fout.write('\n ' + myDic[p])

        # For English-based translator, report the implemented methods.
        if self.status == 'En' and self.implementedMethods:
            fout.write('\n\n This English-based translator implements ')
            fout.write('the following methods:\n')
            reqDic = self.manager.requiredMethodsDic
            for p in self.implementedMethods:
                fout.write('\n ' + reqDic[p])
|
1202 def getmtime(self): |
|
1203 """Returns the last modification time of the source file.""" |
|
1204 assert(os.path.isfile(self.fname)) |
|
1205 return os.path.getmtime(self.fname) |
|
1206 |
|
1207 |
|
1208 class TrManager: |
|
1209 """Collects basic info and builds subordinate Transl objects.""" |
|
1210 |
|
1211 def __init__(self): |
|
1212 """Determines paths, creates and initializes structures. |
|
1213 |
|
1214 The arguments of the script may explicitly say what languages should |
|
1215 be processed. Write the two letter identifications that are used |
|
1216 for composing the source filenames, so... |
|
1217 |
|
1218 python translator.py cz |
|
1219 |
|
1220 this will process only translator_cz.h source. |
|
1221 """ |
|
1222 |
|
1223 # Determine the path to the script and its name. |
|
1224 self.script = os.path.abspath(sys.argv[0]) |
|
1225 self.script_path, self.script_name = os.path.split(self.script) |
|
1226 self.script_path = os.path.abspath(self.script_path) |
|
1227 |
|
1228 # Determine the absolute path to the Doxygen's root subdirectory. |
|
1229 # If DOXYGEN environment variable is not found, the directory is |
|
1230 # determined from the path of the script. |
|
1231 doxy_default = os.path.join(self.script_path, '..') |
|
1232 self.doxy_path = os.path.abspath(os.getenv('DOXYGEN', doxy_default)) |
|
1233 |
|
1234 # Get the explicit arguments of the script. |
|
1235 self.script_argLst = sys.argv[1:] |
|
1236 |
|
1237 # Build the path names based on the Doxygen's root knowledge. |
|
1238 self.doc_path = os.path.join(self.doxy_path, 'doc') |
|
1239 self.src_path = os.path.join(self.doxy_path, 'src') |
|
1240 |
|
1241 # Create the empty dictionary for Transl object identitied by the |
|
1242 # class identifier of the translator. |
|
1243 self.__translDic = {} |
|
1244 |
|
1245 # Create the None dictionary of required methods. The key is the |
|
1246 # unified prototype, the value is the full prototype. Set inside |
|
1247 # the self.__build(). |
|
1248 self.requiredMethodsDic = None |
|
1249 |
|
1250 # Create the empty dictionary that says what method is implemented |
|
1251 # by what adapter. |
|
1252 self.adaptMethodsDic = {} |
|
1253 |
|
1254 # The last modification time will capture the modification of this |
|
1255 # script, of the translator.h, of the translator_adapter.h (see the |
|
1256 # self.__build() for the last two) of all the translator_xx.h files |
|
1257 # and of the template for generating the documentation. So, this |
|
1258 # time can be compared with modification time of the generated |
|
1259 # documentation to decide, whether the doc should be re-generated. |
|
1260 self.lastModificationTime = os.path.getmtime(self.script) |
|
1261 |
|
1262 # Set the names of the translator report text file, of the template |
|
1263 # for generating "Internationalization" document, for the generated |
|
1264 # file itself, and for the maintainers list. |
|
1265 self.translatorReportFileName = 'translator_report.txt' |
|
1266 self.maintainersFileName = 'maintainers.txt' |
|
1267 self.languageTplFileName = 'language.tpl' |
|
1268 self.languageDocFileName = 'language.doc' |
|
1269 |
|
1270 # The information about the maintainers will be stored |
|
1271 # in the dictionary with the following name. |
|
1272 self.__maintainersDic = None |
|
1273 |
|
1274 # Define the other used structures and variables for information. |
|
1275 self.langLst = None # including English based |
|
1276 self.supportedLangReadableStr = None # coupled En-based as a note |
|
1277 self.numLang = None # excluding coupled En-based |
|
1278 self.doxVersion = None # Doxygen version |
|
1279 |
|
1280 # Build objects where each one is responsible for one translator. |
|
1281 self.__build() |
|
1282 |
|
1283 |
|
1284 def __build(self): |
|
1285 """Find the translator files and build the objects for translators.""" |
|
1286 |
|
1287 # The translator.h must exist (the Transl object will check it), |
|
1288 # create the object for it and let it build the dictionary of |
|
1289 # required methods. |
|
1290 tr = Transl(os.path.join(self.src_path, 'translator.h'), self) |
|
1291 self.requiredMethodsDic = tr.collectPureVirtualPrototypes() |
|
1292 tim = tr.getmtime() |
|
1293 if tim > self.lastModificationTime: |
|
1294 self.lastModificationTime = tim |
|
1295 |
|
1296 # The translator_adapter.h must exist (the Transl object will check it), |
|
1297 # create the object for it and store the reference in the dictionary. |
|
1298 tr = Transl(os.path.join(self.src_path, 'translator_adapter.h'), self) |
|
1299 self.adaptMethodsDic = tr.collectAdapterPrototypes() |
|
1300 tim = tr.getmtime() |
|
1301 if tim > self.lastModificationTime: |
|
1302 self.lastModificationTime = tim |
|
1303 |
|
1304 # Create the list of the filenames with language translator sources. |
|
1305 # If the explicit arguments of the script were typed, process only |
|
1306 # those files. |
|
1307 if self.script_argLst: |
|
1308 lst = ['translator_' + x + '.h' for x in self.script_argLst] |
|
1309 for fname in lst: |
|
1310 if not os.path.isfile(os.path.join(self.src_path, fname)): |
|
1311 sys.stderr.write("\a\nFile '%s' not found!\n" % fname) |
|
1312 sys.exit(1) |
|
1313 else: |
|
1314 lst = os.listdir(self.src_path) |
|
1315 lst = filter(lambda x: x[:11] == 'translator_' |
|
1316 and x[-2:] == '.h' |
|
1317 and x != 'translator_adapter.h', lst) |
|
1318 |
|
1319 # Build the object for the translator_xx.h files, and process the |
|
1320 # content of the file. Then insert the object to the dictionary |
|
1321 # accessed via classId. |
|
1322 for fname in lst: |
|
1323 fullname = os.path.join(self.src_path, fname) |
|
1324 tr = Transl(fullname, self) |
|
1325 tr.processing() |
|
1326 assert(tr.classId != 'Translator') |
|
1327 self.__translDic[tr.classId] = tr |
|
1328 |
|
1329 # Extract the global information of the processed info. |
|
1330 self.__extractProcessedInfo() |
|
1331 |
|
1332 |
|
1333 def __extractProcessedInfo(self): |
|
1334 """Build lists and strings of the processed info.""" |
|
1335 |
|
1336 # Build the auxiliary list with strings compound of the status, |
|
1337 # readable form of the language, and classId. |
|
1338 statLst = [] |
|
1339 for obj in self.__translDic.values(): |
|
1340 assert(obj.classId != 'Translator') |
|
1341 s = obj.status + '|' + obj.langReadable + '|' + obj.classId |
|
1342 statLst.append(s) |
|
1343 |
|
1344 # Sort the list and extract the object identifiers (classId's) for |
|
1345 # the up-to-date translators and English-based translators. |
|
1346 statLst.sort() |
|
1347 self.upToDateIdLst = [x.split('|')[2] for x in statLst if x[0] == '|'] |
|
1348 self.EnBasedIdLst = [x.split('|')[2] for x in statLst if x[:2] == 'En'] |
|
1349 |
|
1350 # Reverse the list and extract the TranslatorAdapter based translators. |
|
1351 statLst.reverse() |
|
1352 self.adaptIdLst = [x.split('|')[2] for x in statLst if x[0].isdigit()] |
|
1353 |
|
1354 # Build the list of tuples that contain (langReadable, obj). |
|
1355 # Sort it by readable name. |
|
1356 self.langLst = [] |
|
1357 for obj in self.__translDic.values(): |
|
1358 self.langLst.append((obj.langReadable, obj)) |
|
1359 self.langLst.sort(lambda a, b: cmp(a[0], b[0])) |
|
1360 |
|
1361 # Create the list with readable language names. If the language has |
|
1362 # also the English-based version, modify the item by appending |
|
1363 # the note. Number of the supported languages is equal to the length |
|
1364 # of the list. |
|
1365 langReadableLst = [] |
|
1366 for name, obj in self.langLst: |
|
1367 if obj.status == 'En': continue |
|
1368 |
|
1369 # Append the 'En' to the classId to possibly obtain the classId |
|
1370 # of the English-based object. If the object exists, modify the |
|
1371 # name for the readable list of supported languages. |
|
1372 classIdEn = obj.classId + 'En' |
|
1373 if self.__translDic.has_key(classIdEn): |
|
1374 name += ' (+En)' |
|
1375 |
|
1376 # Append the result name of the language, possibly with note. |
|
1377 langReadableLst.append(name) |
|
1378 |
|
1379 # Create the multiline string of readable language names, |
|
1380 # with punctuation, wrapped to paragraph. |
|
1381 if len(langReadableLst) == 1: |
|
1382 s = langReadableLst[0] |
|
1383 elif len(langReadableLst) == 2: |
|
1384 s = ' and '.join(langReadableLst) |
|
1385 else: |
|
1386 s = ', '.join(langReadableLst[:-1]) + ', and ' |
|
1387 s += langReadableLst[-1] |
|
1388 |
|
1389 self.supportedLangReadableStr = fill(s + '.') |
|
1390 |
|
1391 # Find the number of the supported languages. The English based |
|
1392 # languages are not counted if the non-English based also exists. |
|
1393 self.numLang = len(self.langLst) |
|
1394 for name, obj in self.langLst: |
|
1395 if obj.status == 'En': |
|
1396 classId = obj.classId[:-2] |
|
1397 if self.__translDic.has_key(classId): |
|
1398 self.numLang -= 1 # the couple will be counted as one |
|
1399 |
|
1400 # Extract the version of Doxygen. |
|
1401 f = file(os.path.join(self.doxy_path, 'VERSION')) |
|
1402 self.doxVersion = f.readline().strip() |
|
1403 f.close() |
|
1404 |
|
1405 # Update the last modification time. |
|
1406 for tr in self.__translDic.values(): |
|
1407 tim = tr.getmtime() |
|
1408 if tim > self.lastModificationTime: |
|
1409 self.lastModificationTime = tim |
|
1410 |
|
1411 |
|
1412 def __getNoTrSourceFilesLst(self): |
|
1413 """Returns the list of sources to be checked. |
|
1414 |
|
1415 All .cpp files and also .h files that do not declare or define |
|
1416 the translator methods are included in the list. The file names |
|
1417 are searched in doxygen/src directory. |
|
1418 """ |
|
1419 files = [] |
|
1420 for item in os.listdir(self.src_path): |
|
1421 # Split the bare name to get the extension. |
|
1422 name, ext = os.path.splitext(item) |
|
1423 ext = ext.lower() |
|
1424 |
|
1425 # Include only .cpp and .h files (case independent) and exclude |
|
1426 # the files where the checked identifiers are defined. |
|
1427 if ext == '.cpp' or (ext == '.h' and name.find('translator') == -1): |
|
1428 fname = os.path.join(self.src_path, item) |
|
1429 assert os.path.isfile(fname) # assumes no directory with the ext |
|
1430 files.append(fname) # full name |
|
1431 return files |
|
1432 |
|
1433 |
|
1434 def __removeUsedInFiles(self, fname, dic): |
|
1435 """Removes items for method identifiers that are found in fname. |
|
1436 |
|
1437 The method reads the content of the file as one string and searches |
|
1438 for all identifiers from dic. The identifiers that were found in |
|
1439 the file are removed from the dictionary. |
|
1440 |
|
1441 Note: If more files is to be checked, the files where most items are |
|
1442 probably used should be checked first and the resulting reduced |
|
1443 dictionary should be used for checking the next files (speed up). |
|
1444 """ |
|
1445 lst_in = dic.keys() # identifiers to be searched for |
|
1446 |
|
1447 # Read content of the file as one string. |
|
1448 assert os.path.isfile(fname) |
|
1449 f = file(fname) |
|
1450 cont = f.read() |
|
1451 f.close() |
|
1452 |
|
1453 # Remove the items for identifiers that were found in the file. |
|
1454 while lst_in: |
|
1455 item = lst_in.pop(0) |
|
1456 if cont.find(item) != -1: |
|
1457 del dic[item] |
|
1458 |
|
1459 |
|
1460 def __checkForNotUsedTrMethods(self): |
|
1461 """Returns the dictionary of not used translator methods. |
|
1462 |
|
1463 The method can be called only after self.requiredMethodsDic has been |
|
1464 built. The stripped prototypes are the values, the method identifiers |
|
1465 are the keys. |
|
1466 """ |
|
1467 # Build the dictionary of the required method prototypes with |
|
1468 # method identifiers used as keys. |
|
1469 trdic = {} |
|
1470 for prototype in self.requiredMethodsDic.keys(): |
|
1471 ri = prototype.split('(')[0] |
|
1472 identifier = ri.split()[1].strip() |
|
1473 trdic[identifier] = prototype |
|
1474 |
|
1475 # Build the list of source files where translator method identifiers |
|
1476 # can be used. |
|
1477 files = self.__getNoTrSourceFilesLst() |
|
1478 |
|
1479 # Loop through the files and reduce the dictionary of id -> proto. |
|
1480 for fname in files: |
|
1481 self.__removeUsedInFiles(fname, trdic) |
|
1482 |
|
1483 # Return the dictionary of not used translator methods. |
|
1484 return trdic |
|
1485 |
|
1486 |
|
1487 |
|
1488 |
|
1489 def __email(self, classId): |
|
1490 """Returns the first maintainer for the translator class""" |
|
1491 return self.__maintainersDic[classId][0][1] |
|
1492 |
|
1493 |
|
    def generateTranslatorReport(self):
        """Generates the translator report.

        Writes the textual report (self.translatorReportFileName) into
        self.doc_path, and collects the maintainer e-mail addresses into
        the auxiliary 'mailto.txt' file in the current directory, in the
        order of the translator classes listed in the report."""

        output = os.path.join(self.doc_path, self.translatorReportFileName)

        # Open the textual report file for the output.
        f = file(output, 'w')

        # Output the information about the version.
        f.write('(' + self.doxVersion + ')\n\n')

        # Output the information about the number of the supported languages
        # and the list of the languages, or only the note about the explicitly
        # given languages to process.
        if self.script_argLst:
            f.write('The report was generated for the following, explicitly')
            f.write(' identified languages:\n\n')
            f.write(self.supportedLangReadableStr + '\n\n')
        else:
            f.write('Doxygen supports the following ')
            f.write(str(self.numLang))
            f.write(' languages (sorted alphabetically):\n\n')
            f.write(self.supportedLangReadableStr + '\n\n')

        # Write the summary about the status of language translators (how
        # many translators) are up-to-date, etc.
        s = 'Of them, %d translators are up-to-date, ' % len(self.upToDateIdLst)
        s += '%d translators are based on some adapter class, ' % len(self.adaptIdLst)
        s += 'and %d are English based.' % len(self.EnBasedIdLst)
        f.write(fill(s) + '\n\n')

        # The e-mail addresses of the maintainers will be collected to
        # the auxiliary file in the order of translator classes listed
        # in the translator report.
        fmail = file('mailto.txt', 'w')

        # Write the list of up-to-date translator classes.
        if self.upToDateIdLst:
            s = '''The following translator classes are up-to-date (sorted
                alphabetically). This means that they derive from the
                Translator class and they implement all %d of the required
                methods. Anyway, there still may be some details listed even
                for them:'''
            s = s % len(self.requiredMethodsDic)
            f.write('-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            mailtoLst = []
            for x in self.upToDateIdLst:
                obj = self.__translDic[x]
                f.write(' ' + obj.classId)
                if obj.note:
                    f.write(' -- ' + obj.note)
                f.write('\n')
                mailtoLst.append(self.__email(obj.classId))

            fmail.write('up-to-date\n')
            fmail.write('; '.join(mailtoLst))

        # Write the list of the adapter based classes. The very obsolete
        # translators that derive from TranslatorEnglish are included.
        if self.adaptIdLst:
            s = '''The following translator classes need some maintenance
                (the most obsolete at the end). The other info shows the
                estimation of Doxygen version when the class was last
                updated and number of methods that must be implemented to
                become up-to-date:'''
            f.write('\n' + '-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            # Find also whether some adapter classes may be removed.
            adaptMinVersion = '9.9.99'

            mailtoLst = []
            numRequired = len(self.requiredMethodsDic)
            for x in self.adaptIdLst:
                obj = self.__translDic[x]
                f.write(' %-30s' % obj.classId)
                f.write(' %-6s' % obj.readableStatus)
                numimpl = len(obj.missingMethods)
                pluralS = ''
                if numimpl > 1: pluralS = 's'
                # Integer percentage (Python 2 '/' truncates).
                percent = 100 * numimpl / numRequired
                f.write('\t%2d method%s to implement (%d %%)' % (
                        numimpl, pluralS, percent))
                if obj.note:
                    f.write('\n\tNote: ' + obj.note + '\n')
                f.write('\n')
                mailtoLst.append(self.__email(obj.classId)) # to maintainer

                # Check the level of required adapter classes.
                if obj.status != '0.0.00' and obj.status < adaptMinVersion:
                    adaptMinVersion = obj.status

            fmail.write('\n\ntranslator based\n')
            fmail.write('; '.join(mailtoLst))

            # Set the note if some old translator adapters are not needed
            # any more. Do it only when the script is called without arguments,
            # i.e. all languages were checked against the needed translator
            # adapters.
            if not self.script_argLst:
                to_remove = {}
                for version, adaptClassId in self.adaptMethodsDic.values():
                    if version < adaptMinVersion:
                        to_remove[adaptClassId] = True

                if to_remove:
                    lst = to_remove.keys()
                    lst.sort()
                    plural = len(lst) > 1
                    note = 'Note: The adapter class'
                    if plural: note += 'es'
                    note += ' ' + ', '.join(lst)
                    if not plural:
                        note += ' is'
                    else:
                        note += ' are'
                    note += ' not used and can be removed.'
                    f.write('\n' + fill(note) + '\n')

        # Write the list of the English-based classes.
        if self.EnBasedIdLst:
            s = '''The following translator classes derive directly from the
                TranslatorEnglish. The class identifier has the suffix 'En'
                that says that this is intentional. Usually, there is also
                a non-English based version of the translator for
                the language:'''
            f.write('\n' + '-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            for x in self.EnBasedIdLst:
                obj = self.__translDic[x]
                f.write(' ' + obj.classId)
                f.write('\timplements %d methods' % len(obj.implementedMethods))
                if obj.note:
                    f.write(' -- ' + obj.note)
                f.write('\n')

        # Check for not used translator methods and generate warning if found.
        # The check is rather time consuming, so it is not done when report
        # is restricted to explicitly given language identifiers.
        if not self.script_argLst:
            dic = self.__checkForNotUsedTrMethods()
            if dic:
                s = '''WARNING: The following translator methods are declared
                    in the Translator class but their identifiers do not appear
                    in source files. The situation should be checked. The .cpp
                    files and .h files excluding the '*translator*' files
                    in doxygen/src directory were simply searched for occurence
                    of the method identifiers:'''
                f.write('\n' + '=' * 70 + '\n')
                f.write(fill(s) + '\n\n')

                keys = dic.keys()
                keys.sort()
                for key in keys:
                    f.write(' ' + dic[key] + '\n')
                f.write('\n')

        # Write the details for the translators.
        f.write('\n' + '=' * 70)
        f.write('\nDetails for translators (classes sorted alphabetically):\n')

        cls = self.__translDic.keys()
        cls.sort()

        for c in cls:
            obj = self.__translDic[c]
            assert(obj.classId != 'Translator')
            obj.report(f)

        # Close the report file and the auxiliary file with e-mails.
        f.close()
        fmail.close()
|
1671 def __loadMaintainers(self): |
|
1672 """Load and process the file with the maintainers. |
|
1673 |
|
1674 Fills the dictionary classId -> [(name, e-mail), ...].""" |
|
1675 |
|
1676 fname = os.path.join(self.doc_path, self.maintainersFileName) |
|
1677 |
|
1678 # Include the maintainers file to the group of files checked with |
|
1679 # respect to the modification time. |
|
1680 tim = os.path.getmtime(fname) |
|
1681 if tim > self.lastModificationTime: |
|
1682 self.lastModificationTime = tim |
|
1683 |
|
1684 # Process the content of the maintainers file. |
|
1685 f = file(fname) |
|
1686 inside = False # inside the record for the language |
|
1687 lineReady = True |
|
1688 classId = None |
|
1689 maintainersLst = None |
|
1690 self.__maintainersDic = {} |
|
1691 while lineReady: |
|
1692 line = f.readline() # next line |
|
1693 lineReady = line != '' # when eof, then line == '' |
|
1694 |
|
1695 line = line.strip() # eof should also behave as separator |
|
1696 if line != '' and line[0] == '%': # skip the comment line |
|
1697 continue |
|
1698 |
|
1699 if not inside: # if outside of the record |
|
1700 if line != '': # should be language identifier |
|
1701 classId = line |
|
1702 maintainersLst = [] |
|
1703 inside = True |
|
1704 # Otherwise skip empty line that do not act as separator. |
|
1705 |
|
1706 else: # if inside the record |
|
1707 if line == '': # separator found |
|
1708 inside = False |
|
1709 else: |
|
1710 # If it is the first maintainer, create the empty list. |
|
1711 if not self.__maintainersDic.has_key(classId): |
|
1712 self.__maintainersDic[classId] = [] |
|
1713 |
|
1714 # Split the information about the maintainer and append |
|
1715 # the tuple. |
|
1716 lst = line.split(':', 1) |
|
1717 assert(len(lst) == 2) |
|
1718 t = (lst[0].strip(), lst[1].strip()) |
|
1719 self.__maintainersDic[classId].append(t) |
|
1720 f.close() |
|
1721 |
|
1722 |
|
1723 def generateLanguageDoc(self): |
|
1724 """Checks the modtime of files and generates language.doc.""" |
|
1725 self.__loadMaintainers() |
|
1726 |
|
1727 # Check the last modification time of the template file. It is the |
|
1728 # last file from the group that decide whether the documentation |
|
1729 # should or should not be generated. |
|
1730 fTplName = os.path.join(self.doc_path, self.languageTplFileName) |
|
1731 tim = os.path.getmtime(fTplName) |
|
1732 if tim > self.lastModificationTime: |
|
1733 self.lastModificationTime = tim |
|
1734 |
|
1735 # If the generated documentation exists and is newer than any of |
|
1736 # the source files from the group, do not generate it and quit |
|
1737 # quietly. |
|
1738 fDocName = os.path.join(self.doc_path, self.languageDocFileName) |
|
1739 if os.path.isfile(fDocName): |
|
1740 if os.path.getmtime(fDocName) > self.lastModificationTime: |
|
1741 return |
|
1742 |
|
1743 # The document or does not exist or is older than some of the |
|
1744 # sources. It must be generated again. |
|
1745 # |
|
1746 # Read the template of the documentation, and remove the first |
|
1747 # attention lines. |
|
1748 f = file(fTplName) |
|
1749 line = f.readline() |
|
1750 while line[0] != '/': |
|
1751 line = f.readline() |
|
1752 doctpl = line + f.read() |
|
1753 f.close() |
|
1754 |
|
1755 # Fill the tplDic by symbols that will be inserted into the |
|
1756 # document template. |
|
1757 tplDic = {} |
|
1758 |
|
1759 s = 'Do not edit this file. It was generated by the %s script.' % self.script_name |
|
1760 tplDic['editnote'] = s |
|
1761 |
|
1762 tplDic['doxVersion'] = self.doxVersion |
|
1763 tplDic['supportedLangReadableStr'] = self.supportedLangReadableStr |
|
1764 tplDic['translatorReportFileName'] = self.translatorReportFileName |
|
1765 |
|
1766 ahref = '<a href="../doc/' + self.translatorReportFileName |
|
1767 ahref += '"\n><code>doxygen/doc/' + self.translatorReportFileName |
|
1768 ahref += '</code></a>' |
|
1769 tplDic['translatorReportLink'] = ahref |
|
1770 tplDic['numLangStr'] = str(self.numLang) |
|
1771 |
|
1772 # Define templates for HTML table parts of the documentation. |
|
1773 htmlTableTpl = '''\ |
|
1774 \\htmlonly |
|
1775 <table align="center" cellspacing="0" cellpadding="0" border="0"> |
|
1776 <tr bgcolor="#000000"> |
|
1777 <td> |
|
1778 <table cellspacing="1" cellpadding="2" border="0"> |
|
1779 <tr bgcolor="#4040c0"> |
|
1780 <td ><b><font size="+1" color="#ffffff"> Language </font></b></td> |
|
1781 <td ><b><font size="+1" color="#ffffff"> Maintainer </font></b></td> |
|
1782 <td ><b><font size="+1" color="#ffffff"> Contact address </font> |
|
1783 <font size="-2" color="#ffffff">(replace the at and dot)</font></b></td> |
|
1784 <td ><b><font size="+1" color="#ffffff"> Status </font></b></td> |
|
1785 </tr> |
|
1786 <!-- table content begin --> |
|
1787 %s |
|
1788 <!-- table content end --> |
|
1789 </table> |
|
1790 </td> |
|
1791 </tr> |
|
1792 </table> |
|
1793 \\endhtmlonly |
|
1794 ''' |
|
1795 htmlTableTpl = dedent(htmlTableTpl) |
|
1796 htmlTrTpl = '\n <tr bgcolor="#ffffff">%s\n </tr>' |
|
1797 htmlTdTpl = '\n <td>%s</td>' |
|
1798 |
|
1799 # Loop through transl objects in the order of sorted readable names |
|
1800 # and add generate the content of the HTML table. |
|
1801 trlst = [] |
|
1802 for name, obj in self.langLst: |
|
1803 # Fill the table data elements for one row. The first element |
|
1804 # contains the readable name of the language. |
|
1805 lst = [ htmlTdTpl % obj.langReadable ] |
|
1806 |
|
1807 # The next two elements contain the list of maintainers |
|
1808 # and the list of their mangled e-mails. For English-based |
|
1809 # translators that are coupled with the non-English based, |
|
1810 # insert the 'see' note. |
|
1811 mm = None # init |
|
1812 ee = None # init |
|
1813 if obj.status == 'En': |
|
1814 # Check whether there is the coupled non-English. |
|
1815 classId = obj.classId[:-2] |
|
1816 if self.__translDic.has_key(classId): |
|
1817 lang = self.__translDic[classId].langReadable |
|
1818 mm = 'see the %s language' % lang |
|
1819 ee = ' ' |
|
1820 |
|
1821 if not mm and self.__maintainersDic.has_key(obj.classId): |
|
1822 lm = [ m[0] for m in self.__maintainersDic[obj.classId] ] |
|
1823 mm = '<br/>'.join(lm) |
|
1824 le = [ m[1] for m in self.__maintainersDic[obj.classId] ] |
|
1825 ee = '<br/>'.join(le) |
|
1826 |
|
1827 # Mangle the e-mail and replace the entity references. |
|
1828 if ee and ee != ' ': |
|
1829 # More than one maintainer address separated by <br> can be used. |
|
1830 emails = ee.split('<br/>') |
|
1831 mangled_list = [] |
|
1832 for email in emails: |
|
1833 name, domain = email.split('@') |
|
1834 domain = domain.replace('.', ' dot ') |
|
1835 mangled_list.append(name + ' at ' + domain) |
|
1836 ee = '<br/>'.join(mangled_list) |
|
1837 |
|
1838 if mm: |
|
1839 mm = mm.replace('č', 'č') |
|
1840 mm = mm.replace('ř', 'ř') |
|
1841 mm = mm.replace('š', 'š') |
|
1842 mm = mm.replace('ž', 'ž') |
|
1843 |
|
1844 # Append the maintainer and e-mail elements. |
|
1845 lst.append(htmlTdTpl % mm) |
|
1846 lst.append(htmlTdTpl % ee) |
|
1847 |
|
1848 # The last element contains the readable form of the status. |
|
1849 lst.append(htmlTdTpl % obj.readableStatus) |
|
1850 |
|
1851 # Join the table data to one table row. |
|
1852 trlst.append(htmlTrTpl % (''.join(lst))) |
|
1853 |
|
1854 # Join the table rows and insert into the template. |
|
1855 htmlTable = htmlTableTpl % (''.join(trlst)) |
|
1856 |
|
1857 # Define templates for LaTeX table parts of the documentation. |
|
1858 latexTableTpl = r''' |
|
1859 \latexonly |
|
1860 \begin{tabular}{|l|l|l|l|} |
|
1861 \hline |
|
1862 {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\ |
|
1863 \hline |
|
1864 %s |
|
1865 \hline |
|
1866 \end{tabular} |
|
1867 \endlatexonly |
|
1868 ''' |
|
1869 latexTableTpl = dedent(latexTableTpl) |
|
1870 latexLineTpl = '\n' + r' %s & %s & {\tt\tiny %s} & %s \\' |
|
1871 |
|
1872 # Loop through transl objects in the order of sorted readable names |
|
1873 # and add generate the content of the LaTeX table. |
|
1874 trlst = [] |
|
1875 for name, obj in self.langLst: |
|
1876 # For LaTeX, more maintainers for the same language are |
|
1877 # placed on separate rows in the table. The line separator |
|
1878 # in the table is placed explicitly above the first |
|
1879 # maintainer. Prepare the arguments for the LaTeX row template. |
|
1880 maintainers = [] |
|
1881 if self.__maintainersDic.has_key(obj.classId): |
|
1882 maintainers = self.__maintainersDic[obj.classId] |
|
1883 |
|
1884 lang = obj.langReadable |
|
1885 maintainer = None # init |
|
1886 email = None # init |
|
1887 if obj.status == 'En': |
|
1888 # Check whether there is the coupled non-English. |
|
1889 classId = obj.classId[:-2] |
|
1890 if self.__translDic.has_key(classId): |
|
1891 langNE = self.__translDic[classId].langReadable |
|
1892 maintainer = 'see the %s language' % langNE |
|
1893 email = '~' |
|
1894 |
|
1895 if not maintainer and self.__maintainersDic.has_key(obj.classId): |
|
1896 lm = [ m[0] for m in self.__maintainersDic[obj.classId] ] |
|
1897 maintainer = maintainers[0][0] |
|
1898 email = maintainers[0][1] |
|
1899 |
|
1900 status = obj.readableStatus |
|
1901 |
|
1902 # Use the template to produce the line of the table and insert |
|
1903 # the hline plus the constructed line into the table content. |
|
1904 trlst.append('\n \\hline') |
|
1905 trlst.append(latexLineTpl % (lang, maintainer, email, status)) |
|
1906 |
|
1907 # List the other maintainers for the language. Do not set |
|
1908 # lang and status for them. |
|
1909 lang = '~' |
|
1910 status = '~' |
|
1911 for m in maintainers[1:]: |
|
1912 maintainer = m[0] |
|
1913 email = m[1] |
|
1914 trlst.append(latexLineTpl % (lang, maintainer, email, status)) |
|
1915 |
|
1916 # Join the table lines and insert into the template. |
|
1917 latexTable = latexTableTpl % (''.join(trlst)) |
|
1918 |
|
1919 # Do the LaTeX replacements. |
|
1920 latexTable = latexTable.replace('á', "\\'{a}") |
|
1921 latexTable = latexTable.replace('Á', "\\'{A}") |
|
1922 latexTable = latexTable.replace('ä', '\\"{a}') |
|
1923 latexTable = latexTable.replace('ö', '\\"{o}') |
|
1924 latexTable = latexTable.replace('ø', '\\o{}') |
|
1925 latexTable = latexTable.replace('č', '\\v{c}') |
|
1926 latexTable = latexTable.replace('ř', '\\v{r}') |
|
1927 latexTable = latexTable.replace('š', '\\v{s}') |
|
1928 latexTable = latexTable.replace('ž', '\\v{z}') |
|
1929 latexTable = latexTable.replace('_', '\\_') |
|
1930 |
|
1931 # Put the HTML and LaTeX parts together and define the dic item. |
|
1932 tplDic['informationTable'] = htmlTable + '\n' + latexTable |
|
1933 |
|
1934 # Insert the symbols into the document template and write it down. |
|
1935 f = file(fDocName, 'w') |
|
1936 f.write(doctpl % tplDic) |
|
1937 f.close() |
|
1938 |
|
if __name__ == '__main__':

    # Build the manager; its constructor collects the transl objects
    # and parses the related sources.
    manager = TrManager()

    # Produce the language.doc documentation.
    manager.generateLanguageDoc()

    # Produce the translator report.
    manager.generateTranslatorReport()