1 """Script to generate reports on translator classes from Doxygen sources.
\r
3 The main purpose of the script is to extract the information from sources
\r
4 related to internationalization (the translator classes). It uses the
\r
5 information to generate documentation (language.doc,
\r
6 translator_report.txt) from templates (language.tpl, maintainers.txt).
\r
8 Simply run the script without parameters to get the reports and
\r
9 documentation for all supported languages. If you want to generate the
\r
10 translator report only for some languages, pass their codes as arguments
\r
11 to the script. In that case, the language.doc will not be generated.
\r
14 python translator.py en nl cz
\r
16 Originally, the script was written in Perl and was known as translator.pl.
\r
17 The last Perl version was dated 2002/05/21 (plus some later corrections)
\r
19 Petr Prikryl (prikryl at atlas dot cz)
\r
23 2002/05/21 - This was the last Perl version.
\r
24 2003/05/16 - List of language marks can be passed as arguments.
\r
25 2004/01/24 - Total reimplementation started: classes TrManager, and Transl.
\r
26 2004/02/05 - First version that produces translator report. No language.doc yet.
\r
27 2004/02/10 - First fully functional version that generates both the translator
\r
28 report and the documentation. It is a bit slower than the
\r
29 Perl version, but is much less tricky and much more flexible.
\r
30 It also solves some problems that were not solved by the Perl
\r
31 version. The translator report content should be more useful
\r
33 2004/02/11 - Some tuning-up to provide more useful information.
\r
34 2004/04/16 - Added new tokens to the tokenizer (to remove some warnings).
\r
35 2004/05/25 - Added from __future__ import generators not to force Python 2.3.
\r
36 2004/06/03 - Removed dependency on textwrap module.
\r
37 2004/07/07 - Fixed the bug in the fill() function.
\r
38 2004/07/21 - Better e-mail mangling for HTML part of language.doc.
\r
39 - Plural not used for reporting a single missing method.
\r
40 - Removal of not used translator adapters is suggested only
\r
41 when the report is not restricted to selected languages
\r
42 explicitly via script arguments.
\r
43 2004/07/26 - Better reporting of not-needed adapters.
\r
44 2004/10/04 - Reporting of not called translator methods added.
\r
45 2004/10/05 - Modified to check only doxygen/src sources for the previous report.
\r
46 2005/02/28 - Slight modification to generate "mailto.txt" auxiliary file.
\r
47 2005/08/15 - Doxygen's root directory determined primarily from DOXYGEN
\r
48 environment variable. When not found, then relatively to the script.
\r
49 2007/03/20 - The "translate me!" searched in comments and reported if found.
\r
50 2008/06/09 - Warning when the MAX_DOT_GRAPH_HEIGHT is still part of trLegendDocs().
\r
51 2009/05/09 - Changed HTML output to fit it with XHTML DTD
\r
52 2009/09/02 - Added percentage info to the report (implemented / to be implemented).
\r
53 2010/02/09 - Added checking/suggestion 'Reimplementation using UTF-8 suggested.
\r
54 2010/03/03 - Added [unreachable] prefix used in maintainers.txt.
\r
55 2010/05/28 - BOM skipped; minor code cleaning.
\r
56 2010/05/31 - e-mail mangled already in maintainers.txt
\r
57 2010/08/20 - maintainers.txt to UTF-8, related processing of unicode strings
\r
58 - [any mark] introduced instead of [unreachable] only
\r
59 - marks highlighted in HTML
\r
60 2010/08/30 - Highlighting in what will be the table in langhowto.html modified.
\r
61 2010/09/27 - The underscore in \latexonly part of the generated language.doc
\r
62 was prefixed by backslash (was LaTeX related error).
\r
63 2013/02/19 - Better diagnostics when translator_xx.h is too crippled.
\r
64 2013/06/25 - TranslatorDecoder checks removed after removing the class.
\r
65 2013/09/04 - Coloured status in langhowto. *ALMOST up-to-date* category
\r
66 of translators introduced.
\r
67 2014/06/16 - unified for Python 2.6+ and 3.0+
\r
70 from __future__ import print_function
\r
def xopen(fname, mode='r', encoding='utf-8-sig'):
    """Open a file uniformly for Python 2 and Python 3.

    Python 2's built-in open() does not accept the encoding argument, so
    it is simply not passed there.  On Python 3 the default 'utf-8-sig'
    encoding skips a leading BOM automatically.

    Returns the open file object; the caller is responsible for closing it.
    """
    if sys.version_info[0] == 2:
        # Python 2: open() has no encoding parameter.
        return open(fname, mode=mode)
    # Python 3: encoding honoured, BOM skipped by 'utf-8-sig'.
    return open(fname, mode=mode, encoding=encoding)
\r
def fill(s):
    """Return the string reflowed as a wrapped multiline paragraph.

    All runs of whitespace collapse to single spaces and the text is then
    broken into lines of at most ~70 characters (a hand-rolled stand-in
    for textwrap.fill()).  Whitespace-only input yields an empty string.
    """
    # Normalize all whitespace to single spaces, trim the ends, and split
    # the text into individual words.
    words = re.sub(r'\s+', ' ', s).strip().split()
    if not words:
        return ''

    # Greedily pack words into lines of maximum 70 characters.
    lines = []
    current = words[0]          # no separating space before the first word
    for word in words[1:]:
        if len(current) + len(word) < 70:
            current += ' ' + word
        else:
            lines.append(current)   # another full line formed
            current = word          # next line started
    lines.append(current)           # the last line
    return '\n'.join(lines)
\r
119 """One instance is built for each translator.
\r
121 The abbreviation of the source file--part after 'translator_'--is used as
\r
122 the identification of the object. The empty string is used for the
\r
123 abstract Translator class from translator.h. The other information is
\r
124 extracted from inside the source file."""
\r
126 def __init__(self, fname, manager):
\r
127 """Bind to the manager and initialize."""
\r
129 # Store the filename and the reference to the manager object.
\r
131 self.manager = manager
\r
133 # The instance is responsible for loading the source file, so it checks
\r
134 # for its existence and quits if something goes wrong.
\r
135 if not os.path.isfile(fname):
\r
136 sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
\r
139 # Initialize the other collected information.
\r
140 self.classId = None
\r
141 self.baseClassId = None
\r
142 self.readableStatus = None # 'up-to-date', '1.2.3', '1.3', etc.
\r
143 self.status = None # '', '1.2.03', '1.3.00', etc.
\r
144 self.lang = None # like 'Brasilian'
\r
145 self.langReadable = None # like 'Brasilian Portuguese'
\r
146 self.note = None # like 'should be cleaned up'
\r
147 self.prototypeDic = {} # uniPrototype -> prototype
\r
148 self.translateMeText = 'translate me!'
\r
149 self.translateMeFlag = False # comments with "translate me!" found
\r
150 self.txtMAX_DOT_GRAPH_HEIGHT_flag = False # found in string in trLegendDocs()
\r
151 self.obsoleteMethods = None # list of prototypes to be removed
\r
152 self.missingMethods = None # list of prototypes to be implemented
\r
153 self.implementedMethods = None # list of implemented required methods
\r
154 self.adaptMinClass = None # The newest adapter class that can be used
\r
156 def __tokenGenerator(self):
\r
157 """Generator that reads the file and yields tokens as 4-tuples.
\r
159 The tokens have the form (tokenId, tokenString, lineNo). The
\r
160 last returned token has the form ('eof', None, None). When trying
\r
161 to access next token after that, the exception would be raised."""
\r
163 # Set the dictionary for recognizing tokenId for keywords, separators
\r
164 # and the similar categories. The key is the string to be recognized,
\r
165 # the value says its token identification.
\r
166 tokenDic = { 'class': 'class',
\r
168 'public': 'public',
\r
169 'protected': 'protected',
\r
170 'private': 'private',
\r
171 'static': 'static',
\r
172 'virtual': 'virtual',
\r
199 # Regular expression for recognizing identifiers.
\r
200 rexId = re.compile(r'^[a-zA-Z]\w*$')
\r
202 # Open the file for reading and extracting tokens until the eof.
\r
203 # Initialize the finite automaton.
\r
204 f = xopen(self.fname)
\r
206 line = '' # init -- see the pos initialization below
\r
208 pos = 100 # init -- pos after the end of line
\r
211 tokenId = None # init
\r
212 tokenStr = '' # init -- the characters will be appended.
\r
215 while status != 777:
\r
217 # Get the next character. Read next line first, if necessary.
\r
222 line = f.readline()
\r
223 linelen = len(line)
\r
225 if line == '': # eof
\r
230 # Consume the character based on the status
\r
232 if status == 0: # basic status
\r
234 # This is the initial status. If tokenId is set, yield the
\r
235 # token here and only here (except when eof is found).
\r
236 # Initialize the token variables after the yield.
\r
238 # If it is an unknown item, it can still be recognized
\r
239 # here. Keywords and separators are the example.
\r
240 if tokenId == 'unknown':
\r
241 if tokenStr in tokenDic:
\r
242 tokenId = tokenDic[tokenStr]
\r
243 elif tokenStr.isdigit():
\r
245 elif rexId.match(tokenStr):
\r
248 msg = '\aWarning: unknown token "' + tokenStr + '"'
\r
249 msg += '\tfound on line %d' % tokenLineNo
\r
250 msg += ' in "' + self.fname + '".\n'
\r
251 sys.stderr.write(msg)
\r
253 yield (tokenId, tokenStr, tokenLineNo)
\r
255 # If it is a comment that contains the self.translateMeText
\r
256 # string, set the flag -- the situation will be reported.
\r
257 if tokenId == 'comment' and tokenStr.find(self.translateMeText) >= 0:
\r
258 self.translateMeFlag = True
\r
264 # Now process the character. When we just skip it (spaces),
\r
265 # stay in this status. All characters that will be part of
\r
266 # some token cause moving to the specific status. And only
\r
267 # when moving to the status == 0 (or the final state 777),
\r
268 # the token is yielded. With respect to that the automaton
\r
269 # behaves as Moore's one (output bound to status). When
\r
270 # collecting tokens, the automaton is the Mealy's one
\r
271 # (actions bound to transitions).
\r
273 pass # just skip whitespace characters
\r
274 elif c == '/': # Possibly comment starts here, but
\r
275 tokenId = 'unknown' # it could be only a slash in code.
\r
277 tokenLineNo = lineNo
\r
280 tokenId = 'preproc' # preprocessor directive
\r
282 tokenLineNo = lineNo
\r
284 elif c == '"': # string starts here
\r
287 tokenLineNo = lineNo
\r
289 elif c == "'": # char literal starts here
\r
290 tokenId = 'charlit'
\r
292 tokenLineNo = lineNo
\r
294 elif c in tokenDic: # known one-char token
\r
295 tokenId = tokenDic[c]
\r
297 tokenLineNo = lineNo
\r
298 # stay in this state to yield token immediately
\r
300 tokenId = 'unknown' # totally unknown
\r
302 tokenLineNo = lineNo
\r
305 pos += 1 # move position in any case
\r
307 elif status == 1: # possibly a comment
\r
308 if c == '/': # ... definitely the C++ comment
\r
309 tokenId = 'comment'
\r
313 elif c == '*': # ... definitely the C comment
\r
314 tokenId = 'comment'
\r
319 status = 0 # unrecognized, don't move pos
\r
321 elif status == 2: # inside the C++ comment
\r
322 if c == '\n': # the end of C++ comment
\r
323 status = 0 # yield the token
\r
325 tokenStr += c # collect the C++ comment
\r
328 elif status == 3: # inside the C comment
\r
329 if c == '*': # possibly the end of the C comment
\r
333 tokenStr += c # collect the C comment
\r
336 elif status == 4: # possibly the end of the C comment
\r
337 if c == '/': # definitely the end of the C comment
\r
339 status = 0 # yield the token
\r
340 elif c == '*': # more stars inside the comment
\r
343 tokenStr += c # this cannot be the end of comment
\r
347 elif status == 5: # inside the preprocessor directive
\r
348 if c == '\n': # the end of the preproc. command
\r
349 status = 0 # yield the token
\r
351 tokenStr += c # collect the preproc
\r
354 elif status == 6: # inside the string
\r
355 if c == '\\': # escaped char inside the string
\r
358 elif c == '"': # end of the string
\r
362 tokenStr += c # collect the chars of the string
\r
365 elif status == 7: # escaped char inside the string
\r
366 tokenStr += c # collect the char of the string
\r
370 elif status == 8: # inside the char literal
\r
371 tokenStr += c # collect the char of the literal
\r
375 elif status == 9: # end of char literal expected
\r
376 if c == "'": # ... and found
\r
381 tokenId = 'error' # end of literal was expected
\r
385 elif status == 333: # start of the unknown token
\r
388 status = 0 # tokenId may be determined later
\r
389 elif c in tokenDic: # separator, don't move pos
\r
392 tokenStr += c # collect
\r
395 # We should have finished in the final status. If some token
\r
396 # have been extracted, yield it first.
\r
397 assert(status == 777)
\r
399 yield (tokenId, tokenStr, tokenLineNo)
\r
404 # The file content is processed. Close the file. Then always yield
\r
407 yield ('eof', None, None)
\r
410 def __collectClassInfo(self, tokenIterator):
\r
411 """Collect the information about the class and base class.
\r
413 The tokens including the opening left curly brace of the class are
\r
416 status = 0 # initial state
\r
418 while status != 777: # final state
\r
420 # Always assume that the previous tokens were processed. Get
\r
422 tokenId, tokenStr, tokenLineNo = next(tokenIterator)
\r
424 # Process the token and never return back.
\r
425 if status == 0: # waiting for the 'class' keyword.
\r
426 if tokenId == 'class':
\r
429 elif status == 1: # expecting the class identification
\r
430 if tokenId == 'id':
\r
431 self.classId = tokenStr
\r
434 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
436 elif status == 2: # expecting the curly brace or base class info
\r
437 if tokenId == 'lcurly':
\r
438 status = 777 # correctly finished
\r
439 elif tokenId == 'colon':
\r
442 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
444 elif status == 3: # expecting the 'public' in front of base class id
\r
445 if tokenId == 'public':
\r
448 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
450 elif status == 4: # expecting the base class id
\r
451 if tokenId == 'id':
\r
452 self.baseClassId = tokenStr
\r
455 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
457 elif status == 5: # expecting the curly brace and quitting
\r
458 if tokenId == 'lcurly':
\r
459 status = 777 # correctly finished
\r
460 elif tokenId == 'comment':
\r
463 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
465 # Extract the status of the TranslatorXxxx class. The readable form
\r
466 # will be used in reports; the status form is a string that can be
\r
467 # compared lexically (unified length, padding with zeros, etc.).
\r
468 if self.baseClassId:
\r
469 lst = self.baseClassId.split('_')
\r
470 if lst[0] == 'Translator':
\r
471 self.readableStatus = 'up-to-date'
\r
473 elif lst[0] == 'TranslatorAdapter':
\r
474 self.status = lst[1] + '.' + lst[2]
\r
475 self.readableStatus = self.status
\r
476 if len(lst) > 3: # add the last part of the number
\r
477 self.status += '.' + ('%02d' % int(lst[3]))
\r
478 self.readableStatus += '.' + lst[3]
\r
480 self.status += '.00'
\r
481 elif lst[0] == 'TranslatorEnglish':
\r
482 # Obsolete or Based on English.
\r
483 if self.classId[-2:] == 'En':
\r
484 self.readableStatus = 'English based'
\r
487 self.readableStatus = 'obsolete'
\r
488 self.status = '0.0.00'
\r
490 # Check whether status was set, or set 'strange'.
\r
491 if self.status == None:
\r
492 self.status = 'strange'
\r
493 if not self.readableStatus:
\r
494 self.readableStatus = 'strange'
\r
496 # Extract the name of the language and the readable form.
\r
497 self.lang = self.classId[10:] # without 'Translator'
\r
498 if self.lang == 'Brazilian':
\r
499 self.langReadable = 'Brazilian Portuguese'
\r
500 elif self.lang == 'Chinesetraditional':
\r
501 self.langReadable = 'Chinese Traditional'
\r
503 self.langReadable = self.lang
\r
def __unexpectedToken(self, status, tokenId, tokenLineNo):
    """Report an unexpected token to stderr and quit with exit code 1.

    The name of the calling method is obtained via the inspect module so
    that the message pinpoints which parser automaton failed, and in
    which state.
    """
    import inspect
    caller = inspect.stack()[1][3]
    msg = "\a\nUnexpected token '%s' on the line %d in '%s'.\n"
    msg = msg % (tokenId, tokenLineNo, self.fname)
    msg += 'status = %d in %s()\n' % (status, caller)
    sys.stderr.write(msg)
    sys.exit(1)
\r
518 def collectPureVirtualPrototypes(self):
\r
519 """Returns dictionary 'unified prototype' -> 'full prototype'.
\r
521 The method is expected to be called only for the translator.h. It
\r
522 extracts only the pure virtual method and build the dictionary where
\r
523 key is the unified prototype without argument identifiers."""
\r
525 # Prepare empty dictionary that will be returned.
\r
528 # Start the token generator which parses the class source file.
\r
529 tokenIterator = self.__tokenGenerator()
\r
531 # Collect the class and the base class identifiers.
\r
532 self.__collectClassInfo(tokenIterator)
\r
533 assert(self.classId == 'Translator')
\r
535 # Let's collect readable form of the public virtual pure method
\r
536 # prototypes in the readable form -- as defined in translator.h.
\r
537 # Let's collect also unified form of the same prototype that omits
\r
538 # everything that can be omitted, namely 'virtual' and argument
\r
540 prototype = '' # readable prototype (with everything)
\r
541 uniPrototype = '' # unified prototype (without arg. identifiers)
\r
543 # Collect the pure virtual method prototypes. Stop on the closing
\r
544 # curly brace followed by the semicolon (end of class).
\r
546 curlyCnt = 0 # counter for the level of curly braces
\r
548 # Loop until the final state 777 is reached. The errors are processed
\r
549 # immediately. In this implementation, it always quits the application.
\r
550 while status != 777:
\r
552 # Get the next token.
\r
553 tokenId, tokenStr, tokenLineNo = next(tokenIterator)
\r
555 if status == 0: # waiting for 'public:'
\r
556 if tokenId == 'public':
\r
559 elif status == 1: # colon after the 'public'
\r
560 if tokenId == 'colon':
\r
563 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
565 elif status == 2: # waiting for 'virtual'
\r
566 if tokenId == 'virtual':
\r
567 prototype = tokenStr # but not to unified prototype
\r
569 elif tokenId == 'comment':
\r
571 elif tokenId == 'rcurly':
\r
572 status = 11 # expected end of class
\r
574 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
576 elif status == 3: # return type of the method expected
\r
577 if tokenId == 'id':
\r
578 prototype += ' ' + tokenStr
\r
579 uniPrototype = tokenStr # start collecting the unified prototype
\r
581 elif tokenId == 'tilde':
\r
584 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
586 elif status == 4: # method identifier expected
\r
587 if tokenId == 'id':
\r
588 prototype += ' ' + tokenStr
\r
589 uniPrototype += ' ' + tokenStr
\r
592 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
594 elif status == 5: # left bracket of the argument list expected
\r
595 if tokenId == 'lpar':
\r
596 prototype += tokenStr
\r
597 uniPrototype += tokenStr
\r
600 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
602 elif status == 6: # collecting arguments of the method
\r
603 if tokenId == 'rpar':
\r
604 prototype += tokenStr
\r
605 uniPrototype += tokenStr
\r
607 elif tokenId == 'const':
\r
608 prototype += tokenStr
\r
609 uniPrototype += tokenStr
\r
611 elif tokenId == 'id': # type identifier
\r
612 prototype += tokenStr
\r
613 uniPrototype += tokenStr
\r
616 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
618 elif status == 7: # assignment expected or left curly brace
\r
619 if tokenId == 'assign':
\r
621 elif tokenId == 'lcurly':
\r
622 curlyCnt = 1 # method body entered
\r
625 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
627 elif status == 8: # zero expected
\r
628 if tokenId == 'num' and tokenStr == '0':
\r
631 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
633 elif status == 9: # after semicolon, produce the dic item
\r
634 if tokenId == 'semic':
\r
635 assert(uniPrototype not in resultDic)
\r
636 resultDic[uniPrototype] = prototype
\r
639 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
641 elif status == 10: # consuming the body of the method
\r
642 if tokenId == 'rcurly':
\r
645 status = 2 # body consumed
\r
646 elif tokenId == 'lcurly':
\r
649 elif status == 11: # probably the end of class
\r
650 if tokenId == 'semic':
\r
653 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
655 elif status == 12: # type id for argument expected
\r
656 if tokenId == 'id':
\r
657 prototype += ' ' + tokenStr
\r
658 uniPrototype += ' ' + tokenStr
\r
661 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
663 elif status == 13: # namespace qualification or * or & expected
\r
664 if tokenId == 'colon': # was namespace id
\r
665 prototype += tokenStr
\r
666 uniPrototype += tokenStr
\r
668 elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
\r
669 prototype += ' ' + tokenStr
\r
670 uniPrototype += ' ' + tokenStr
\r
672 elif tokenId == 'id': # argument identifier
\r
673 prototype += ' ' + tokenStr
\r
674 # don't put this into unified prototype
\r
677 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
679 elif status == 14: # second colon for namespace:: expected
\r
680 if tokenId == 'colon':
\r
681 prototype += tokenStr
\r
682 uniPrototype += tokenStr
\r
685 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
687 elif status == 15: # type after namespace:: expected
\r
688 if tokenId == 'id':
\r
689 prototype += tokenStr
\r
690 uniPrototype += tokenStr
\r
693 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
695 elif status == 16: # argument identifier expected
\r
696 if tokenId == 'id':
\r
697 prototype += ' ' + tokenStr
\r
698 # don't put this into unified prototype
\r
701 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
703 elif status == 17: # comma or ')' after argument identifier expected
\r
704 if tokenId == 'comma':
\r
706 uniPrototype += ', '
\r
708 elif tokenId == 'rpar':
\r
709 prototype += tokenStr
\r
710 uniPrototype += tokenStr
\r
713 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
715 # Eat the rest of the source to cause closing the file.
\r
716 while tokenId != 'eof':
\r
717 tokenId, tokenStr, tokenLineNo = next(tokenIterator)
\r
719 # Return the resulting dictionary with 'uniPrototype -> prototype'.
\r
723 def __collectPublicMethodPrototypes(self, tokenIterator):
\r
724 """Collects prototypes of public methods and fills self.prototypeDic.
\r
726 The dictionary is filled by items: uniPrototype -> prototype.
\r
727 The method is expected to be called only for TranslatorXxxx classes,
\r
728 i.e. for the classes that implement translation to some language.
\r
729 It assumes that the opening curly brace of the class was already
\r
730 consumed. The source is consumed until the end of the class.
\r
731 The caller should consume the source until the eof to cause closing
\r
732 the source file."""
\r
734 assert(self.classId != 'Translator')
\r
735 assert(self.baseClassId != None)
\r
737 # The following finite automaton slightly differs from the one
\r
738 # inside self.collectPureVirtualPrototypes(). It produces the
\r
739 # dictionary item just after consuming the body of the method
\r
740 # (transition from state 10 to state 2). It also does not allow
\r
741 # definitions of public pure virtual methods, except for
\r
742 # TranslatorAdapterBase (states 8 and 9). Argument identifier inside
\r
743 # method argument lists can be omitted or commented.
\r
745 # Let's collect readable form of all public method prototypes in
\r
746 # the readable form -- as defined in the source file.
\r
747 # Let's collect also unified form of the same prototype that omits
\r
748 # everything that can be omitted, namely 'virtual' and argument
\r
750 prototype = '' # readable prototype (with everything)
\r
751 uniPrototype = '' # unified prototype (without arg. identifiers)
\r
752 warning = '' # warning message -- if something special detected
\r
753 methodId = None # processed method id
\r
755 # Collect the method prototypes. Stop on the closing
\r
756 # curly brace followed by the semicolon (end of class).
\r
758 curlyCnt = 0 # counter for the level of curly braces
\r
760 # Loop until the final state 777 is reached. The errors are processed
\r
761 # immediately. In this implementation, it always quits the application.
\r
762 while status != 777:
\r
764 # Get the next token.
\r
765 tokenId, tokenStr, tokenLineNo = next(tokenIterator)
\r
767 if status == 0: # waiting for 'public:'
\r
768 if tokenId == 'public':
\r
770 elif tokenId == 'eof': # non-public things until the eof
\r
773 elif status == 1: # colon after the 'public'
\r
774 if tokenId == 'colon':
\r
777 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
779 elif status == 2: # waiting for 'virtual' (can be omitted)
\r
780 if tokenId == 'virtual':
\r
781 prototype = tokenStr # but not to unified prototype
\r
783 elif tokenId == 'id': # 'virtual' was omitted
\r
784 prototype = tokenStr
\r
785 uniPrototype = tokenStr # start collecting the unified prototype
\r
787 elif tokenId == 'comment':
\r
789 elif tokenId == 'protected' or tokenId == 'private':
\r
791 elif tokenId == 'rcurly':
\r
792 status = 11 # expected end of class
\r
794 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
796 elif status == 3: # return type of the method expected
\r
797 if tokenId == 'id':
\r
798 prototype += ' ' + tokenStr
\r
799 uniPrototype = tokenStr # start collecting the unified prototype
\r
802 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
804 elif status == 4: # method identifier expected
\r
805 if tokenId == 'id':
\r
806 prototype += ' ' + tokenStr
\r
807 uniPrototype += ' ' + tokenStr
\r
808 methodId = tokenStr # for reporting
\r
811 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
813 elif status == 5: # left bracket of the argument list expected
\r
814 if tokenId == 'lpar':
\r
815 prototype += tokenStr
\r
816 uniPrototype += tokenStr
\r
819 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
821 elif status == 6: # collecting arguments of the method
\r
822 if tokenId == 'rpar':
\r
823 prototype += tokenStr
\r
824 uniPrototype += tokenStr
\r
826 elif tokenId == 'const':
\r
827 prototype += tokenStr
\r
828 uniPrototype += tokenStr
\r
830 elif tokenId == 'id': # type identifier
\r
831 prototype += tokenStr
\r
832 uniPrototype += tokenStr
\r
835 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
837 elif status == 7: # left curly brace expected
\r
838 if tokenId == 'lcurly':
\r
839 curlyCnt = 1 # method body entered
\r
841 elif tokenId == 'comment':
\r
843 elif tokenId == 'assign': # allowed only for TranslatorAdapterBase
\r
844 assert(self.classId == 'TranslatorAdapterBase')
\r
847 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
849 elif status == 8: # zero expected (TranslatorAdapterBase)
\r
850 assert(self.classId == 'TranslatorAdapterBase')
\r
851 if tokenId == 'num' and tokenStr == '0':
\r
854 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
856 elif status == 9: # after semicolon (TranslatorAdapterBase)
\r
857 assert(self.classId == 'TranslatorAdapterBase')
\r
858 if tokenId == 'semic':
\r
861 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
863 elif status == 10: # consuming the body of the method, then dic item
\r
864 if tokenId == 'rcurly':
\r
867 # Check for possible copy/paste error when name
\r
868 # of the method was not corrected (i.e. the same
\r
869 # name already exists).
\r
870 if uniPrototype in self.prototypeDic:
\r
871 msg = "'%s' prototype found again (duplicity)\n"
\r
872 msg += "in '%s'.\n" % self.fname
\r
873 msg = msg % uniPrototype
\r
874 sys.stderr.write(msg)
\r
877 assert(uniPrototype not in self.prototypeDic)
\r
878 # Insert new dictionary item.
\r
879 self.prototypeDic[uniPrototype] = prototype
\r
880 status = 2 # body consumed
\r
881 methodId = None # outside of any method
\r
882 elif tokenId == 'lcurly':
\r
885 # Warn in special case.
\r
886 elif methodId == 'trLegendDocs' and tokenId == 'string' \
\r
887 and tokenStr.find('MAX_DOT_GRAPH_HEIGHT') >= 0:
\r
888 self.txtMAX_DOT_GRAPH_HEIGHT_flag = True
\r
891 elif status == 11: # probably the end of class
\r
892 if tokenId == 'semic':
\r
895 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
897 elif status == 12: # type id for argument expected
\r
898 if tokenId == 'id':
\r
899 prototype += ' ' + tokenStr
\r
900 uniPrototype += ' ' + tokenStr
\r
903 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
905 elif status == 13: # :: or * or & or id or ) expected
\r
906 if tokenId == 'colon': # was namespace id
\r
907 prototype += tokenStr
\r
908 uniPrototype += tokenStr
\r
910 elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
\r
911 prototype += ' ' + tokenStr
\r
912 uniPrototype += ' ' + tokenStr
\r
914 elif tokenId == 'id': # argument identifier
\r
915 prototype += ' ' + tokenStr
\r
916 # don't put this into unified prototype
\r
918 elif tokenId == 'comment': # probably commented-out identifier
\r
919 prototype += tokenStr
\r
920 elif tokenId == 'rpar':
\r
921 prototype += tokenStr
\r
922 uniPrototype += tokenStr
\r
924 elif tokenId == 'comma':
\r
926 uniPrototype += ', '
\r
929 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
931 elif status == 14: # second colon for namespace:: expected
\r
932 if tokenId == 'colon':
\r
933 prototype += tokenStr
\r
934 uniPrototype += tokenStr
\r
937 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
939 elif status == 15: # type after namespace:: expected
\r
940 if tokenId == 'id':
\r
941 prototype += tokenStr
\r
942 uniPrototype += tokenStr
\r
945 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
947 elif status == 16: # argument identifier or ) expected
\r
948 if tokenId == 'id':
\r
949 prototype += ' ' + tokenStr
\r
950 # don't put this into unified prototype
\r
952 elif tokenId == 'rpar':
\r
953 prototype += tokenStr
\r
954 uniPrototype += tokenStr
\r
956 elif tokenId == 'comment':
\r
957 prototype += tokenStr
\r
959 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
961 elif status == 17: # comma or ')' after argument identifier expected
\r
962 if tokenId == 'comma':
\r
964 uniPrototype += ', '
\r
966 elif tokenId == 'rpar':
\r
967 prototype += tokenStr
\r
968 uniPrototype += tokenStr
\r
971 self.__unexpectedToken(status, tokenId, tokenLineNo)
\r
    def collectAdapterPrototypes(self):
        """Returns the dictionary of prototypes implemented by adapters.

        It is created to process the translator_adapter.h. The returned
        dictionary has the form: unifiedPrototype -> (version, classId)
        thus by looking for the prototype, we get the information what is
        the newest (least adapting) adapter that is sufficient for
        implementing the method.

        NOTE(review): this copy of the file appears to be missing several
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        # Start the token generator which parses the class source file.
        assert(os.path.split(self.fname)[1] == 'translator_adapter.h')
        tokenIterator = self.__tokenGenerator()

        # Get the references to the involved dictionaries.
        reqDic = self.manager.requiredMethodsDic

        # Create the empty dictionary that will be returned.
        # NOTE(review): the 'adaptDic = {}' initialization appears to be
        # missing here -- adaptDic is used below but never created.

        # Loop through the source of the adapter file until no other adapter
        # class is found.
        # NOTE(review): the 'try:'/'while True:' loop headers appear to be
        # missing here (the 'except StopIteration:' below has no 'try:').

        # Collect the class and the base class identifiers.
        self.__collectClassInfo(tokenIterator)

        # Extract the comparable version of the adapter class.
        # Note: The self.status as set by self.__collectClassInfo()
        # contains similar version, but is related to the base class,
        # not to the class itself.
        lst = self.classId.split('_')
        if lst[0] == 'TranslatorAdapter': # TranslatorAdapterBase otherwise
            version = lst[1] + '.' + lst[2]
            if len(lst) > 3: # add the last part of the number
                version += '.' + ('%02d' % int(lst[3]))

        # Collect the prototypes of implemented public methods.
        self.__collectPublicMethodPrototypes(tokenIterator)

        # For the required methods, update the dictionary of methods
        # implemented by the adapter.
        for protoUni in self.prototypeDic:
            if protoUni in reqDic:
                # This required method will be marked as implemented
                # by this adapter class. This implementation assumes
                # that newer adapters do not reimplement any required
                # methods already implemented by older adapters.
                assert(protoUni not in adaptDic)
                adaptDic[protoUni] = (version, self.classId)

        # Clear the dictionary object and the information related
        # to the class as the next adapter class is to be processed.
        self.prototypeDic.clear()
        self.classId = None
        self.baseClassId = None

        except StopIteration:
            # NOTE(review): the except body ('pass') appears to be missing.

        # Return the result dictionary.
        # NOTE(review): the 'return adaptDic' statement appears to be missing.
\r
    def processing(self):
        """Processing of the source file -- only for TranslatorXxxx classes.

        Fills self.obsoleteMethods, self.missingMethods and
        self.implementedMethods, and updates self.status, self.note and
        self.readableStatus accordingly.

        NOTE(review): this copy of the file appears to be missing several
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        # Start the token generator which parses the class source file.
        tokenIterator = self.__tokenGenerator()

        # Collect the class and the base class identifiers.
        self.__collectClassInfo(tokenIterator)
        assert(self.classId != 'Translator')
        assert(self.classId[:17] != 'TranslatorAdapter')

        # Collect the prototypes of implemented public methods.
        self.__collectPublicMethodPrototypes(tokenIterator)

        # Eat the rest of the source to cause closing the file.
        # NOTE(review): the enclosing 'while True:'/'try:' headers appear to
        # be missing here.
        t = next(tokenIterator)
        except StopIteration:
            # NOTE(review): the except body ('break'/'pass') appears to be
            # missing here.

        # Shorthands for the used dictionaries.
        reqDic = self.manager.requiredMethodsDic
        adaptDic = self.manager.adaptMethodsDic
        myDic = self.prototypeDic

        # Build the list of obsolete methods.
        self.obsoleteMethods = []
        # NOTE(review): the 'for p in myDic:' loop header appears to be
        # missing here.
        if p not in reqDic:
            self.obsoleteMethods.append(p)
        self.obsoleteMethods.sort()

        # Build the list of missing methods and the list of implemented
        # required methods.
        self.missingMethods = []
        self.implementedMethods = []
        # NOTE(review): the 'for p in reqDic:' loop with its
        # 'if p in myDic:'/'else:' branches appears to be missing here.
        self.implementedMethods.append(p)
        self.missingMethods.append(p)
        self.missingMethods.sort()
        self.implementedMethods.sort()

        # Check whether adapter must be used or suggest the newest one.
        # Change the status and set the note accordingly.
        if self.baseClassId != 'Translator':
            if not self.missingMethods:
                self.note = 'Change the base class to Translator.'
                # NOTE(review): a "self.status = ''" assignment appears to be
                # missing here.
                self.readableStatus = 'almost up-to-date'
            elif self.baseClassId != 'TranslatorEnglish':
                # The translator uses some of the adapters.
                # Look at the missing methods and check what adapter
                # implements them. Remember the one with the lowest version.
                adaptMinVersion = '9.9.99'
                adaptMinClass = 'TranslatorAdapter_9_9_99'
                for uniProto in self.missingMethods:
                    if uniProto in adaptDic:
                        version, cls = adaptDic[uniProto]
                        if version < adaptMinVersion:
                            adaptMinVersion = version
                            adaptMinClass = cls

                # Test against the current status -- preserve the self.status.
                # Possibly, the translator implements enough methods to
                # use some newer adapter.
                status = self.status

                # If the version of the used adapter is smaller than
                # the required, set the note and update the status as if
                # the newer adapter was used.
                if adaptMinVersion > status:
                    self.note = 'Change the base class to %s.' % adaptMinClass
                    self.status = adaptMinVersion
                    self.adaptMinClass = adaptMinClass
                    self.readableStatus = adaptMinVersion # simplified

        # If everything seems OK, some explicit warning flags still could
        # be set.
        if not self.note and self.status == '' and \
           (self.translateMeFlag or self.txtMAX_DOT_GRAPH_HEIGHT_flag):
            # NOTE(review): a "self.note = ''" initialization appears to be
            # missing here ('+=' below would fail on None otherwise).
            if self.translateMeFlag:
                self.note += 'The "%s" found in a comment.' % self.translateMeText
            if self.note != '':
                self.note += '\n\t\t'
            if self.txtMAX_DOT_GRAPH_HEIGHT_flag:
                self.note += 'The MAX_DOT_GRAPH_HEIGHT found in trLegendDocs()'

        # If everything seems OK, but there are obsolete methods, set
        # the note to clean-up source. This note will be used only when
        # the previous code did not set another note (priority).
        if not self.note and self.status == '' and self.obsoleteMethods:
            self.note = 'Remove the obsolete methods (never used).'

        # If there is at least some note but the status suggests it is
        # otherwise up-to-date, mark is as ALMOST up-to-date.
        if self.note and self.status == '':
            self.readableStatus = 'almost up-to-date'
\r
    def report(self, fout):
        """Writes the report part for this translator source to fout.

        No output for up-to-date translators without problem.

        NOTE(review): this copy of the file appears to be missing a few
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        # If there is nothing to report, return immediately.
        if self.status == '' and not self.note:
            # NOTE(review): the early 'return' statement appears to be
            # missing here.

        # Report the number of not implemented methods.
        fout.write('\n\n\n')
        fout.write(self.classId + ' (' + self.baseClassId + ')')
        percentImplemented = 100 # init
        allNum = len(self.manager.requiredMethodsDic)
        if self.missingMethods:
            num = len(self.missingMethods)
            percentImplemented = 100 * (allNum - num) / allNum
            fout.write(' %d' % num)
            fout.write(' method')
            # NOTE(review): the plural handling ("if num > 1: fout.write('s')")
            # appears to be missing here.
            fout.write(' to implement (%d %%)' % (100 * num / allNum))
        fout.write('\n' + '-' * len(self.classId))

        # Write the info about the implemented required methods.
        fout.write('\n\n Implements %d' % len(self.implementedMethods))
        fout.write(' of the required methods (%d %%).' % percentImplemented)

        # Report the missing method, but only when it is not English-based
        # translator.
        if self.missingMethods and self.status != 'En':
            fout.write('\n\n Missing methods (should be implemented):\n')
            reqDic = self.manager.requiredMethodsDic
            for p in self.missingMethods:
                fout.write('\n ' + reqDic[p])

        # Always report obsolete methods.
        if self.obsoleteMethods:
            fout.write('\n\n Obsolete methods (should be removed, never used):\n')
            myDic = self.prototypeDic
            for p in self.obsoleteMethods:
                fout.write('\n ' + myDic[p])

        # For English-based translator, report the implemented methods.
        if self.status == 'En' and self.implementedMethods:
            fout.write('\n\n This English-based translator implements ')
            fout.write('the following methods:\n')
            reqDic = self.manager.requiredMethodsDic
            for p in self.implementedMethods:
                fout.write('\n ' + reqDic[p])
\r
1197 def getmtime(self):
\r
1198 """Returns the last modification time of the source file."""
\r
1199 assert(os.path.isfile(self.fname))
\r
1200 return os.path.getmtime(self.fname)
\r
1204 """Collects basic info and builds subordinate Transl objects."""
\r
    def __init__(self):
        """Determines paths, creates and initializes structures.

        The arguments of the script may explicitly say what languages should
        be processed. Write the two letter identifications that are used
        for composing the source filenames, so...

            python translator.py cz

        this will process only translator_cz.h source."""

        # Determine the path to the script and its name.
        self.script = os.path.abspath(sys.argv[0])
        self.script_path, self.script_name = os.path.split(self.script)
        self.script_path = os.path.abspath(self.script_path)

        # Determine the absolute path to the Doxygen's root subdirectory.
        # If DOXYGEN environment variable is not found, the directory is
        # determined from the path of the script.
        doxy_default = os.path.join(self.script_path, '..')
        self.doxy_path = os.path.abspath(os.getenv('DOXYGEN', doxy_default))

        # Get the explicit arguments of the script.
        self.script_argLst = sys.argv[1:]

        # Build the path names based on the Doxygen's root knowledge.
        self.doc_path = os.path.join(self.doxy_path, 'doc')
        self.src_path = os.path.join(self.doxy_path, 'src')

        # Create the empty dictionary for Transl object identified by the
        # class identifier of the translator.
        self.__translDic = {}

        # Create the None dictionary of required methods. The key is the
        # unified prototype, the value is the full prototype. Set inside
        # the self.__build().
        self.requiredMethodsDic = None

        # Create the empty dictionary that says what method is implemented
        # by what adapter.
        self.adaptMethodsDic = {}

        # The last modification time will capture the modification of this
        # script, of the translator.h, of the translator_adapter.h (see the
        # self.__build() for the last two) of all the translator_xx.h files
        # and of the template for generating the documentation. So, this
        # time can be compared with modification time of the generated
        # documentation to decide, whether the doc should be re-generated.
        self.lastModificationTime = os.path.getmtime(self.script)

        # Set the names of the translator report text file, of the template
        # for generating "Internationalization" document, for the generated
        # file itself, and for the maintainers list.
        self.translatorReportFileName = 'translator_report.txt'
        self.maintainersFileName = 'maintainers.txt'
        self.languageTplFileName = 'language.tpl'
        self.languageDocFileName = 'language.doc'

        # The information about the maintainers will be stored
        # in the dictionary with the following name.
        self.__maintainersDic = None

        # Define the other used structures and variables for information.
        self.langLst = None # including English based
        self.supportedLangReadableStr = None # coupled En-based as a note
        self.numLang = None # excluding coupled En-based
        self.doxVersion = None # Doxygen version

        # Build objects where each one is responsible for one translator.
        # NOTE(review): the 'self.__build()' call appears to be missing here
        # in this copy -- without it none of the structures above are filled.
\r
    def __build(self):
        """Find the translator files and build the objects for translators.

        NOTE(review): this copy of the file appears to be missing several
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        # The translator.h must exist (the Transl object will check it),
        # create the object for it and let it build the dictionary of
        # required methods.
        tr = Transl(os.path.join(self.src_path, 'translator.h'), self)
        self.requiredMethodsDic = tr.collectPureVirtualPrototypes()
        tim = tr.getmtime()
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # The translator_adapter.h must exist (the Transl object will check it),
        # create the object for it and store the reference in the dictionary.
        tr = Transl(os.path.join(self.src_path, 'translator_adapter.h'), self)
        self.adaptMethodsDic = tr.collectAdapterPrototypes()
        tim = tr.getmtime()
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # Create the list of the filenames with language translator sources.
        # If the explicit arguments of the script were typed, process only
        # those files.
        if self.script_argLst:
            lst = ['translator_' + x + '.h' for x in self.script_argLst]
            # NOTE(review): a 'for fname in lst:' loop header appears to be
            # missing here.
            if not os.path.isfile(os.path.join(self.src_path, fname)):
                sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
                # NOTE(review): the error exit statement appears to be missing.
        # NOTE(review): the 'else:' branch header appears to be missing here.
            lst = os.listdir(self.src_path)
            lst = [x for x in lst if x[:11] == 'translator_'
                                     and x[-2:] == '.h'
                                     and x != 'translator_adapter.h']

        # Build the object for the translator_xx.h files, and process the
        # content of the file. Then insert the object to the dictionary
        # accessed via classId.
        # NOTE(review): the 'for fname in lst:' loop header and the
        # 'tr.processing()' call appear to be missing around here.
        fullname = os.path.join(self.src_path, fname)
        tr = Transl(fullname, self)
        assert(tr.classId != 'Translator')
        self.__translDic[tr.classId] = tr

        # Extract the global information of the processed info.
        self.__extractProcessedInfo()
\r
    def __extractProcessedInfo(self):
        """Build lists and strings of the processed info.

        NOTE(review): this copy of the file appears to be missing several
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        # Build the auxiliary list with strings compound of the status,
        # readable form of the language, and classId.
        # NOTE(review): the 'statLst = []' initialization appears to be
        # missing here.
        for obj in list(self.__translDic.values()):
            assert(obj.classId != 'Translator')
            s = obj.status + '|' + obj.langReadable + '|' + obj.classId
            # NOTE(review): the 'statLst.append(s)' call appears to be missing.

        # Sort the list and extract the object identifiers (classId's) for
        # the up-to-date translators and English-based translators.
        # NOTE(review): the 'statLst.sort()' call appears to be missing here.
        self.upToDateIdLst = [x.split('|')[2] for x in statLst if x[0] == '|']
        self.EnBasedIdLst = [x.split('|')[2] for x in statLst if x[:2] == 'En']

        # Reverse the list and extract the TranslatorAdapter based translators.
        # NOTE(review): the 'statLst.reverse()' call appears to be missing here.
        self.adaptIdLst = [x.split('|')[2] for x in statLst if x[0].isdigit()]

        # Build the list of tuples that contain (langReadable, obj).
        # Sort it by readable name.
        # NOTE(review): the 'self.langLst = []' initialization appears to be
        # missing here.
        for obj in list(self.__translDic.values()):
            self.langLst.append((obj.langReadable, obj))

        self.langLst.sort(key=lambda x: x[0])

        # Create the list with readable language names. If the language has
        # also the English-based version, modify the item by appending
        # the note. Number of the supported languages is equal to the length
        # of the list.
        langReadableLst = []
        for name, obj in self.langLst:
            if obj.status == 'En': continue

            # Append the 'En' to the classId to possibly obtain the classId
            # of the English-based object. If the object exists, modify the
            # name for the readable list of supported languages.
            classIdEn = obj.classId + 'En'
            if classIdEn in self.__translDic:
                # NOTE(review): the statement that appends the note to 'name'
                # appears to be missing here.

            # Append the result name of the language, possibly with note.
            langReadableLst.append(name)

        # Create the multiline string of readable language names,
        # with punctuation, wrapped to paragraph.
        if len(langReadableLst) == 1:
            s = langReadableLst[0]
        elif len(langReadableLst) == 2:
            s = ' and '.join(langReadableLst)
        # NOTE(review): the 'else:' branch header appears to be missing here.
            s = ', '.join(langReadableLst[:-1]) + ', and '
            s += langReadableLst[-1]

        self.supportedLangReadableStr = fill(s + '.')

        # Find the number of the supported languages. The English based
        # languages are not counted if the non-English based also exists.
        self.numLang = len(self.langLst)
        for name, obj in self.langLst:
            if obj.status == 'En':
                classId = obj.classId[:-2]
                if classId in self.__translDic:
                    self.numLang -= 1 # the couple will be counted as one

        # Extract the version of Doxygen.
        f = xopen(os.path.join(self.doxy_path, 'VERSION'))
        self.doxVersion = f.readline().strip()
        # NOTE(review): the 'f.close()' call appears to be missing here.

        # Update the last modification time.
        for tr in list(self.__translDic.values()):
            tim = tr.getmtime()
            if tim > self.lastModificationTime:
                self.lastModificationTime = tim
\r
1408 def __getNoTrSourceFilesLst(self):
\r
1409 """Returns the list of sources to be checked.
\r
1411 All .cpp files and also .h files that do not declare or define
\r
1412 the translator methods are included in the list. The file names
\r
1413 are searched in doxygen/src directory.
\r
1416 for item in os.listdir(self.src_path):
\r
1417 # Split the bare name to get the extension.
\r
1418 name, ext = os.path.splitext(item)
\r
1421 # Include only .cpp and .h files (case independent) and exclude
\r
1422 # the files where the checked identifiers are defined.
\r
1423 if ext == '.cpp' or (ext == '.h' and name.find('translator') == -1):
\r
1424 fname = os.path.join(self.src_path, item)
\r
1425 assert os.path.isfile(fname) # assumes no directory with the ext
\r
1426 files.append(fname) # full name
\r
    def __removeUsedInFiles(self, fname, dic):
        """Removes items for method identifiers that are found in fname.

        The method reads the content of the file as one string and searches
        for all identifiers from dic. The identifiers that were found in
        the file are removed from the dictionary.

        Note: If more files is to be checked, the files where most items are
        probably used should be checked first and the resulting reduced
        dictionary should be used for checking the next files (speed up).
        """
        lst_in = list(dic.keys()) # identifiers to be searched for

        # Read content of the file as one string.
        assert os.path.isfile(fname)
        # NOTE(review): the statements that actually read the file (open,
        # read into 'cont', close) appear to be missing in this copy.

        # Remove the items for identifiers that were found in the file.
        # NOTE(review): the 'while lst_in:' loop header appears to be
        # missing here.
        item = lst_in.pop(0)
        if cont.find(item) != -1:
            # NOTE(review): the 'del dic[item]' statement appears to be
            # missing here.
\r
1456 def __checkForNotUsedTrMethods(self):
\r
1457 """Returns the dictionary of not used translator methods.
\r
1459 The method can be called only after self.requiredMethodsDic has been
\r
1460 built. The stripped prototypes are the values, the method identifiers
\r
1463 # Build the dictionary of the required method prototypes with
\r
1464 # method identifiers used as keys.
\r
1466 for prototype in list(self.requiredMethodsDic.keys()):
\r
1467 ri = prototype.split('(')[0]
\r
1468 identifier = ri.split()[1].strip()
\r
1469 trdic[identifier] = prototype
\r
1471 # Build the list of source files where translator method identifiers
\r
1473 files = self.__getNoTrSourceFilesLst()
\r
1475 # Loop through the files and reduce the dictionary of id -> proto.
\r
1476 for fname in files:
\r
1477 self.__removeUsedInFiles(fname, trdic)
\r
1479 # Return the dictionary of not used translator methods.
\r
1483 def __emails(self, classId):
\r
1484 """Returns the list of maintainer emails.
\r
1486 The method returns the list of e-mail addresses for the translator
\r
1487 class, but only the addresses that were not marked as [xxx]."""
\r
1489 for m in self.__maintainersDic[classId]:
\r
1490 if not m[1].startswith('['):
\r
1492 email = email.replace(' at ', '@') # Unmangle the mangled e-mail
\r
1493 email = email.replace(' dot ', '.')
\r
1498 def getBgcolorByReadableStatus(self, readableStatus):
\r
1499 if readableStatus == 'up-to-date':
\r
1500 color = '#ccffcc' # green
\r
1501 elif readableStatus.startswith('almost'):
\r
1502 color = '#ffffff' # white
\r
1503 elif readableStatus.startswith('English'):
\r
1504 color = '#ccffcc' # green
\r
1505 elif readableStatus.startswith('1.8'):
\r
1506 color = '#ffffcc' # yellow
\r
1507 elif readableStatus.startswith('1.7'):
\r
1508 color = '#ffcccc' # pink
\r
1509 elif readableStatus.startswith('1.6'):
\r
1510 color = '#ffcccc' # pink
\r
1512 color = '#ff5555' # red
\r
    def generateTranslatorReport(self):
        """Generates the translator report (translator_report.txt) and the
        auxiliary mailto.txt file with maintainer e-mail addresses.

        NOTE(review): this copy of the file appears to be missing several
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        output = os.path.join(self.doc_path, self.translatorReportFileName)

        # Open the textual report file for the output.
        f = xopen(output, 'w')

        # Output the information about the version.
        f.write('(' + self.doxVersion + ')\n\n')

        # Output the information about the number of the supported languages
        # and the list of the languages, or only the note about the explicitly
        # given languages to process.
        if self.script_argLst:
            f.write('The report was generated for the following, explicitly')
            f.write(' identified languages:\n\n')
            f.write(self.supportedLangReadableStr + '\n\n')
        # NOTE(review): the 'else:' branch header appears to be missing here.
            f.write('Doxygen supports the following ')
            f.write(str(self.numLang))
            f.write(' languages (sorted alphabetically):\n\n')
            f.write(self.supportedLangReadableStr + '\n\n')

        # Write the summary about the status of language translators (how
        # many translators) are up-to-date, etc.
        s = 'Of them, %d translators are up-to-date, ' % len(self.upToDateIdLst)
        s += '%d translators are based on some adapter class, ' % len(self.adaptIdLst)
        s += 'and %d are English based.' % len(self.EnBasedIdLst)
        f.write(fill(s) + '\n\n')

        # The e-mail addresses of the maintainers will be collected to
        # the auxiliary file in the order of translator classes listed
        # in the translator report.
        fmail = xopen('mailto.txt', 'w')

        # Write the list of "up-to-date" translator classes.
        if self.upToDateIdLst:
            s = '''The following translator classes are up-to-date (sorted
                alphabetically). This means that they derive from the
                Translator class, they implement all %d of the required
                methods, and even minor problems were not spotted by the script:'''
            s = s % len(self.requiredMethodsDic)
            f.write('-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            # NOTE(review): the 'mailtoLst = []' initialization appears to be
            # missing here.
            for x in self.upToDateIdLst:
                obj = self.__translDic[x]
                if obj.note is None:
                    f.write(' ' + obj.classId + '\n')
                    mailtoLst.extend(self.__emails(obj.classId))

            fmail.write('up-to-date\n')
            fmail.write('; '.join(mailtoLst))

            # Write separately the list of "ALMOST up-to-date" translator classes.
            s = '''The following translator classes are ALMOST up-to-date (sorted
                alphabetically). This means that they derive from the
                Translator class, but there still may be some minor problems
                listed for them:'''
            f.write('\n' + ('-' * 70) + '\n')
            f.write(fill(s) + '\n\n')
            # NOTE(review): the 'mailtoLst = []' re-initialization appears to
            # be missing here.
            for x in self.upToDateIdLst:
                obj = self.__translDic[x]
                if obj.note is not None:
                    f.write(' ' + obj.classId + '\t-- ' + obj.note + '\n')
                    mailtoLst.extend(self.__emails(obj.classId))

            fmail.write('\n\nalmost up-to-date\n')
            fmail.write('; '.join(mailtoLst))

        # Write the list of the adapter based classes. The very obsolete
        # translators that derive from TranslatorEnglish are included.
        if self.adaptIdLst:
            s = '''The following translator classes need maintenance
                (the most obsolete at the end). The other info shows the
                estimation of Doxygen version when the class was last
                updated and number of methods that must be implemented to
                become up-to-date:'''
            f.write('\n' + '-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            # Find also whether some adapter classes may be removed.
            adaptMinVersion = '9.9.99'

            # NOTE(review): the 'mailtoLst = []' re-initialization appears to
            # be missing here.
            numRequired = len(self.requiredMethodsDic)
            for x in self.adaptIdLst:
                obj = self.__translDic[x]
                f.write(' %-30s' % obj.classId)
                f.write(' %-6s' % obj.readableStatus)
                numimpl = len(obj.missingMethods)
                # NOTE(review): the "pluralS = ''" initialization appears to
                # be missing here.
                if numimpl > 1: pluralS = 's'
                percent = 100 * numimpl / numRequired
                f.write('\t%2d method%s to implement (%d %%)' % (
                    numimpl, pluralS, percent))
                # NOTE(review): an 'if obj.note:' guard appears to be missing
                # here.
                f.write('\n\tNote: ' + obj.note + '\n')

                mailtoLst.extend(self.__emails(obj.classId)) # to maintainer

                # Check the level of required adapter classes.
                if obj.status != '0.0.00' and obj.status < adaptMinVersion:
                    adaptMinVersion = obj.status

            fmail.write('\n\ntranslator based\n')
            fmail.write('; '.join(mailtoLst))

            # Set the note if some old translator adapters are not needed
            # any more. Do it only when the script is called without arguments,
            # i.e. all languages were checked against the needed translator
            # adapters.
            if not self.script_argLst:
                # NOTE(review): the 'to_remove = {}' initialization appears
                # to be missing here.
                for version, adaptClassId in list(self.adaptMethodsDic.values()):
                    if version < adaptMinVersion:
                        to_remove[adaptClassId] = True

                # NOTE(review): an 'if to_remove:' guard and a 'lst.sort()'
                # call appear to be missing here.
                lst = list(to_remove.keys())
                plural = len(lst) > 1
                note = 'Note: The adapter class'
                if plural: note += 'es'
                note += ' ' + ', '.join(lst)
                # NOTE(review): the 'is'/'are' wording statements appear to
                # be missing here.
                note += ' not used and can be removed.'
                f.write('\n' + fill(note) + '\n')

        # Write the list of the English-based classes.
        if self.EnBasedIdLst:
            # NOTE(review): the closing quotes of the following string
            # literal (ending with "the language.'''") appear to be missing
            # in this copy.
            s = '''The following translator classes derive directly from the
                TranslatorEnglish. The class identifier has the suffix 'En'
                that says that this is intentional. Usually, there is also
                a non-English based version of the translator for
            f.write('\n' + '-' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            for x in self.EnBasedIdLst:
                obj = self.__translDic[x]
                f.write(' ' + obj.classId)
                f.write('\timplements %d methods' % len(obj.implementedMethods))
                # NOTE(review): an 'if obj.note:' guard appears to be missing.
                f.write(' -- ' + obj.note)

        # Check for not used translator methods and generate warning if found.
        # The check is rather time consuming, so it is not done when report
        # is restricted to explicitly given language identifiers.
        if not self.script_argLst:
            dic = self.__checkForNotUsedTrMethods()
            # NOTE(review): an 'if dic:' guard appears to be missing here.
            s = '''WARNING: The following translator methods are declared
                in the Translator class but their identifiers do not appear
                in source files. The situation should be checked. The .cpp
                files and .h files excluding the '*translator*' files
                in doxygen/src directory were simply searched for occurrence
                of the method identifiers:'''
            f.write('\n' + '=' * 70 + '\n')
            f.write(fill(s) + '\n\n')

            keys = list(dic.keys())
            # NOTE(review): the 'keys.sort()' call and the 'for key in keys:'
            # loop header appear to be missing here.
            f.write(' ' + dic[key] + '\n')

        # Write the details for the translators.
        f.write('\n' + '=' * 70)
        f.write('\nDetails for translators (classes sorted alphabetically):\n')

        cls = list(self.__translDic.keys())
        # NOTE(review): the 'cls.sort()' call and the 'for c in cls:' loop
        # header appear to be missing here.
        obj = self.__translDic[c]
        assert(obj.classId != 'Translator')
        # NOTE(review): the 'obj.report(f)' call appears to be missing here.

        # Close the report file and the auxiliary file with e-mails.
        # NOTE(review): the 'f.close()' and 'fmail.close()' calls appear to
        # be missing here.
\r
    def __loadMaintainers(self):
        """Load and process the file with the maintainers.

        Fills the dictionary classId -> [(name, e-mail), ...].

        NOTE(review): this copy of the file appears to be missing several
        statements (marked below); compare with the upstream translator.py
        before relying on it."""

        fname = os.path.join(self.doc_path, self.maintainersFileName)

        # Include the maintainers file to the group of files checked with
        # respect to the modification time.
        tim = os.path.getmtime(fname)
        if tim > self.lastModificationTime:
            self.lastModificationTime = tim

        # Process the content of the maintainers file.
        # NOTE(review): the statement that opens the file into 'f' appears
        # to be missing here.
        inside = False # inside the record for the language
        # NOTE(review): a 'classId = None' initialization appears to be
        # missing here.
        maintainersLst = None
        self.__maintainersDic = {}
        # NOTE(review): the read loop header (e.g. 'while lineReady:')
        # appears to be missing here.
        line = f.readline() # next line
        lineReady = line != '' # when eof, then line == ''

        line = line.strip() # eof should also behave as separator
        if line != '' and line[0] == '%': # skip the comment line
            # NOTE(review): the 'continue' statement appears to be missing.

        if not inside: # if outside of the record
            if line != '': # should be language identifier
                # NOTE(review): the 'classId = line' and 'inside = True'
                # assignments appear to be missing here.
                maintainersLst = []
            # Otherwise skip empty line that do not act as separator.

        else: # if inside the record
            if line == '': # separator found
                # NOTE(review): the 'inside = False' assignment and the
                # following 'else:' branch header appear to be missing here.

                # If it is the first maintainer, create the empty list.
                if classId not in self.__maintainersDic:
                    self.__maintainersDic[classId] = []

                # Split the information about the maintainer and append
                # the tuple. The address may be prefixed '[unreachable]'
                # or whatever '[xxx]'. This will be processed later.
                lst = line.split(':', 1)
                assert(len(lst) == 2)
                t = (lst[0].strip(), lst[1].strip())
                self.__maintainersDic[classId].append(t)

        # NOTE(review): the 'f.close()' call appears to be missing here.
\r
1761 def generateLanguageDoc(self):
\r
1762 """Checks the modtime of files and generates language.doc."""
\r
1763 self.__loadMaintainers()
\r
1765 # Check the last modification time of the template file. It is the
\r
1766 # last file from the group that decide whether the documentation
\r
1767 # should or should not be generated.
\r
1768 fTplName = os.path.join(self.doc_path, self.languageTplFileName)
\r
1769 tim = os.path.getmtime(fTplName)
\r
1770 if tim > self.lastModificationTime:
\r
1771 self.lastModificationTime = tim
\r
1773 # If the generated documentation exists and is newer than any of
\r
1774 # the source files from the group, do not generate it and quit
\r
1776 fDocName = os.path.join(self.doc_path, self.languageDocFileName)
\r
1777 if os.path.isfile(fDocName):
\r
1778 if os.path.getmtime(fDocName) > self.lastModificationTime:
\r
1781 # The document or does not exist or is older than some of the
\r
1782 # sources. It must be generated again.
\r
1784 # Read the template of the documentation, and remove the first
\r
1785 # attention lines.
\r
1786 f = xopen(fTplName)
\r
1790 pos = doctpl.find('/***')
\r
1792 doctpl = doctpl[pos:]
\r
1794 # Fill the tplDic by symbols that will be inserted into the
\r
1795 # document template.
\r
1798 s = ('Do not edit this file. It was generated by the %s script.\n' +\
\r
1799 ' * Edit the %s and %s files instead.') % (
\r
1800 self.script_name, self.languageTplFileName, self.maintainersFileName)
\r
1801 tplDic['editnote'] = s
\r
1803 tplDic['doxVersion'] = self.doxVersion
\r
1804 tplDic['supportedLangReadableStr'] = self.supportedLangReadableStr
\r
1805 tplDic['translatorReportFileName'] = self.translatorReportFileName
\r
1807 ahref = '<a href="../doc/' + self.translatorReportFileName
\r
1808 ahref += '"\n><code>doxygen/doc/' + self.translatorReportFileName
\r
1809 ahref += '</code></a>'
\r
1810 tplDic['translatorReportLink'] = ahref
\r
1811 tplDic['numLangStr'] = str(self.numLang)
\r
1813 # Define templates for HTML table parts of the documentation.
\r
1814 htmlTableTpl = '''\
\r
1817 <table align="center" cellspacing="0" cellpadding="0" border="0">
\r
1818 <tr bgcolor="#000000">
\r
1820 <table cellspacing="1" cellpadding="2" border="0">
\r
1821 <tr bgcolor="#4040c0">
\r
1822 <td ><b><font size="+1" color="#ffffff"> Language </font></b></td>
\r
1823 <td ><b><font size="+1" color="#ffffff"> Maintainer </font></b></td>
\r
1824 <td ><b><font size="+1" color="#ffffff"> Contact address </font>
\r
1825 <font size="-2" color="#ffffff">(replace the at and dot)</font></b></td>
\r
1826 <td ><b><font size="+1" color="#ffffff"> Status </font></b></td>
\r
1828 <!-- table content begin -->
\r
1830 <!-- table content end -->
\r
1838 htmlTableTpl = textwrap.dedent(htmlTableTpl)
\r
1839 htmlTrTpl = '\n <tr bgcolor="#ffffff">%s\n </tr>'
\r
1840 htmlTdTpl = '\n <td>%s</td>'
\r
1841 htmlTdStatusColorTpl = '\n <td bgcolor="%s">%s</td>'
\r
1843 # Loop through transl objects in the order of sorted readable names
\r
1844 # and add generate the content of the HTML table.
\r
1846 for name, obj in self.langLst:
\r
1847 # Fill the table data elements for one row. The first element
\r
1848 # contains the readable name of the language. Only the oldest
\r
1849 # translator are colour marked in the language column. Less
\r
1850 # "heavy" color is used (when compared with the Status column).
\r
1851 if obj.readableStatus.startswith('1.4'):
\r
1852 bkcolor = self.getBgcolorByReadableStatus('1.4')
\r
1854 bkcolor = '#ffffff'
\r
1856 lst = [ htmlTdStatusColorTpl % (bkcolor, obj.langReadable) ]
\r
1858 # The next two elements contain the list of maintainers
\r
1859 # and the list of their mangled e-mails. For English-based
\r
1860 # translators that are coupled with the non-English based,
\r
1861 # insert the 'see' note.
\r
1862 mm = None # init -- maintainer
\r
1863 ee = None # init -- e-mail address
\r
1864 if obj.status == 'En':
\r
1865 # Check whether there is the coupled non-English.
\r
1866 classId = obj.classId[:-2]
\r
1867 if classId in self.__translDic:
\r
1868 lang = self.__translDic[classId].langReadable
\r
1869 mm = 'see the %s language' % lang
\r
1872 if not mm and obj.classId in self.__maintainersDic:
\r
1873 # Build a string of names separated by the HTML break element.
\r
1874 # Special notes used instead of names are highlighted.
\r
1876 for maintainer in self.__maintainersDic[obj.classId]:
\r
1877 name = maintainer[0]
\r
1878 if name.startswith('--'):
\r
1879 name = '<span style="color: red; background-color: yellow">'\
\r
1880 + name + '</span>'
\r
1882 mm = '<br/>'.join(lm)
\r
1884 # The marked addresses (they start with the mark '[unreachable]',
\r
1885 # '[resigned]', whatever '[xxx]') will not be displayed at all.
\r
1886 # Only the mark will be used instead.
\r
1887 rexMark = re.compile('(?P<mark>\\[.*?\\])')
\r
1889 for maintainer in self.__maintainersDic[obj.classId]:
\r
1890 address = maintainer[1]
\r
1891 m = rexMark.search(address)
\r
1893 address = '<span style="color: brown">'\
\r
1894 + m.group('mark') + '</span>'
\r
1895 le.append(address)
\r
1896 ee = '<br/>'.join(le)
\r
1898 # Append the maintainer and e-mail elements.
\r
1899 lst.append(htmlTdTpl % mm)
\r
1900 lst.append(htmlTdTpl % ee)
\r
1902 # The last element contains the readable form of the status.
\r
1903 bgcolor = self.getBgcolorByReadableStatus(obj.readableStatus)
\r
1904 lst.append(htmlTdStatusColorTpl % (bgcolor, obj.readableStatus))
\r
1906 # Join the table data to one table row.
\r
1907 trlst.append(htmlTrTpl % (''.join(lst)))
\r
1909 # Join the table rows and insert into the template.
\r
1910 htmlTable = htmlTableTpl % (''.join(trlst))
\r
1912 # Define templates for LaTeX table parts of the documentation.
\r
1913 latexTableTpl = r'''
\r
1916 \begin{longtable}{|l|l|l|l|}
\r
1918 {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
\r
1926 latexTableTpl = textwrap.dedent(latexTableTpl)
\r
1927 latexLineTpl = '\n' + r' %s & %s & {\tt\tiny %s} & %s \\'
\r
1929 # Loop through transl objects in the order of sorted readable names
\r
1930 # and add generate the content of the LaTeX table.
\r
1932 for name, obj in self.langLst:
\r
1933 # For LaTeX, more maintainers for the same language are
\r
1934 # placed on separate rows in the table. The line separator
\r
1935 # in the table is placed explicitly above the first
\r
1936 # maintainer. Prepare the arguments for the LaTeX row template.
\r
1938 if obj.classId in self.__maintainersDic:
\r
1939 maintainers = self.__maintainersDic[obj.classId]
\r
1941 lang = obj.langReadable
\r
1942 maintainer = None # init
\r
1943 email = None # init
\r
1944 if obj.status == 'En':
\r
1945 # Check whether there is the coupled non-English.
\r
1946 classId = obj.classId[:-2]
\r
1947 if classId in self.__translDic:
\r
1948 langNE = self.__translDic[classId].langReadable
\r
1949 maintainer = 'see the %s language' % langNE
\r
1952 if not maintainer and (obj.classId in self.__maintainersDic):
\r
1953 lm = [ m[0] for m in self.__maintainersDic[obj.classId] ]
\r
1954 maintainer = maintainers[0][0]
\r
1955 email = maintainers[0][1]
\r
1957 status = obj.readableStatus
\r
1959 # Use the template to produce the line of the table and insert
\r
1960 # the hline plus the constructed line into the table content.
\r
1961 # The underscore character must be escaped.
\r
1962 trlst.append('\n \\hline')
\r
1963 s = latexLineTpl % (lang, maintainer, email, status)
\r
1964 s = s.replace('_', '\\_')
\r
1967 # List the other maintainers for the language. Do not set
\r
1968 # lang and status for them.
\r
1971 for m in maintainers[1:]:
\r
1974 s = latexLineTpl % (lang, maintainer, email, status)
\r
1975 s = s.replace('_', '\\_')
\r
1978 # Join the table lines and insert into the template.
\r
1979 latexTable = latexTableTpl % (''.join(trlst))
\r
1981 # Put the HTML and LaTeX parts together and define the dic item.
\r
1982 tplDic['informationTable'] = htmlTable + '\n' + latexTable
\r
1984 # Insert the symbols into the document template and write it down.
\r
1985 f = xopen(fDocName, 'w')
\r
1986 f.write(doctpl % tplDic)
\r
if __name__ == '__main__':

    # Python 2.6+ or Python 3.0+ is required (the check below allows any
    # Python 3.x, since "minor < 0" is never true for major == 3).
    major = sys.version_info[0]
    minor = sys.version_info[1]
    if (major == 2 and minor < 6) or (major == 3 and minor < 0):
        print('Python 2.6+ or Python 3.0+ are required for the script')
        # Abort: continuing on an unsupported interpreter would only
        # produce confusing secondary errors later.
        sys.exit(1)

    # The translator manager builds the Transl objects, parses the related
    # sources, and keeps them in memory.
    trMan = TrManager()

    # Process the Transl objects and generate the output files.
    trMan.generateLanguageDoc()
    trMan.generateTranslatorReport()
\r