3 # This is the API builder: it parses the C sources and builds the
4 # API formal description in XML.
6 # See Copyright for the status of this software.
15 #debugsym='ignorableWhitespaceSAXFunc'
19 # C parser analysis code
22 "trio": "too many non standard macros",
23 "trio.c": "too many non standard macros",
24 "trionan.c": "too many non standard macros",
25 "triostr.c": "too many non standard macros",
26 "acconfig.h": "generated portability layer",
27 "config.h": "generated portability layer",
28 "libxml.h": "internal only",
29 "testOOM.c": "out of memory tester",
30 "testOOMlib.h": "out of memory tester",
31 "testOOMlib.c": "out of memory tester",
32 "rngparser.c": "not yet integrated",
33 "rngparser.h": "not yet integrated",
34 "elfgcchack.h": "not a normal header",
35 "testHTML.c": "test tool",
36 "testReader.c": "test tool",
37 "testSchemas.c": "test tool",
38 "testXPath.c": "test tool",
39 "testAutomata.c": "test tool",
40 "testModule.c": "test tool",
41 "testRegexp.c": "test tool",
42 "testThreads.c": "test tool",
43 "testC14N.c": "test tool",
44 "testRelax.c": "test tool",
45 "testThreadsWin32.c": "test tool",
46 "testSAX.c": "test tool",
47 "testURI.c": "test tool",
48 "testapi.c": "generated regression tests",
49 "runtest.c": "regression tests program",
50 "runsuite.c": "regression tests program",
51 "tst.c": "not part of the library",
52 "test.c": "not part of the library",
53 "testdso.c": "test for dynamid shared libraries",
54 "testrecurse.c": "test for entities recursions",
55 "xzlib.h": "Internal API only",
59 "WINAPI": (0, "Windows keyword"),
60 "LIBXML_DLL_IMPORT": (0, "Special macro to flag external keywords"),
61 "XMLPUBVAR": (0, "Special macro for extern vars for win32"),
62 "XSLTPUBVAR": (0, "Special macro for extern vars for win32"),
63 "EXSLTPUBVAR": (0, "Special macro for extern vars for win32"),
64 "XMLPUBFUN": (0, "Special macro for extern funcs for win32"),
65 "XSLTPUBFUN": (0, "Special macro for extern funcs for win32"),
66 "EXSLTPUBFUN": (0, "Special macro for extern funcs for win32"),
67 "XMLCALL": (0, "Special macro for win32 calls"),
68 "XSLTCALL": (0, "Special macro for win32 calls"),
69 "XMLCDECL": (0, "Special macro for win32 calls"),
70 "EXSLTCALL": (0, "Special macro for win32 calls"),
71 "__declspec": (3, "Windows keyword"),
72 "__stdcall": (0, "Windows keyword"),
73 "ATTRIBUTE_UNUSED": (0, "macro keyword"),
74 "LIBEXSLT_PUBLIC": (0, "macro keyword"),
75 "X_IN_Y": (5, "macro function builder"),
76 "ATTRIBUTE_ALLOC_SIZE": (3, "macro for gcc checking extension"),
77 "ATTRIBUTE_PRINTF": (5, "macro for gcc printf args checking extension"),
78 "LIBXML_ATTR_FORMAT": (5, "macro for gcc printf args checking extension"),
79 "LIBXML_ATTR_ALLOC_SIZE": (3, "macro for gcc checking extension"),
83 raw = string.replace(raw, '&', '&')
84 raw = string.replace(raw, '<', '<')
85 raw = string.replace(raw, '>', '>')
86 raw = string.replace(raw, "'", ''')
87 raw = string.replace(raw, '"', '"')
97 def __init__(self, name, header=None, module=None, type=None, lineno = 0,
98 info=None, extra=None, conditionals = None):
107 if conditionals == None or len(conditionals) == 0:
108 self.conditionals = None
110 self.conditionals = conditionals[:]
111 if self.name == debugsym:
112 print "=> define %s : %s" % (debugsym, (module, type, info,
113 extra, conditionals))
116 r = "%s %s:" % (self.type, self.name)
119 if self.module != None:
120 r = r + " from %s" % (self.module)
121 if self.info != None:
122 r = r + " " + `self.info`
123 if self.extra != None:
124 r = r + " " + `self.extra`
125 if self.conditionals != None:
126 r = r + " " + `self.conditionals`
130 def set_header(self, header):
132 def set_module(self, module):
134 def set_type(self, type):
136 def set_info(self, info):
138 def set_extra(self, extra):
140 def set_lineno(self, lineno):
142 def set_static(self, static):
144 def set_conditionals(self, conditionals):
145 if conditionals == None or len(conditionals) == 0:
146 self.conditionals = None
148 self.conditionals = conditionals[:]
152 def get_header(self):
154 def get_module(self):
160 def get_lineno(self):
164 def get_static(self):
    def get_conditionals(self):
        # Accessor for the preprocessor-conditional context recorded for
        # this identifier: a list of strings such as "defined(X)" /
        # "!defined(X)" collected while parsing #ifdef/#ifndef blocks,
        # or None when the symbol is unconditional.
        return self.conditionals
169 def update(self, header, module, type = None, info = None, extra=None,
171 if self.name == debugsym:
172 print "=> update %s : %s" % (debugsym, (module, type, info,
173 extra, conditionals))
174 if header != None and self.header == None:
175 self.set_header(module)
176 if module != None and (self.module == None or self.header == self.module):
177 self.set_module(module)
178 if type != None and self.type == None:
183 self.set_extra(extra)
184 if conditionals != None:
185 self.set_conditionals(conditionals)
188 def __init__(self, name = "noname"):
190 self.identifiers = {}
201 def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
202 if name[0:2] == '__':
206 d = self.identifiers[name]
207 d.update(header, module, type, lineno, info, extra, conditionals)
209 d = identifier(name, header, module, type, lineno, info, extra, conditionals)
210 self.identifiers[name] = d
212 if d != None and static == 1:
215 if d != None and name != None and type != None:
216 self.references[name] = d
219 print "New ref: %s" % (d)
223 def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
224 if name[0:2] == '__':
228 d = self.identifiers[name]
229 d.update(header, module, type, lineno, info, extra, conditionals)
231 d = identifier(name, header, module, type, lineno, info, extra, conditionals)
232 self.identifiers[name] = d
234 if d != None and static == 1:
237 if d != None and name != None and type != None:
238 if type == "function":
239 self.functions[name] = d
240 elif type == "functype":
241 self.functions[name] = d
242 elif type == "variable":
243 self.variables[name] = d
244 elif type == "include":
245 self.includes[name] = d
246 elif type == "struct":
247 self.structs[name] = d
250 elif type == "typedef":
251 self.typedefs[name] = d
252 elif type == "macro":
253 self.macros[name] = d
255 print "Unable to register type ", type
258 print "New symbol: %s" % (d)
262 def merge(self, idx):
263 for id in idx.functions.keys():
265 # macro might be used to override functions or variables
268 if self.macros.has_key(id):
270 if self.functions.has_key(id):
271 print "function %s from %s redeclared in %s" % (
272 id, self.functions[id].header, idx.functions[id].header)
274 self.functions[id] = idx.functions[id]
275 self.identifiers[id] = idx.functions[id]
276 for id in idx.variables.keys():
278 # macro might be used to override functions or variables
281 if self.macros.has_key(id):
283 if self.variables.has_key(id):
284 print "variable %s from %s redeclared in %s" % (
285 id, self.variables[id].header, idx.variables[id].header)
287 self.variables[id] = idx.variables[id]
288 self.identifiers[id] = idx.variables[id]
289 for id in idx.structs.keys():
290 if self.structs.has_key(id):
291 print "struct %s from %s redeclared in %s" % (
292 id, self.structs[id].header, idx.structs[id].header)
294 self.structs[id] = idx.structs[id]
295 self.identifiers[id] = idx.structs[id]
296 for id in idx.typedefs.keys():
297 if self.typedefs.has_key(id):
298 print "typedef %s from %s redeclared in %s" % (
299 id, self.typedefs[id].header, idx.typedefs[id].header)
301 self.typedefs[id] = idx.typedefs[id]
302 self.identifiers[id] = idx.typedefs[id]
303 for id in idx.macros.keys():
305 # macro might be used to override functions or variables
308 if self.variables.has_key(id):
310 if self.functions.has_key(id):
312 if self.enums.has_key(id):
314 if self.macros.has_key(id):
315 print "macro %s from %s redeclared in %s" % (
316 id, self.macros[id].header, idx.macros[id].header)
318 self.macros[id] = idx.macros[id]
319 self.identifiers[id] = idx.macros[id]
320 for id in idx.enums.keys():
321 if self.enums.has_key(id):
322 print "enum %s from %s redeclared in %s" % (
323 id, self.enums[id].header, idx.enums[id].header)
325 self.enums[id] = idx.enums[id]
326 self.identifiers[id] = idx.enums[id]
328 def merge_public(self, idx):
329 for id in idx.functions.keys():
330 if self.functions.has_key(id):
331 # check that function condition agrees with header
332 if idx.functions[id].conditionals != \
333 self.functions[id].conditionals:
334 print "Header condition differs from Function for %s:" \
336 print " H: %s" % self.functions[id].conditionals
337 print " C: %s" % idx.functions[id].conditionals
338 up = idx.functions[id]
339 self.functions[id].update(None, up.module, up.type, up.info, up.extra)
341 # print "Function %s from %s is not declared in headers" % (
342 # id, idx.functions[id].module)
343 # TODO: do the same for variables.
345 def analyze_dict(self, type, dict):
348 for name in dict.keys():
354 print " %d %s , %d public" % (count, type, public)
356 print " %d public %s" % (count, type)
360 self.analyze_dict("functions", self.functions)
361 self.analyze_dict("variables", self.variables)
362 self.analyze_dict("structs", self.structs)
363 self.analyze_dict("typedefs", self.typedefs)
364 self.analyze_dict("macros", self.macros)
367 """A lexer for the C language, tokenize the input by reading and
368 analyzing it line by line"""
369 def __init__(self, input):
378 line = self.input.readline()
381 self.lineno = self.lineno + 1
382 line = string.lstrip(line)
383 line = string.rstrip(line)
386 while line[-1] == '\\':
388 n = self.input.readline()
389 self.lineno = self.lineno + 1
    def push(self, token):
        # Push a token back onto the front of the pending-token queue so
        # that the next call to token() yields it first.  This provides the
        # one-token lookahead "undo" used by the parser (e.g. parseType
        # pushes back a name token it over-read).
        self.tokens.insert(0, token);
405 print "Last token: ", self.last
406 print "Token queue: ", self.tokens
407 print "Line %d end: " % (self.lineno), self.line
410 while self.tokens == []:
412 line = self.getline()
420 self.tokens = map((lambda x: ('preproc', x)),
424 if line[0] == '"' or line[0] == "'":
434 self.line = line[i+1:]
444 line = self.getline()
447 self.last = ('string', tok)
450 if l >= 2 and line[0] == '/' and line[1] == '*':
458 if line[i] == '*' and i+1 < l and line[i+1] == '/':
459 self.line = line[i+2:]
469 line = self.getline()
472 self.last = ('comment', tok)
474 if l >= 2 and line[0] == '/' and line[1] == '/':
476 self.last = ('comment', line)
480 if line[i] == '/' and i+1 < l and line[i+1] == '/':
484 if line[i] == '/' and i+1 < l and line[i+1] == '*':
488 if line[i] == '"' or line[i] == "'":
496 if line[i] == ' ' or line[i] == '\t':
500 if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
501 (o >= 48 and o <= 57):
505 if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
506 (o >= 48 and o <= 57) or string.find(
507 " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
511 self.tokens.append(('name', line[s:i]))
513 if string.find("(){}:;,[]", line[i]) != -1:
514 # if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
515 # line[i] == '}' or line[i] == ':' or line[i] == ';' or \
516 # line[i] == ',' or line[i] == '[' or line[i] == ']':
517 self.tokens.append(('sep', line[i]))
520 if string.find("+-*><=/%&!|.", line[i]) != -1:
521 # if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
522 # line[i] == '>' or line[i] == '<' or line[i] == '=' or \
523 # line[i] == '/' or line[i] == '%' or line[i] == '&' or \
524 # line[i] == '!' or line[i] == '|' or line[i] == '.':
525 if line[i] == '.' and i + 2 < l and \
526 line[i+1] == '.' and line[i+2] == '.':
527 self.tokens.append(('name', '...'))
533 string.find("+-*><=/%&!|", line[j]) != -1):
534 # line[j] == '+' or line[j] == '-' or line[j] == '*' or \
535 # line[j] == '>' or line[j] == '<' or line[j] == '=' or \
536 # line[j] == '/' or line[j] == '%' or line[j] == '&' or \
537 # line[j] == '!' or line[j] == '|'):
538 self.tokens.append(('op', line[i:j+1]))
541 self.tokens.append(('op', line[i]))
547 if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
548 (o >= 48 and o <= 57) or (
549 string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
550 # line[i] != ' ' and line[i] != '\t' and
551 # line[i] != '(' and line[i] != ')' and
552 # line[i] != '{' and line[i] != '}' and
553 # line[i] != ':' and line[i] != ';' and
554 # line[i] != ',' and line[i] != '+' and
555 # line[i] != '-' and line[i] != '*' and
556 # line[i] != '/' and line[i] != '%' and
557 # line[i] != '&' and line[i] != '!' and
558 # line[i] != '|' and line[i] != '[' and
559 # line[i] != ']' and line[i] != '=' and
560 # line[i] != '*' and line[i] != '>' and
565 self.tokens.append(('name', line[s:i]))
568 self.tokens = self.tokens[1:]
573 """The C module parser"""
574 def __init__(self, filename, idx = None):
575 self.filename = filename
576 if len(filename) > 2 and filename[-2:] == '.h':
580 self.input = open(filename)
581 self.lexer = CLexer(self.input)
586 self.top_comment = ""
587 self.last_comment = ""
591 self.conditionals = []
594 def collect_references(self):
597 def stop_error(self):
600 def start_error(self):
604 return self.lexer.getlineno()
606 def index_add(self, name, module, static, type, info=None, extra = None):
607 if self.is_header == 1:
608 self.index.add(name, module, module, static, type, self.lineno(),
609 info, extra, self.conditionals)
611 self.index.add(name, None, module, static, type, self.lineno(),
612 info, extra, self.conditionals)
614 def index_add_ref(self, name, module, static, type, info=None,
616 if self.is_header == 1:
617 self.index.add_ref(name, module, module, static, type,
618 self.lineno(), info, extra, self.conditionals)
620 self.index.add_ref(name, None, module, static, type, self.lineno(),
621 info, extra, self.conditionals)
623 def warning(self, msg):
628 def error(self, msg, token=-1):
632 print "Parse Error: " + msg
634 print "Got token ", token
638 def debug(self, msg, token=-1):
639 print "Debug: " + msg
641 print "Got token ", token
644 def parseTopComment(self, comment):
646 lines = string.split(comment, "\n")
649 while line != "" and (line[0] == ' ' or line[0] == '\t'):
651 while line != "" and line[0] == '*':
653 while line != "" and (line[0] == ' ' or line[0] == '\t'):
656 (it, line) = string.split(line, ":", 1)
658 while line != "" and (line[0] == ' ' or line[0] == '\t'):
660 if res.has_key(item):
661 res[item] = res[item] + " " + line
666 if res.has_key(item):
667 res[item] = res[item] + " " + line
670 self.index.info = res
672 def parseComment(self, token):
673 if self.top_comment == "":
674 self.top_comment = token[1]
675 if self.comment == None or token[1][0] == '*':
676 self.comment = token[1];
678 self.comment = self.comment + token[1]
679 token = self.lexer.token()
681 if string.find(self.comment, "DOC_DISABLE") != -1:
684 if string.find(self.comment, "DOC_ENABLE") != -1:
690 # Parse a comment block associated with a typedef
692 def parseTypeComment(self, name, quiet = 0):
693 if name[0:2] == '__':
699 if self.comment == None:
701 self.warning("Missing comment for type %s" % (name))
703 if self.comment[0] != '*':
705 self.warning("Missing * in type comment for %s" % (name))
707 lines = string.split(self.comment, '\n')
710 if lines[0] != "* %s:" % (name):
712 self.warning("Misformatted type comment for %s" % (name))
713 self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
716 while len(lines) > 0 and lines[0] == '*':
719 while len(lines) > 0:
721 while len(l) > 0 and l[0] == '*':
724 desc = desc + " " + l
727 desc = string.strip(desc)
731 self.warning("Type comment for %s lack description of the macro" % (name))
735 # Parse a comment block associated with a macro
737 def parseMacroComment(self, name, quiet = 0):
738 if name[0:2] == '__':
744 if self.comment == None:
746 self.warning("Missing comment for macro %s" % (name))
748 if self.comment[0] != '*':
750 self.warning("Missing * in macro comment for %s" % (name))
752 lines = string.split(self.comment, '\n')
755 if lines[0] != "* %s:" % (name):
757 self.warning("Misformatted macro comment for %s" % (name))
758 self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
761 while lines[0] == '*':
763 while len(lines) > 0 and lines[0][0:3] == '* @':
766 (arg, desc) = string.split(l, ':', 1)
767 desc=string.strip(desc)
768 arg=string.strip(arg)
771 self.warning("Misformatted macro comment for %s" % (name))
772 self.warning(" problem with '%s'" % (lines[0]))
776 l = string.strip(lines[0])
777 while len(l) > 2 and l[0:3] != '* @':
780 desc = desc + ' ' + string.strip(l)
785 args.append((arg, desc))
786 while len(lines) > 0 and lines[0] == '*':
789 while len(lines) > 0:
791 while len(l) > 0 and l[0] == '*':
794 desc = desc + " " + l
797 desc = string.strip(desc)
801 self.warning("Macro comment for %s lack description of the macro" % (name))
806 # Parse a comment block and merge the information found in the
807 # parameter descriptions; finally returns a block as complete
810 def mergeFunctionComment(self, name, description, quiet = 0):
813 if name[0:2] == '__':
816 (ret, args) = description
820 if self.comment == None:
822 self.warning("Missing comment for function %s" % (name))
823 return(((ret[0], retdesc), args, desc))
824 if self.comment[0] != '*':
826 self.warning("Missing * in function comment for %s" % (name))
827 return(((ret[0], retdesc), args, desc))
828 lines = string.split(self.comment, '\n')
831 if lines[0] != "* %s:" % (name):
833 self.warning("Misformatted function comment for %s" % (name))
834 self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
835 return(((ret[0], retdesc), args, desc))
837 while lines[0] == '*':
840 while len(lines) > 0 and lines[0][0:3] == '* @':
843 (arg, desc) = string.split(l, ':', 1)
844 desc=string.strip(desc)
845 arg=string.strip(arg)
848 self.warning("Misformatted function comment for %s" % (name))
849 self.warning(" problem with '%s'" % (lines[0]))
853 l = string.strip(lines[0])
854 while len(l) > 2 and l[0:3] != '* @':
857 desc = desc + ' ' + string.strip(l)
864 if args[i][1] == arg:
865 args[i] = (args[i][0], arg, desc)
870 self.warning("Unable to find arg %s from function comment for %s" % (
872 while len(lines) > 0 and lines[0] == '*':
875 while len(lines) > 0:
877 while len(l) > 0 and l[0] == '*':
880 if len(l) >= 6 and l[0:6] == "return" or l[0:6] == "Return":
882 l = string.split(l, ' ', 1)[1]
885 retdesc = string.strip(l)
887 while len(lines) > 0:
889 while len(l) > 0 and l[0] == '*':
892 retdesc = retdesc + " " + l
895 desc = desc + " " + l
898 retdesc = string.strip(retdesc)
899 desc = string.strip(desc)
903 # report missing comments
907 if args[i][2] == None and args[i][0] != "void" and \
908 ((args[i][1] != None) or (args[i][1] == '')):
909 self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
911 if retdesc == "" and ret[0] != "void":
912 self.warning("Function comment for %s lacks description of return value" % (name))
914 self.warning("Function comment for %s lacks description of the function" % (name))
916 return(((ret[0], retdesc), args, desc))
918 def parsePreproc(self, token):
920 print "=> preproc ", token, self.lexer.tokens
922 if name == "#include":
923 token = self.lexer.token()
926 if token[0] == 'preproc':
927 self.index_add(token[1], self.filename, not self.is_header,
929 return self.lexer.token()
931 if name == "#define":
932 token = self.lexer.token()
935 if token[0] == 'preproc':
936 # TODO macros with arguments
939 token = self.lexer.token()
940 while token != None and token[0] == 'preproc' and \
943 token = self.lexer.token()
945 name = string.split(name, '(') [0]
948 info = self.parseMacroComment(name, not self.is_header)
949 self.index_add(name, self.filename, not self.is_header,
954 # Processing of conditionals modified by Bill 1/1/05
956 # We process conditionals (i.e. tokens from #ifdef, #ifndef,
957 # #if, #else and #endif) for headers and mainline code,
958 # store the ones from the header in libxml2-api.xml, and later
959 # (in the routine merge_public) verify that the two (header and
960 # mainline code) agree.
962 # There is a small problem with processing the headers. Some of
963 # the variables are not concerned with enabling / disabling of
964 # library functions (e.g. '__XML_PARSER_H__'), and we don't want
965 # them to be included in libxml2-api.xml, or involved in
966 # the check between the header and the mainline code. To
967 # accomplish this, we ignore any conditional which doesn't include
968 # the string 'ENABLED'
971 apstr = self.lexer.tokens[0][1]
973 self.defines.append(apstr)
974 if string.find(apstr, 'ENABLED') != -1:
975 self.conditionals.append("defined(%s)" % apstr)
978 elif name == "#ifndef":
979 apstr = self.lexer.tokens[0][1]
981 self.defines.append(apstr)
982 if string.find(apstr, 'ENABLED') != -1:
983 self.conditionals.append("!defined(%s)" % apstr)
988 for tok in self.lexer.tokens:
991 apstr = apstr + tok[1]
993 self.defines.append(apstr)
994 if string.find(apstr, 'ENABLED') != -1:
995 self.conditionals.append(apstr)
998 elif name == "#else":
999 if self.conditionals != [] and \
1000 string.find(self.defines[-1], 'ENABLED') != -1:
1001 self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
1002 elif name == "#endif":
1003 if self.conditionals != [] and \
1004 string.find(self.defines[-1], 'ENABLED') != -1:
1005 self.conditionals = self.conditionals[:-1]
1006 self.defines = self.defines[:-1]
1007 token = self.lexer.token()
1008 while token != None and token[0] == 'preproc' and \
1010 token = self.lexer.token()
1014 # token acquisition on top of the lexer; it internally handles
1015 # preprocessor directives and comments since they are logically not part of
1016 # the program structure.
1019 global ignored_words
1021 token = self.lexer.token()
1022 while token != None:
1023 if token[0] == 'comment':
1024 token = self.parseComment(token)
1026 elif token[0] == 'preproc':
1027 token = self.parsePreproc(token)
1029 elif token[0] == "name" and token[1] == "__const":
1030 token = ("name", "const")
1032 elif token[0] == "name" and token[1] == "__attribute":
1033 token = self.lexer.token()
1034 while token != None and token[1] != ";":
1035 token = self.lexer.token()
1037 elif token[0] == "name" and ignored_words.has_key(token[1]):
1038 (n, info) = ignored_words[token[1]]
1041 token = self.lexer.token()
1043 token = self.lexer.token()
1052 # Parse a typedef, it records the type and its name.
1054 def parseTypedef(self, token):
1057 token = self.parseType(token)
1059 self.error("parsing typedef")
1061 base_type = self.type
1063 #self.debug("end typedef type", token)
1064 while token != None:
1065 if token[0] == "name":
1067 signature = self.signature
1068 if signature != None:
1069 type = string.split(type, '(')[0]
1070 d = self.mergeFunctionComment(name,
1071 ((type, None), signature), 1)
1072 self.index_add(name, self.filename, not self.is_header,
1075 if base_type == "struct":
1076 self.index_add(name, self.filename, not self.is_header,
1078 base_type = "struct " + name
1080 # TODO report missing or misformatted comments
1081 info = self.parseTypeComment(name, 1)
1082 self.index_add(name, self.filename, not self.is_header,
1083 "typedef", type, info)
1084 token = self.token()
1086 self.error("parsing typedef: expecting a name")
1088 #self.debug("end typedef", token)
1089 if token != None and token[0] == 'sep' and token[1] == ',':
1091 token = self.token()
1092 while token != None and token[0] == "op":
1093 type = type + token[1]
1094 token = self.token()
1095 elif token != None and token[0] == 'sep' and token[1] == ';':
1097 elif token != None and token[0] == 'name':
1101 self.error("parsing typedef: expecting ';'", token)
1103 token = self.token()
1107 # Parse a C code block, used for functions; it parses until
1108 # the balancing } is included
1110 def parseBlock(self, token):
1111 while token != None:
1112 if token[0] == "sep" and token[1] == "{":
1113 token = self.token()
1114 token = self.parseBlock(token)
1115 elif token[0] == "sep" and token[1] == "}":
1117 token = self.token()
1120 if self.collect_ref == 1:
1122 token = self.token()
1123 if oldtok[0] == "name" and oldtok[1][0:3] == "xml":
1124 if token[0] == "sep" and token[1] == "(":
1125 self.index_add_ref(oldtok[1], self.filename,
1127 token = self.token()
1128 elif token[0] == "name":
1129 token = self.token()
1130 if token[0] == "sep" and (token[1] == ";" or
1131 token[1] == "," or token[1] == "="):
1132 self.index_add_ref(oldtok[1], self.filename,
1134 elif oldtok[0] == "name" and oldtok[1][0:4] == "XML_":
1135 self.index_add_ref(oldtok[1], self.filename,
1137 elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXML_":
1138 self.index_add_ref(oldtok[1], self.filename,
1142 token = self.token()
1146 # Parse a C struct definition till the balancing }
1148 def parseStruct(self, token):
1150 #self.debug("start parseStruct", token)
1151 while token != None:
1152 if token[0] == "sep" and token[1] == "{":
1153 token = self.token()
1154 token = self.parseTypeBlock(token)
1155 elif token[0] == "sep" and token[1] == "}":
1156 self.struct_fields = fields
1157 #self.debug("end parseStruct", token)
1159 token = self.token()
1162 base_type = self.type
1163 #self.debug("before parseType", token)
1164 token = self.parseType(token)
1165 #self.debug("after parseType", token)
1166 if token != None and token[0] == "name":
1168 token = self.token()
1169 if token[0] == "sep" and token[1] == ";":
1171 token = self.token()
1172 fields.append((self.type, fname, self.comment))
1175 self.error("parseStruct: expecting ;", token)
1176 elif token != None and token[0] == "sep" and token[1] == "{":
1177 token = self.token()
1178 token = self.parseTypeBlock(token)
1179 if token != None and token[0] == "name":
1180 token = self.token()
1181 if token != None and token[0] == "sep" and token[1] == ";":
1182 token = self.token()
1184 self.error("parseStruct: expecting ;", token)
1186 self.error("parseStruct: name", token)
1187 token = self.token()
1188 self.type = base_type;
1189 self.struct_fields = fields
1190 #self.debug("end parseStruct", token)
1195 # Parse a C enum block, parsing until the balancing }
1197 def parseEnumBlock(self, token):
1203 while token != None:
1204 if token[0] == "sep" and token[1] == "{":
1205 token = self.token()
1206 token = self.parseTypeBlock(token)
1207 elif token[0] == "sep" and token[1] == "}":
1209 if self.comment != None:
1210 comment = self.comment
1212 self.enums.append((name, value, comment))
1213 token = self.token()
1215 elif token[0] == "name":
1217 if self.comment != None:
1218 comment = string.strip(self.comment)
1220 self.enums.append((name, value, comment))
1223 token = self.token()
1224 if token[0] == "op" and token[1][0] == "=":
1226 if len(token[1]) > 1:
1227 value = token[1][1:]
1228 token = self.token()
1229 while token[0] != "sep" or (token[1] != ',' and
1231 value = value + token[1]
1232 token = self.token()
1235 value = "%d" % (int(value) + 1)
1237 self.warning("Failed to compute value of enum %s" % (name))
1239 if token[0] == "sep" and token[1] == ",":
1240 token = self.token()
1242 token = self.token()
1246 # Parse a C definition block, used for structs; it parses until
1249 def parseTypeBlock(self, token):
1250 while token != None:
1251 if token[0] == "sep" and token[1] == "{":
1252 token = self.token()
1253 token = self.parseTypeBlock(token)
1254 elif token[0] == "sep" and token[1] == "}":
1255 token = self.token()
1258 token = self.token()
1262 # Parse a type: the fact that the type name can either occur after
1263 # the definition or within the definition makes it a little harder
1264 # if inside, the name token is pushed back before returning
1266 def parseType(self, token):
1268 self.struct_fields = []
1269 self.signature = None
1273 while token[0] == "name" and (
1274 token[1] == "const" or \
1275 token[1] == "unsigned" or \
1276 token[1] == "signed"):
1278 self.type = token[1]
1280 self.type = self.type + " " + token[1]
1281 token = self.token()
1283 if token[0] == "name" and (token[1] == "long" or token[1] == "short"):
1285 self.type = token[1]
1287 self.type = self.type + " " + token[1]
1288 if token[0] == "name" and token[1] == "int":
1292 self.type = self.type + " " + tmp[1]
1294 elif token[0] == "name" and token[1] == "struct":
1296 self.type = token[1]
1298 self.type = self.type + " " + token[1]
1299 token = self.token()
1301 if token[0] == "name":
1303 token = self.token()
1304 if token != None and token[0] == "sep" and token[1] == "{":
1305 token = self.token()
1306 token = self.parseStruct(token)
1307 elif token != None and token[0] == "op" and token[1] == "*":
1308 self.type = self.type + " " + nametok[1] + " *"
1309 token = self.token()
1310 while token != None and token[0] == "op" and token[1] == "*":
1311 self.type = self.type + " *"
1312 token = self.token()
1313 if token[0] == "name":
1315 token = self.token()
1317 self.error("struct : expecting name", token)
1319 elif token != None and token[0] == "name" and nametok != None:
1320 self.type = self.type + " " + nametok[1]
1324 self.lexer.push(token)
1328 elif token[0] == "name" and token[1] == "enum":
1330 self.type = token[1]
1332 self.type = self.type + " " + token[1]
1334 token = self.token()
1335 if token != None and token[0] == "sep" and token[1] == "{":
1336 token = self.token()
1337 token = self.parseEnumBlock(token)
1339 self.error("parsing enum: expecting '{'", token)
1341 if token != None and token[0] != "name":
1342 self.lexer.push(token)
1343 token = ("name", "enum")
1345 enum_type = token[1]
1346 for enum in self.enums:
1347 self.index_add(enum[0], self.filename,
1348 not self.is_header, "enum",
1349 (enum[1], enum[2], enum_type))
1352 elif token[0] == "name":
1354 self.type = token[1]
1356 self.type = self.type + " " + token[1]
1358 self.error("parsing type %s: expecting a name" % (self.type),
1361 token = self.token()
1362 while token != None and (token[0] == "op" or
1363 token[0] == "name" and token[1] == "const"):
1364 self.type = self.type + " " + token[1]
1365 token = self.token()
1368 # if there is a parenthesis here, this means a function type
1370 if token != None and token[0] == "sep" and token[1] == '(':
1371 self.type = self.type + token[1]
1372 token = self.token()
1373 while token != None and token[0] == "op" and token[1] == '*':
1374 self.type = self.type + token[1]
1375 token = self.token()
1376 if token == None or token[0] != "name" :
1377 self.error("parsing function type, name expected", token);
1379 self.type = self.type + token[1]
1381 token = self.token()
1382 if token != None and token[0] == "sep" and token[1] == ')':
1383 self.type = self.type + token[1]
1384 token = self.token()
1385 if token != None and token[0] == "sep" and token[1] == '(':
1386 token = self.token()
1388 token = self.parseSignature(token);
1391 self.error("parsing function type, '(' expected", token);
1394 self.error("parsing function type, ')' expected", token);
1396 self.lexer.push(token)
1401 # do some lookahead for arrays
1403 if token != None and token[0] == "name":
1405 token = self.token()
1406 if token != None and token[0] == "sep" and token[1] == '[':
1407 self.type = self.type + nametok[1]
1408 while token != None and token[0] == "sep" and token[1] == '[':
1409 self.type = self.type + token[1]
1410 token = self.token()
1411 while token != None and token[0] != 'sep' and \
1412 token[1] != ']' and token[1] != ';':
1413 self.type = self.type + token[1]
1414 token = self.token()
1415 if token != None and token[0] == 'sep' and token[1] == ']':
1416 self.type = self.type + token[1]
1417 token = self.token()
1419 self.error("parsing array type, ']' expected", token);
1421 elif token != None and token[0] == "sep" and token[1] == ':':
1422 # remove :12 in case it's a limited int size
1423 token = self.token()
1424 token = self.token()
1425 self.lexer.push(token)
# --- CParser.parseSignature -------------------------------------------
# Consume a C parameter list after the caller has already eaten the
# opening '(' and scan up to and including the closing ')'.  Each
# parameter is collected as a (type, name, info) triple and the result
# is stored in self.signature.
# NOTE(review): this listing is partial -- the initialization of the
# local `signature` list, the return statements and some else branches
# are elided, so the visible control flow is incomplete.
1431 # Parse a signature: '(' has been parsed and we scan the type definition
1432 # up to the ')' included
1433 def parseSignature(self, token):
# Empty "()" parameter list is handled up front.
1435 if token != None and token[0] == "sep" and token[1] == ')':
1437 token = self.token()
1439 while token != None:
# parseType() leaves the textual type it parsed in self.type.
1440 token = self.parseType(token)
1441 if token != None and token[0] == "name":
# "type name" pair: record the parameter, no description yet.
1442 signature.append((self.type, token[1], None))
1443 token = self.token()
1444 elif token != None and token[0] == "sep" and token[1] == ',':
1445 token = self.token()
1447 elif token != None and token[0] == "sep" and token[1] == ')':
1448 # only the type was provided
# varargs: keep "..." as both the type and the placeholder name.
1449 if self.type == "...":
1450 signature.append((self.type, "...", None))
1452 signature.append((self.type, None, None))
1453 if token != None and token[0] == "sep":
1455 token = self.token()
1457 elif token[1] == ')':
1458 token = self.token()
# The accumulated parameter triples become the current signature.
1460 self.signature = signature
# --- CParser.parseGlobal ----------------------------------------------
# Parse one top-level C construct (extern "C" block, static/extern
# declaration, typedef, variable or function) and register it in the
# symbol index via index_add().  Recurses for the contents of
# extern "C" blocks.
# NOTE(review): numerous original lines are elided from this listing
# (else branches, the `static` flag assignments, returns), so the
# visible control flow is incomplete.
1464 # Parse a global definition, be it a type, variable or function
1465 # the extern "C" blocks are a bit nasty and require it to recurse.
1467 def parseGlobal(self, token):
1469 if token[1] == 'extern':
1470 token = self.token()
# extern "C" { ... }: parse every global inside the braces recursively.
1473 if token[0] == 'string':
1475 token = self.token()
1478 if token[0] == 'sep' and token[1] == "{":
1479 token = self.token()
1480 # print 'Entering extern "C line ', self.lineno()
1481 while token != None and (token[0] != 'sep' or
1483 if token[0] == 'name':
1484 token = self.parseGlobal(token)
1487 "token %s %s unexpected at the top level" % (
1488 token[0], token[1]))
1489 token = self.parseGlobal(token)
1490 # print 'Exiting extern "C" line', self.lineno()
1491 token = self.token()
# 'static' presumably sets a flag consumed by index_add() below -- the
# assignment itself is elided from this listing; confirm upstream.
1495 elif token[1] == 'static':
1497 token = self.token()
1498 if token == None or token[0] != 'name':
# typedefs are delegated to the dedicated typedef parser.
1501 if token[1] == 'typedef':
1502 token = self.token()
1503 return self.parseTypedef(token)
1505 token = self.parseType(token)
1506 type_orig = self.type
1507 if token == None or token[0] != "name":
1510 self.name = token[1]
1511 token = self.token()
# Scan the separators/operators after the name to decide whether this
# is a variable (possibly initialized), a function, or a comma list of
# declarations sharing one base type.
1512 while token != None and (token[0] == "sep" or token[0] == "op"):
1513 if token[0] == "sep":
1515 type = type + token[1]
1516 token = self.token()
1517 while token != None and (token[0] != "sep" or \
1519 type = type + token[1]
1520 token = self.token()
1522 if token != None and token[0] == "op" and token[1] == "=":
1524 # Skip the initialization of the variable
1526 token = self.token()
1527 if token[0] == 'sep' and token[1] == '{':
1528 token = self.token()
1529 token = self.parseBlock(token)
1532 while token != None and (token[0] != "sep" or \
1533 (token[1] != ';' and token[1] != ',')):
1534 token = self.token()
1536 if token == None or token[0] != "sep" or (token[1] != ';' and
1538 self.error("missing ';' or ',' after value")
1540 if token != None and token[0] == "sep":
1543 token = self.token()
# Register the symbol: structs carry their parsed field list, anything
# else is indexed as a plain variable.
1544 if type == "struct":
1545 self.index_add(self.name, self.filename,
1546 not self.is_header, "struct", self.struct_fields)
1548 self.index_add(self.name, self.filename,
1549 not self.is_header, "variable", type)
# '(' after the name: a function prototype or definition.
1551 elif token[1] == "(":
1552 token = self.token()
1553 token = self.parseSignature(token)
1556 if token[0] == "sep" and token[1] == ";":
# prototype only -- merge its doc comment and index it.
1557 d = self.mergeFunctionComment(self.name,
1558 ((type, None), self.signature), 1)
1559 self.index_add(self.name, self.filename, static,
1561 token = self.token()
1562 elif token[0] == "sep" and token[1] == "{":
# function body follows -- index it, then skip over the block.
1563 d = self.mergeFunctionComment(self.name,
1564 ((type, None), self.signature), static)
1565 self.index_add(self.name, self.filename, static,
1567 token = self.token()
1568 token = self.parseBlock(token);
# ',' separates several declarations sharing one base type.
1569 elif token[1] == ',':
1571 self.index_add(self.name, self.filename, static,
1574 token = self.token()
1575 while token != None and token[0] == "sep":
1576 type = type + token[1]
1577 token = self.token()
1578 if token != None and token[0] == "name":
1579 self.name = token[1]
1580 token = self.token()
# --- body of CParser.parse (its def line is elided in this listing) ---
# Top-level driver: tokenize the whole file, hand every construct to
# parseGlobal(), then process the file-header comment at the end.
1587 self.warning("Parsing %s" % (self.filename))
1588 token = self.token()
1589 while token != None:
1590 if token[0] == 'name':
1591 token = self.parseGlobal(token)
# Anything else at the top level is reported, then parsing continues
# with the same recovery path.
1593 self.error("token %s %s unexpected at the top level" % (
1594 token[0], token[1]))
1595 token = self.parseGlobal(token)
1597 self.parseTopComment(self.top_comment)
1602 """A documentation builder"""
1603 def __init__(self, name, directories=['.'], excludes=[]):
1605 self.directories = directories
1606 self.excludes = excludes + ignored_files.keys()
1612 if name == 'libxml2':
1613 self.basename = 'libxml'
1615 self.basename = name
# --- docBuilder.indexString -------------------------------------------
# Record symbol `id` in the full-text cross-reference index self.xref
# under every significant word of the description string `str`.
# NOTE(review): shadows the `id` and `str` builtins, and uses the
# Python 2 `string` module API.  Several guard/continue lines are
# elided from this listing.
1617 def indexString(self, id, str):
# Flatten punctuation that may glue words together into spaces so that
# the split below yields clean word tokens.
1620 str = string.replace(str, "'", ' ')
1621 str = string.replace(str, '"', ' ')
1622 str = string.replace(str, "/", ' ')
1623 str = string.replace(str, '*', ' ')
1624 str = string.replace(str, "[", ' ')
1625 str = string.replace(str, "]", ' ')
1626 str = string.replace(str, "(", ' ')
1627 str = string.replace(str, ")", ' ')
1628 str = string.replace(str, "<", ' ')
1629 str = string.replace(str, '>', ' ')
1630 str = string.replace(str, "&", ' ')
1631 str = string.replace(str, '#', ' ')
1632 str = string.replace(str, ",", ' ')
1633 str = string.replace(str, '.', ' ')
1634 str = string.replace(str, ';', ' ')
1635 tokens = string.split(str)
1636 for token in tokens:
# Skip tokens not starting with a letter, very short tokens, and a
# couple of English stop-words; everything else is indexed.
1639 if string.find(string.letters, c) < 0:
1641 elif len(token) < 3:
1644 lower = string.lower(token)
1645 # TODO: generalize this a bit
1646 if lower == 'and' or lower == 'the':
1648 elif self.xref.has_key(token):
1649 self.xref[token].append(id)
1651 self.xref[token] = [id]
# NOTE(review): fragment of an elided reporting method -- prints a
# one-line summary of the scanned project (Python 2 print statement).
1656 print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
# Parse every collected header file and store its symbol index.
# NOTE(review): a trailing line merging each index into self.idx
# appears to be elided from this listing (compare scanModules below).
1659 def scanHeaders(self):
1660 for header in self.headers.keys():
1661 parser = CParser(header)
1662 idx = parser.parse()
1663 self.headers[header] = idx;
# Parse every collected C module, store its index, and merge it into
# the global index.
1666 def scanModules(self):
1667 for module in self.modules.keys():
1668 parser = CParser(module)
1669 idx = parser.parse()
1671 self.modules[module] = idx
# merge_public presumably merges only publicly-declared symbols into
# the global index -- TODO confirm against the index class.
1672 self.idx.merge_public(idx)
# --- body of the directory scan (its def line is elided) --------------
# Collect candidate *.c modules and *.h headers from each configured
# directory, skipping any file whose path contains an exclude fragment.
# NOTE(review): the per-file loop headers and the continue/skip lines
# are elided from this listing.
1675 for directory in self.directories:
1676 files = glob.glob(directory + "/*.c")
1679 for excl in self.excludes:
1680 if string.find(file, excl) != -1:
# files are registered with a None index; parsed later by scanModules.
1684 self.modules[file] = None;
1685 files = glob.glob(directory + "/*.h")
1688 for excl in self.excludes:
1689 if string.find(file, excl) != -1:
1693 self.headers[file] = None;
# Derive the module name from a path: basename with a trailing '.h' or
# '.c' extension stripped.
# NOTE(review): the return statement is elided from this listing.
1697 def modulename_file(self, file):
1698 module = os.path.basename(file)
1699 if module[-2:] == '.h':
1700 module = module[:-2]
1701 elif module[-2:] == '.c':
1702 module = module[:-2]
# Write one <enum> element; value/type/info attributes are emitted only
# when the corresponding slot of the indexed info tuple is non-empty.
# NOTE(review): the unpacking of `info` and `val` is elided from this
# listing.
1705 def serialize_enum(self, output, name):
1706 id = self.idx.enums[name]
1707 output.write(" <enum name='%s' file='%s'" % (name,
1708 self.modulename_file(id.header)))
1711 if info[0] != None and info[0] != '':
1716 output.write(" value='%s'" % (val));
1717 if info[2] != None and info[2] != '':
1718 output.write(" type='%s'" % info[2]);
1719 if info[1] != None and info[1] != '':
# free-text info is XML-escaped; value/type are written raw.
1720 output.write(" info='%s'" % escape(info[1]));
1721 output.write("/>\n")
# Write one <macro> element: an optional <info> description plus one
# <arg> per formal parameter; description text is XML-escaped and also
# fed to the keyword index.
1723 def serialize_macro(self, output, name):
1724 id = self.idx.macros[name]
1725 output.write(" <macro name='%s' file='%s'>\n" % (name,
1726 self.modulename_file(id.header)))
1729 (args, desc) = id.info
1730 if desc != None and desc != "":
1731 output.write(" <info>%s</info>\n" % (escape(desc)))
1732 self.indexString(name, desc)
# NOTE(review): the loop over `args` re-binding (name, desc) for each
# argument is elided from this listing; `name`/`desc` below refer to
# each macro argument, not the macro itself.
1735 if desc != None and desc != "":
1736 output.write(" <arg name='%s' info='%s'/>\n" % (
1737 name, escape(desc)))
1738 self.indexString(name, desc)
1740 output.write(" <arg name='%s'/>\n" % (name))
1743 output.write(" </macro>\n")
# Write a <struct> element (with its fields) when the typedef resolves
# to a struct whose fields were parsed, otherwise a plain <typedef>
# element with an optional <info> description.
1745 def serialize_typedef(self, output, name):
1746 id = self.idx.typedefs[name]
1747 if id.info[0:7] == 'struct ':
1748 output.write(" <struct name='%s' file='%s' type='%s'" % (
1749 name, self.modulename_file(id.header), id.info))
# Only structs whose field info was parsed into a tuple/list get their
# fields expanded as child elements.
1751 if self.idx.structs.has_key(name) and ( \
1752 type(self.idx.structs[name].info) == type(()) or
1753 type(self.idx.structs[name].info) == type([])):
1754 output.write(">\n");
1756 for field in self.idx.structs[name].info:
# NOTE(review): the assignment of `desc` (presumably a field slot) and
# the surrounding try/except are elided from this listing.
1758 self.indexString(name, desc)
1763 output.write(" <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
1765 print "Failed to serialize struct %s" % (name)
1766 output.write(" </struct>\n")
1768 output.write("/>\n");
1770 output.write(" <typedef name='%s' file='%s' type='%s'" % (
1771 name, self.modulename_file(id.header), id.info))
1774 if desc != None and desc != "":
1775 output.write(">\n <info>%s</info>\n" % (escape(desc)))
1776 output.write(" </typedef>\n")
1778 output.write("/>\n")
1780 output.write("/>\n")
# Write one <variable> element; the type attribute is emitted only when
# type info is available.
# NOTE(review): the guarding if/else lines are elided from this listing.
1782 def serialize_variable(self, output, name):
1783 id = self.idx.variables[name]
1785 output.write(" <variable name='%s' file='%s' type='%s'/>\n" % (
1786 name, self.modulename_file(id.header), id.info))
1788 output.write(" <variable name='%s' file='%s'/>\n" % (
1789 name, self.modulename_file(id.header)))
# Write one function (or functype) element: conditional-compilation
# guards, description, return type, and one <arg> per parameter --
# XML-escaping free text and feeding it to the keyword index.
# NOTE(review): several lines are elided (the debugsym branch body,
# `apstr` initialization, try/except around the info unpack).
1791 def serialize_function(self, output, name):
1792 id = self.idx.functions[name]
# debugsym is a module-level debugging hook (see the file header).
1793 if name == debugsym:
1796 output.write(" <%s name='%s' file='%s' module='%s'>\n" % (id.type,
1797 name, self.modulename_file(id.header),
1798 self.modulename_file(id.module)))
1800 # Processing of conditionals modified by Bill 1/1/05
# All cpp conditionals are joined with '&&' into one <cond> element.
1802 if id.conditionals != None:
1804 for cond in id.conditionals:
1806 apstr = apstr + " && "
1807 apstr = apstr + cond
1808 output.write(" <cond>%s</cond>\n"% (apstr));
1810 (ret, params, desc) = id.info
# Warn about undocumented symbols, except known exempt name patterns.
1811 if (desc == None or desc == '') and \
1812 name[0:9] != "xmlThrDef" and name != "xmlDllMain":
1813 print "%s %s from %s has no description" % (id.type, name,
1814 self.modulename_file(id.module))
1816 output.write(" <info>%s</info>\n" % (escape(desc)))
1817 self.indexString(name, desc)
1819 if ret[0] == "void":
1820 output.write(" <return type='void'/>\n")
1822 output.write(" <return type='%s' info='%s'/>\n" % (
1823 ret[0], escape(ret[1])))
1824 self.indexString(name, ret[1])
1825 for param in params:
1826 if param[0] == 'void':
1828 if param[2] == None:
1829 output.write(" <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
1831 output.write(" <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
1832 self.indexString(name, param[2])
# Python 2 backtick repr of the raw info tuple in the error message.
1834 print "Failed to save function %s info: " % name, `id.info`
1835 output.write(" </%s>\n" % (id.type))
# Write the <file> element for one header: summary metadata, an
# optional <deprecated/> flag, then one <exports> line per symbol,
# grouped by kind.  Macros that merely alias another indexed symbol
# kind are suppressed.
# NOTE(review): the ids.sort() calls and the `continue` statements in
# the masquerade checks are elided from this listing.
1837 def serialize_exports(self, output, file):
1838 module = self.modulename_file(file)
1839 output.write(" <file name='%s'>\n" % (module))
1840 dict = self.headers[file]
1841 if dict.info != None:
1842 for data in ('Summary', 'Description', 'Author'):
1844 output.write(" <%s>%s</%s>\n" % (
1846 escape(dict.info[data]),
1847 string.lower(data)))
1849 print "Header %s lacks a %s description" % (module, data)
# Headers whose description mentions DEPRECATED are flagged as such.
1850 if dict.info.has_key('Description'):
1851 desc = dict.info['Description']
1852 if string.find(desc, "DEPRECATED") != -1:
1853 output.write(" <deprecated/>\n")
1855 ids = dict.macros.keys()
1857 for id in uniq(ids):
1858 # Macros are sometimes used to masquerade other types.
1859 if dict.functions.has_key(id):
1861 if dict.variables.has_key(id):
1863 if dict.typedefs.has_key(id):
1865 if dict.structs.has_key(id):
1867 if dict.enums.has_key(id):
1869 output.write(" <exports symbol='%s' type='macro'/>\n" % (id))
1870 ids = dict.enums.keys()
1872 for id in uniq(ids):
1873 output.write(" <exports symbol='%s' type='enum'/>\n" % (id))
1874 ids = dict.typedefs.keys()
1876 for id in uniq(ids):
1877 output.write(" <exports symbol='%s' type='typedef'/>\n" % (id))
1878 ids = dict.structs.keys()
1880 for id in uniq(ids):
1881 output.write(" <exports symbol='%s' type='struct'/>\n" % (id))
1882 ids = dict.variables.keys()
1884 for id in uniq(ids):
1885 output.write(" <exports symbol='%s' type='variable'/>\n" % (id))
1886 ids = dict.functions.keys()
1888 for id in uniq(ids):
1889 output.write(" <exports symbol='%s' type='function'/>\n" % (id))
1890 output.write(" </file>\n")
# Per-header <file> entries listing every symbol the header defines.
# NOTE(review): the headers.sort() call and the inner loop over `ids`
# are elided from this listing.
1892 def serialize_xrefs_files(self, output):
1893 headers = self.headers.keys()
1895 for file in headers:
1896 module = self.modulename_file(file)
1897 output.write(" <file name='%s'>\n" % (module))
1898 dict = self.headers[file]
# All symbol kinds from this header, deduplicated via uniq().
1899 ids = uniq(dict.functions.keys() + dict.variables.keys() + \
1900 dict.macros.keys() + dict.typedefs.keys() + \
1901 dict.structs.keys() + dict.enums.keys())
1904 output.write(" <ref name='%s'/>\n" % (id))
1905 output.write(" </file>\n")
# Group function names by parameter type, then emit a <type> element
# per type referencing every function that takes it; trivial types
# (empty, void, int, char * variants) are skipped.
# NOTE(review): the `funcs = {}` initialization, the sorts, and the
# try/except around the info unpack are elided from this listing.
1908 def serialize_xrefs_functions(self, output):
1910 for name in self.idx.functions.keys():
1911 id = self.idx.functions[name]
1913 (ret, params, desc) = id.info
1914 for param in params:
1915 if param[0] == 'void':
1917 if funcs.has_key(param[0]):
1918 funcs[param[0]].append(name)
1920 funcs[param[0]] = [name]
1926 if type == '' or type == 'void' or type == "int" or \
1927 type == "char *" or type == "const char *" :
1929 output.write(" <type name='%s'>\n" % (type))
1932 pid = '' # not sure why we have dups, but get rid of them!
1935 output.write(" <ref name='%s'/>\n" % (id))
1937 output.write(" </type>\n")
# Same grouping as serialize_xrefs_functions but keyed on the return
# type: for each non-trivial type, list the functions that produce it.
# NOTE(review): initialization, sorting and exception-handling lines
# are elided from this listing.
1939 def serialize_xrefs_constructors(self, output):
1941 for name in self.idx.functions.keys():
1942 id = self.idx.functions[name]
1944 (ret, params, desc) = id.info
1945 if ret[0] == "void":
1947 if funcs.has_key(ret[0]):
1948 funcs[ret[0]].append(name)
1950 funcs[ret[0]] = [name]
1956 if type == '' or type == 'void' or type == "int" or \
1957 type == "char *" or type == "const char *" :
1959 output.write(" <type name='%s'>\n" % (type))
1963 output.write(" <ref name='%s'/>\n" % (id))
1964 output.write(" </type>\n")
# Alphabetical identifier listing: one <letter> element per initial
# character, each containing <ref> entries.
# NOTE(review): the sort, the loop over ids and the current-letter
# bookkeeping are elided from this listing.
1966 def serialize_xrefs_alpha(self, output):
1968 ids = self.idx.identifiers.keys()
1973 output.write(" </letter>\n")
1975 output.write(" <letter name='%s'>\n" % (letter))
1976 output.write(" <ref name='%s'/>\n" % (id))
1978 output.write(" </letter>\n")
# Map every identifier to the HTML page and anchor that document it
# (html/<basename>-<module>.html#<anchor>).
# NOTE(review): the sort, the loop header and the assignment of
# `module` (presumably from idf) are elided from this listing.
1980 def serialize_xrefs_references(self, output):
1981 typ = self.idx.identifiers.keys()
1984 idf = self.idx.identifiers[id]
1986 output.write(" <reference name='%s' href='%s'/>\n" % (id,
1987 'html/' + self.basename + '-' +
1988 self.modulename_file(module) + '.html#' +
# Emit the keyword index split into <chunk>s (roughly bounded by a
# per-chunk word count), each chunk holding <letter>/<word>/<ref>
# entries, followed by a <chunks> table of contents with each chunk's
# first and last letter.
# NOTE(review): large parts of the loop (sorting, counters, letter
# tracking, token collection) are elided, so the visible logic is
# fragmentary.
1991 def serialize_xrefs_index(self, output):
# words referenced more than 30 times are presumably skipped as too
# common to be useful -- the branch body is elided; confirm upstream.
2000 if len(index[id]) > 30:
# start a new chunk on the first word or once ~200 entries are emitted.
2003 if letter == None or count > 200:
2005 output.write(" </letter>\n")
2006 output.write(" </chunk>\n")
2008 chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
2009 output.write(" <chunk name='chunk%s'>\n" % (chunk))
2010 first_letter = id[0]
2012 elif letter != None:
2013 output.write(" </letter>\n")
2015 output.write(" <letter name='%s'>\n" % (letter))
2016 output.write(" <word name='%s'>\n" % (id))
2020 for token in tokens:
2024 output.write(" <ref name='%s'/>\n" % (token))
2026 output.write(" </word>\n")
2028 output.write(" </letter>\n")
2029 output.write(" </chunk>\n")
2031 chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
2032 output.write(" <chunks>\n")
2034 output.write(" <chunk name='%s' start='%s' end='%s'/>\n" % (
2035 ch[0], ch[1], ch[2]))
2036 output.write(" </chunks>\n")
# Emit every cross-reference section of the -refs.xml document.  Each
# section is delegated to its dedicated serializer and wrapped in the
# matching container element, in a fixed order.
def serialize_xrefs(self, output):
    for tag, emit in (
        ("references", self.serialize_xrefs_references),
        ("alpha", self.serialize_xrefs_alpha),
        ("constructors", self.serialize_xrefs_constructors),
        ("functions", self.serialize_xrefs_functions),
        ("files", self.serialize_xrefs_files),
        ("index", self.serialize_xrefs_index),
    ):
        output.write(" <%s>\n" % tag)
        emit(output)
        output.write(" </%s>\n" % tag)
# Top-level save: write <name>-api.xml (per-file exports plus every
# symbol kind) and <name>-refs.xml (cross references).
# NOTE(review): the sort() calls on the key lists and the
# output.close() lines appear elided from this listing; files are
# opened in text mode with a declared ISO-8859-1 XML encoding.
2058 def serialize(self):
2059 filename = "%s-api.xml" % self.name
2060 print "Saving XML description %s" % (filename)
2061 output = open(filename, "w")
2062 output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
2063 output.write("<api name='%s'>\n" % self.name)
2064 output.write(" <files>\n")
2065 headers = self.headers.keys()
2067 for file in headers:
2068 self.serialize_exports(output, file)
2069 output.write(" </files>\n")
2070 output.write(" <symbols>\n")
2071 macros = self.idx.macros.keys()
2073 for macro in macros:
2074 self.serialize_macro(output, macro)
2075 enums = self.idx.enums.keys()
2078 self.serialize_enum(output, enum)
2079 typedefs = self.idx.typedefs.keys()
2081 for typedef in typedefs:
2082 self.serialize_typedef(output, typedef)
2083 variables = self.idx.variables.keys()
2085 for variable in variables:
2086 self.serialize_variable(output, variable)
2087 functions = self.idx.functions.keys()
2089 for function in functions:
2090 self.serialize_function(output, function)
2091 output.write(" </symbols>\n")
2092 output.write("</api>\n")
# Second document: the cross-reference file.
2095 filename = "%s-refs.xml" % self.name
2096 print "Saving XML Cross References %s" % (filename)
2097 output = open(filename, "w")
2098 output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
2099 output.write("<apirefs name='%s'>\n" % self.name)
2100 self.serialize_xrefs(output)
2101 output.write("</apirefs>\n")
# --- body of rebuild() (its def line is elided) -----------------------
# Guess which project tree we are sitting in by probing for landmark
# source files, then construct the matching docBuilder with the
# per-project exclude list.
2107 if glob.glob("parser.c") != [] :
2108 print "Rebuilding API description for libxml2"
# NOTE(review): [".", "."] scans the current directory twice -- this
# looks like it may have been meant to include the headers directory
# (compare the "../include/libxml" branch below); confirm upstream.
2109 builder = docBuilder("libxml2", [".", "."],
2110 ["xmlwin32version.h", "tst.c"])
2111 elif glob.glob("../parser.c") != [] :
2112 print "Rebuilding API description for libxml2"
2113 builder = docBuilder("libxml2", ["..", "../include/libxml"],
2114 ["xmlwin32version.h", "tst.c"])
2115 elif glob.glob("../libxslt/transform.c") != [] :
2116 print "Rebuilding API description for libxslt"
2117 builder = docBuilder("libxslt", ["../libxslt"],
2118 ["win32config.h", "libxslt.h", "tst.c"])
2120 print "rebuild() failed, unable to guess the module"
# libexslt is processed as an extra pass when its sources are present.
2125 if glob.glob("../libexslt/exslt.c") != [] :
2126 extra = docBuilder("libexslt", ["../libexslt"], ["libexslt.h"])
2133 # for debugging the parser
# Standalone helper: run the C parser over a single file and build its
# index.  NOTE(review): the `return idx` line appears elided from this
# listing.
2135 def parse(filename):
2136 parser = CParser(filename)
2137 idx = parser.parse()
2140 if __name__ == "__main__":
2141 if len(sys.argv) > 1: