#!/usr/bin/python -u
#
# This is the API builder: it parses the C sources and builds the
# formal API description in XML.
#
# See Copyright for the status of this software.
#
# daniel@veillard.com
#
import os, sys
import string
import glob
import re

quiet=True
warnings=0
debug=False
debugsym=None

#
# C parser analysis code
#
included_files = {
  "libvirt.h": "header with general libvirt API definitions",
  "libvirt-domain.h": "header with general libvirt API definitions",
  "libvirt-domain-snapshot.h": "header with general libvirt API definitions",
  "libvirt-event.h": "header with general libvirt API definitions",
  "libvirt-interface.h": "header with general libvirt API definitions",
  "libvirt-network.h": "header with general libvirt API definitions",
  "libvirt-nodedev.h": "header with general libvirt API definitions",
  "libvirt-nwfilter.h": "header with general libvirt API definitions",
  "libvirt-secret.h": "header with general libvirt API definitions",
  "libvirt-storage.h": "header with general libvirt API definitions",
  "libvirt-stream.h": "header with general libvirt API definitions",
  "virterror.h": "header with error specific API definitions",
  "libvirt.c": "Main interfaces for the libvirt library",
  "libvirt-domain.c": "Domain interfaces for the libvirt library",
  "libvirt-domain-snapshot.c": "Domain snapshot interfaces for the libvirt library",
  "libvirt-host.c": "Host interfaces for the libvirt library",
  "libvirt-interface.c": "Interface interfaces for the libvirt library",
  "libvirt-network.c": "Network interfaces for the libvirt library",
  "libvirt-nodedev.c": "Node device interfaces for the libvirt library",
  "libvirt-nwfilter.c": "NWFilter interfaces for the libvirt library",
  "libvirt-secret.c": "Secret interfaces for the libvirt library",
  "libvirt-storage.c": "Storage interfaces for the libvirt library",
  "libvirt-stream.c": "Stream interfaces for the libvirt library",
  "virerror.c": "implements error handling and reporting code for libvirt",
  "virevent.c": "event loop for monitoring file handles",
  "virtypedparam.c": "virTypedParameters APIs",
}

qemu_included_files = {
  "libvirt-qemu.h": "header with QEMU specific API definitions",
  "libvirt-qemu.c": "Implementations for the QEMU specific APIs",
}

lxc_included_files = {
  "libvirt-lxc.h": "header with LXC specific API definitions",
  "libvirt-lxc.c": "Implementations for the LXC specific APIs",
}

ignored_words = {
  "ATTRIBUTE_UNUSED": (0, "macro keyword"),
  "ATTRIBUTE_SENTINEL": (0, "macro keyword"),
  "VIR_DEPRECATED": (0, "macro keyword"),
  "VIR_EXPORT_VAR": (0, "macro keyword"),
  "WINAPI": (0, "Windows keyword"),
  "__declspec": (3, "Windows keyword"),
  "__stdcall": (0, "Windows keyword"),
}

ignored_functions = {
  "virConnectSupportsFeature": "private function for remote access",
  "virDomainMigrateFinish": "private function for migration",
  "virDomainMigrateFinish2": "private function for migration",
  "virDomainMigratePerform": "private function for migration",
  "virDomainMigratePrepare": "private function for migration",
  "virDomainMigratePrepare2": "private function for migration",
  "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
  "virDomainMigrateBegin3": "private function for migration",
  "virDomainMigrateFinish3": "private function for migration",
  "virDomainMigratePerform3": "private function for migration",
  "virDomainMigratePrepare3": "private function for migration",
  "virDomainMigrateConfirm3": "private function for migration",
  "virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
  "DllMain": "specific function for Win32",
  "virTypedParamsValidate": "internal function in virtypedparam.c",
  "virTypedParameterValidateSet": "internal function in virtypedparam.c",
  "virTypedParameterAssign": "internal function in virtypedparam.c",
  "virTypedParameterAssignFromStr": "internal function in virtypedparam.c",
  "virTypedParameterToString": "internal function in virtypedparam.c",
  "virTypedParamsCheck": "internal function in virtypedparam.c",
  "virTypedParamsCopy": "internal function in virtypedparam.c",
  "virDomainMigrateBegin3Params": "private function for migration",
  "virDomainMigrateFinish3Params": "private function for migration",
  "virDomainMigratePerform3Params": "private function for migration",
  "virDomainMigratePrepare3Params": "private function for migration",
  "virDomainMigrateConfirm3Params": "private function for migration",
  "virDomainMigratePrepareTunnel3Params": "private function for tunnelled migration",
}

ignored_macros = {
  "_virSchedParameter": "backward compatibility macro for virTypedParameter",
  "_virBlkioParameter": "backward compatibility macro for virTypedParameter",
  "_virMemoryParameter": "backward compatibility macro for virTypedParameter",
}

def escape(raw):
    raw = string.replace(raw, '&', '&amp;')
    raw = string.replace(raw, '<', '&lt;')
    raw = string.replace(raw, '>', '&gt;')
    raw = string.replace(raw, "'", '&apos;')
    raw = string.replace(raw, '"', '&quot;')
    return raw
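
# For illustration, escape('size > 1 & "quoted"') returns
# 'size &gt; 1 &amp; &quot;quoted&quot;', which is safe to embed in the
# generated XML.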

def uniq(items):
    d = {}
    for item in items:
        d[item]=1
    k = d.keys()
    k.sort()
    return k

class identifier:
    def __init__(self, name, header=None, module=None, type=None, lineno = 0,
                 info=None, extra=None, conditionals = None):
        self.name = name
        self.header = header
        self.module = module
        self.type = type
        self.info = info
        self.extra = extra
        self.lineno = lineno
        self.static = 0
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]
        if self.name == debugsym and not quiet:
            print "=> define %s : %s" % (debugsym, (module, type, info,
                                         extra, conditionals))

    def __repr__(self):
        r = "%s %s:" % (self.type, self.name)
        if self.static:
            r = r + " static"
        if self.module is not None:
            r = r + " from %s" % (self.module)
        if self.info is not None:
            r = r + " " +  `self.info`
        if self.extra is not None:
            r = r + " " + `self.extra`
        if self.conditionals is not None:
            r = r + " " + `self.conditionals`
        return r


    def set_header(self, header):
        self.header = header
    def set_module(self, module):
        self.module = module
    def set_type(self, type):
        self.type = type
    def set_info(self, info):
        self.info = info
    def set_extra(self, extra):
        self.extra = extra
    def set_lineno(self, lineno):
        self.lineno = lineno
    def set_static(self, static):
        self.static = static
    def set_conditionals(self, conditionals):
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]

    def get_name(self):
        return self.name
    def get_header(self):
        return self.header
    def get_module(self):
        return self.module
    def get_type(self):
        return self.type
    def get_info(self):
        return self.info
    def get_lineno(self):
        return self.lineno
    def get_extra(self):
        return self.extra
    def get_static(self):
        return self.static
    def get_conditionals(self):
        return self.conditionals

    def update(self, header, module, type = None, info = None, extra=None,
               conditionals=None):
        if self.name == debugsym and not quiet:
            print "=> update %s : %s" % (debugsym, (module, type, info,
                                         extra, conditionals))
        if header is not None and self.header is None:
            self.set_header(header)
        if module is not None and (self.module is None or self.header == self.module):
            self.set_module(module)
        if type is not None and self.type is None:
            self.set_type(type)
        if info is not None:
            self.set_info(info)
        if extra is not None:
            self.set_extra(extra)
        if conditionals is not None:
            self.set_conditionals(conditionals)

class index:
    def __init__(self, name = "noname"):
        self.name = name
        self.identifiers = {}
        self.functions = {}
        self.variables = {}
        self.includes = {}
        self.structs = {}
        self.unions = {}
        self.enums = {}
        self.typedefs = {}
        self.macros = {}
        self.references = {}
        self.info = {}

    def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
           d = self.identifiers[name]
           d.update(header, module, type, lineno, info, extra, conditionals)
        except:
           d = identifier(name, header, module, type, lineno, info, extra, conditionals)
           self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            self.references[name] = d

        if name == debugsym and not quiet:
            print "New ref: %s" % (d)

        return d

    def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
           d = self.identifiers[name]
           d.update(header, module, type, lineno, info, extra, conditionals)
        except:
           d = identifier(name, header, module, type, lineno, info, extra, conditionals)
           self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            if type == "function":
                self.functions[name] = d
            elif type == "functype":
                self.functions[name] = d
            elif type == "variable":
                self.variables[name] = d
            elif type == "include":
                self.includes[name] = d
            elif type == "struct":
                self.structs[name] = d
            elif type == "union":
                self.unions[name] = d
            elif type == "enum":
                self.enums[name] = d
            elif type == "typedef":
                self.typedefs[name] = d
            elif type == "macro":
                self.macros[name] = d
            else:
                self.warning("Unable to register type %s" % type)

        if name == debugsym and not quiet:
            print "New symbol: %s" % (d)

        return d

    def merge(self, idx):
        for id in idx.functions.keys():
              #
              # a macro might be used to override function or variable
              # definitions
              #
             if self.macros.has_key(id):
                 del self.macros[id]
             if self.functions.has_key(id):
                 self.warning("function %s from %s redeclared in %s" % (
                    id, self.functions[id].header, idx.functions[id].header))
             else:
                 self.functions[id] = idx.functions[id]
                 self.identifiers[id] = idx.functions[id]
        for id in idx.variables.keys():
              #
              # a macro might be used to override function or variable
              # definitions
              #
             if self.macros.has_key(id):
                 del self.macros[id]
             if self.variables.has_key(id):
                 self.warning("variable %s from %s redeclared in %s" % (
                    id, self.variables[id].header, idx.variables[id].header))
             else:
                 self.variables[id] = idx.variables[id]
                 self.identifiers[id] = idx.variables[id]
        for id in idx.structs.keys():
             if self.structs.has_key(id):
                 self.warning("struct %s from %s redeclared in %s" % (
                    id, self.structs[id].header, idx.structs[id].header))
             else:
                 self.structs[id] = idx.structs[id]
                 self.identifiers[id] = idx.structs[id]
        for id in idx.unions.keys():
             if self.unions.has_key(id):
                 print "union %s from %s redeclared in %s" % (
                    id, self.unions[id].header, idx.unions[id].header)
             else:
                 self.unions[id] = idx.unions[id]
                 self.identifiers[id] = idx.unions[id]
        for id in idx.typedefs.keys():
             if self.typedefs.has_key(id):
                 self.warning("typedef %s from %s redeclared in %s" % (
                    id, self.typedefs[id].header, idx.typedefs[id].header))
             else:
                 self.typedefs[id] = idx.typedefs[id]
                 self.identifiers[id] = idx.typedefs[id]
        for id in idx.macros.keys():
              #
              # a macro might be used to override function or variable
              # definitions
              #
             if self.variables.has_key(id):
                 continue
             if self.functions.has_key(id):
                 continue
             if self.enums.has_key(id):
                 continue
             if self.macros.has_key(id):
                 self.warning("macro %s from %s redeclared in %s" % (
                    id, self.macros[id].header, idx.macros[id].header))
             else:
                 self.macros[id] = idx.macros[id]
                 self.identifiers[id] = idx.macros[id]
        for id in idx.enums.keys():
             if self.enums.has_key(id):
                 self.warning("enum %s from %s redeclared in %s" % (
                    id, self.enums[id].header, idx.enums[id].header))
             else:
                 self.enums[id] = idx.enums[id]
                 self.identifiers[id] = idx.enums[id]

    def merge_public(self, idx):
        for id in idx.functions.keys():
             if self.functions.has_key(id):
                 # check that function condition agrees with header
                 if idx.functions[id].conditionals != \
                    self.functions[id].conditionals:
                     self.warning("Header condition differs from Function for %s:" \
                                      % id)
                     self.warning("  H: %s" % self.functions[id].conditionals)
                     self.warning("  C: %s" % idx.functions[id].conditionals)
                 up = idx.functions[id]
                 self.functions[id].update(None, up.module, up.type, up.info, up.extra)
         #     else:
         #         print "Function %s from %s is not declared in headers" % (
         #              id, idx.functions[id].module)
         # TODO: do the same for variables.

    def analyze_dict(self, type, dict):
        count = 0
        public = 0
        for name in dict.keys():
            id = dict[name]
            count = count + 1
            if id.static == 0:
                public = public + 1
        if count != public:
            print "  %d %s , %d public" % (count, type, public)
        elif count != 0:
            print "  %d public %s" % (count, type)


    def analyze(self):
        if not quiet:
            self.analyze_dict("functions", self.functions)
            self.analyze_dict("variables", self.variables)
            self.analyze_dict("structs", self.structs)
            self.analyze_dict("unions", self.unions)
            self.analyze_dict("typedefs", self.typedefs)
            self.analyze_dict("macros", self.macros)

class CLexer:
    """A lexer for the C language; it tokenizes the input by reading and
       analyzing it line by line"""
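    # Illustrative sketch of the token stream (not exhaustive): feeding
    #     int foo(void);
    # through token() yields roughly
    #     ('name', 'int'), ('name', 'foo'), ('sep', '('),
    #     ('name', 'void'), ('sep', ')'), ('sep', ';')
    # Comments and preprocessor lines come back as 'comment' and 'preproc'
    # tokens; CParser.token() filters those out later.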
    def __init__(self, input):
        self.input = input
        self.tokens = []
        self.line = ""
        self.lineno = 0

    def getline(self):
        line = ''
        while line == '':
            line = self.input.readline()
            if not line:
                return None
            self.lineno = self.lineno + 1
            line = string.lstrip(line)
            line = string.rstrip(line)
            if line == '':
                continue
            while line[-1] == '\\':
                line = line[:-1]
                n = self.input.readline()
                self.lineno = self.lineno + 1
                n = string.lstrip(n)
                n = string.rstrip(n)
                if not n:
                    break
                else:
                    line = line + n
        return line

    def getlineno(self):
        return self.lineno

    def push(self, token):
        self.tokens.insert(0, token)

    def debug(self):
        print "Last token: ", self.last
        print "Token queue: ", self.tokens
        print "Line %d end: " % (self.lineno), self.line

    def token(self):
        while self.tokens == []:
            if self.line == "":
                line = self.getline()
            else:
                line = self.line
                self.line = ""
            if line is None:
                return None

            if line[0] == '#':
                self.tokens = map((lambda x: ('preproc', x)),
                                  string.split(line))
                break
            l = len(line)
            if line[0] == '"' or line[0] == "'":
                end = line[0]
                line = line[1:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == end:
                            self.line = line[i+1:]
                            line = line[:i]
                            l = i
                            found = 1
                            break
                        if line[i] == '\\':
                            i = i + 1
                        i = i + 1
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('string', tok)
                return self.last

            if l >= 2 and line[0] == '/' and line[1] == '*':
                line = line[2:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == '*' and i+1 < l and line[i+1] == '/':
                            self.line = line[i+2:]
                            line = line[:i-1]
                            l = i
                            found = 1
                            break
                        i = i + 1
                    if tok != "":
                        tok = tok + "\n"
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('comment', tok)
                return self.last
            if l >= 2 and line[0] == '/' and line[1] == '/':
                line = line[2:]
                self.last = ('comment', line)
                return self.last
            i = 0
            while i < l:
                if line[i] == '/' and i+1 < l and line[i+1] == '/':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '/' and i+1 < l and line[i+1] == '*':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '"' or line[i] == "'":
                    self.line = line[i:]
                    line = line[:i]
                    break
                i = i + 1
            l = len(line)
            i = 0
            while i < l:
                if line[i] == ' ' or line[i] == '\t':
                    i = i + 1
                    continue
                o = ord(line[i])
                if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                   (o >= 48 and o <= 57):
                    s = i
                    while i < l:
                        o = ord(line[i])
                        if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                           (o >= 48 and o <= 57) or string.find(
                               " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
                            i = i + 1
                        else:
                            break
                    self.tokens.append(('name', line[s:i]))
                    continue
                if string.find("(){}:;,[]", line[i]) != -1:
#                 if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
#                   line[i] == '}' or line[i] == ':' or line[i] == ';' or \
#                   line[i] == ',' or line[i] == '[' or line[i] == ']':
                    self.tokens.append(('sep', line[i]))
                    i = i + 1
                    continue
                if string.find("+-*><=/%&!|.", line[i]) != -1:
#                 if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
#                   line[i] == '>' or line[i] == '<' or line[i] == '=' or \
#                   line[i] == '/' or line[i] == '%' or line[i] == '&' or \
#                   line[i] == '!' or line[i] == '|' or line[i] == '.':
                    if line[i] == '.' and  i + 2 < l and \
                       line[i+1] == '.' and line[i+2] == '.':
                        self.tokens.append(('name', '...'))
                        i = i + 3
                        continue

                    j = i + 1
                    if j < l and (
                       string.find("+-*><=/%&!|", line[j]) != -1):
#                       line[j] == '+' or line[j] == '-' or line[j] == '*' or \
#                       line[j] == '>' or line[j] == '<' or line[j] == '=' or \
#                       line[j] == '/' or line[j] == '%' or line[j] == '&' or \
#                       line[j] == '!' or line[j] == '|'):
                        self.tokens.append(('op', line[i:j+1]))
                        i = j + 1
                    else:
                        self.tokens.append(('op', line[i]))
                        i = i + 1
                    continue
                s = i
                while i < l:
                    o = ord(line[i])
                    if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                       (o >= 48 and o <= 57) or (
                        string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
#                        line[i] != ' ' and line[i] != '\t' and
#                        line[i] != '(' and line[i] != ')' and
#                        line[i] != '{'  and line[i] != '}' and
#                        line[i] != ':' and line[i] != ';' and
#                        line[i] != ',' and line[i] != '+' and
#                        line[i] != '-' and line[i] != '*' and
#                        line[i] != '/' and line[i] != '%' and
#                        line[i] != '&' and line[i] != '!' and
#                        line[i] != '|' and line[i] != '[' and
#                        line[i] != ']' and line[i] != '=' and
#                        line[i] != '*' and line[i] != '>' and
#                        line[i] != '<'):
                        i = i + 1
                    else:
                        break
                self.tokens.append(('name', line[s:i]))

        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        self.last = tok
        return tok

class CParser:
    """The C module parser"""
    def __init__(self, filename, idx = None):
        self.filename = filename
        if len(filename) > 2 and filename[-2:] == '.h':
            self.is_header = 1
        else:
            self.is_header = 0
        self.input = open(filename)
        self.lexer = CLexer(self.input)
        if idx is None:
            self.index = index()
        else:
            self.index = idx
        self.top_comment = ""
        self.last_comment = ""
        self.comment = None
        self.collect_ref = 0
        self.no_error = 0
        self.conditionals = []
        self.defines = []

    def collect_references(self):
        self.collect_ref = 1

    def stop_error(self):
        self.no_error = 1

    def start_error(self):
        self.no_error = 0

    def lineno(self):
        return self.lexer.getlineno()

    def index_add(self, name, module, static, type, info=None, extra = None):
        if self.is_header == 1:
            self.index.add(name, module, module, static, type, self.lineno(),
                           info, extra, self.conditionals)
        else:
            self.index.add(name, None, module, static, type, self.lineno(),
                           info, extra, self.conditionals)

    def index_add_ref(self, name, module, static, type, info=None,
                      extra = None):
        if self.is_header == 1:
            self.index.add_ref(name, module, module, static, type,
                               self.lineno(), info, extra, self.conditionals)
        else:
            self.index.add_ref(name, None, module, static, type, self.lineno(),
                               info, extra, self.conditionals)

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        if self.no_error:
            return
        print msg

    def error(self, msg, token=-1):
        if self.no_error:
            return

        print "Parse Error: " + msg
        if token != -1:
            print "Got token ", token
        self.lexer.debug()
        sys.exit(1)

    def debug(self, msg, token=-1):
        print "Debug: " + msg
        if token != -1:
            print "Got token ", token
        self.lexer.debug()

    def parseTopComment(self, comment):
        res = {}
        lines = string.split(comment, "\n")
        item = None
        for line in lines:
            line = line.lstrip().lstrip('*').lstrip()

            m = re.match('([_.a-zA-Z0-9]+):(.*)', line)
            if m:
                item = m.group(1)
                line = m.group(2).lstrip()

            if item:
                if res.has_key(item):
                    res[item] = res[item] + " " + line
                else:
                    res[item] = line
        self.index.info = res
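
    # As a hypothetical example, a top-of-file comment containing
    #     * libvirt-foo.c: example module description
    #     * Summary: short summary text
    #     *   that continues on the next line
    # leaves index.info as roughly
    #     {'libvirt-foo.c': 'example module description',
    #      'Summary': 'short summary text that continues on the next line'}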

    def strip_lead_star(self, line):
        l = len(line)
        i = 0
        while i < l:
            if line[i] == ' ' or line[i] == '\t':
                i += 1
            elif line[i] == '*':
                return line[:i] + line[i + 1:]
            else:
                return line
        return line

    def cleanupComment(self):
        if type(self.comment) != type(""):
            return
        # remove the leading * on multi-line comments
        lines = self.comment.splitlines(True)
        com = ""
        for line in lines:
            com = com + self.strip_lead_star(line)
        self.comment = com.strip()

    def parseComment(self, token):
        com = token[1]
        if self.top_comment == "":
            self.top_comment = com
        if self.comment is None or com[0] == '*':
            self.comment = com
        else:
            self.comment = self.comment + com
        token = self.lexer.token()

        if string.find(self.comment, "DOC_DISABLE") != -1:
            self.stop_error()

        if string.find(self.comment, "DOC_ENABLE") != -1:
            self.start_error()

        return token

    #
    # Parse a comment block associated with a typedef
    #
    def parseTypeComment(self, name, quiet = 0):
        if name[0:2] == '__':
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for type %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in type comment for %s" % (name))
            return((args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted type comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = string.strip(l)
            desc = desc + " " + l
            del lines[0]

        desc = string.strip(desc)

        if quiet == 0:
            if desc == "":
                self.warning("Type comment for %s lacks a description of the type" % (name))

        return(desc)
    #
    # Parse a comment block associated with a macro
    #
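    # As a sketch, a macro doc comment is expected to look like
    # (VIR_EXAMPLE_MACRO and @arg are hypothetical names):
    #
    #     /**
    #      * VIR_EXAMPLE_MACRO:
    #      * @arg: role of the macro argument
    #      *
    #      * What the macro is used for.
    #      */
    #
    # and is returned roughly as
    #     ([('arg', 'role of the macro argument')], 'What the macro is used for.')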
    def parseMacroComment(self, name, quiet = 0):
        global ignored_macros

        if name[0:2] == '__':
            quiet = 1
        if ignored_macros.has_key(name):
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for macro %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in macro comment for %s" % (name))
            return((args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted macro comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = string.split(l, ':', 1)
                desc=string.strip(desc)
                arg=string.strip(arg)
            except:
                if not quiet:
                    self.warning("Misformatted macro comment for %s" % (name))
                    self.warning("  problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = string.strip(lines[0])
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + string.strip(l)
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            args.append((arg, desc))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = string.strip(l)
            desc = desc + " " + l
            del lines[0]

        desc = string.strip(desc)

        if quiet == 0:
            if desc == "":
                self.warning("Macro comment for %s lacks a description of the macro" % (name))

        return((args, desc))

     #
     # Parse a comment block and merge the information found in the
     # parameter descriptions, finally returning a block as complete
     # as possible
     #
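     # As a sketch, a function doc comment is expected to follow the usual
     # libvirt pattern (virExampleFunc and its arguments are hypothetical):
     #
     #     /**
     #      * virExampleFunc:
     #      * @conn: pointer to the hypervisor connection
     #      * @flags: extra flags; not used yet, so callers should always pass 0
     #      *
     #      * Describe what the function does.
     #      *
     #      * Returns 0 in case of success and -1 in case of failure.
     #      */
     #
     # The @arg descriptions are merged into the signature passed in as
     # `description`, and the "Returns" text becomes the return value
     # description.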
    def mergeFunctionComment(self, name, description, quiet = 0):
        global ignored_functions

        if name == 'main':
            quiet = 1
        if name[0:2] == '__':
            quiet = 1
        if ignored_functions.has_key(name):
            quiet = 1

        (ret, args) = description
        desc = ""
        retdesc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for function %s" % (name))
            return(((ret[0], retdesc), args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in function comment for %s" % (name))
            return(((ret[0], retdesc), args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted function comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return(((ret[0], retdesc), args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        nbargs = len(args)
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = string.split(l, ':', 1)
                desc=string.strip(desc)
                arg=string.strip(arg)
            except:
                if not quiet:
                    self.warning("Misformatted function comment for %s" % (name))
                    self.warning("  problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = string.strip(lines[0])
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + string.strip(l)
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            i = 0
            while i < nbargs:
                if args[i][1] == arg:
                    args[i] = (args[i][0], arg, desc)
930
                    break
931 932 933 934 935 936 937 938 939 940 941 942
                i = i + 1
            if i >= nbargs:
                if not quiet:
                    self.warning("Unable to find arg %s from function comment for %s" % (
                       arg, name))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = None
        while len(lines) > 0:
            l = lines[0]
            i = 0
            # Remove all leading '*', followed by at most one ' ' character
943
            # since we need to preserve correct indentation of code examples
944 945 946 947 948 949
            while i < len(l) and l[i] == '*':
                i = i + 1
            if i > 0:
                if i < len(l) and l[i] == ' ':
                    i = i + 1
                l = l[i:]
950
            if len(l) >= 6 and l[0:7] == "Returns":
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981
                try:
                    l = string.split(l, ' ', 1)[1]
                except:
                    l = ""
                retdesc = string.strip(l)
                del lines[0]
                while len(lines) > 0:
                    l = lines[0]
                    while len(l) > 0 and l[0] == '*':
                        l = l[1:]
                    l = string.strip(l)
                    retdesc = retdesc + " " + l
                    del lines[0]
            else:
                if desc is not None:
                    desc = desc + "\n" + l
                else:
                    desc = l
                del lines[0]

        if desc is None:
            desc = ""
        retdesc = string.strip(retdesc)
        desc = string.strip(desc)

        if quiet == 0:
             #
             # report missing comments
             #
            i = 0
            while i < nbargs:
982
                if args[i][2] is None and args[i][0] != "void" and args[i][1] is not None:
983 984 985 986 987 988 989 990 991
                    self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
                i = i + 1
            if retdesc == "" and ret[0] != "void":
                self.warning("Function comment for %s lacks description of return value" % (name))
            if desc == "":
                self.warning("Function comment for %s lacks description of the function" % (name))


        return(((ret[0], retdesc), args, desc))
992 993

    def parsePreproc(self, token):
994 995
        if debug:
            print "=> preproc ", token, self.lexer.tokens
996
        name = token[1]
997 998
        if name == "#include":
            token = self.lexer.token()
999
            if token is None:
1000 1001 1002 1003 1004 1005 1006 1007
                return None
            if token[0] == 'preproc':
                self.index_add(token[1], self.filename, not self.is_header,
                                "include")
                return self.lexer.token()
            return token
        if name == "#define":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                 # TODO macros with arguments
                name = token[1]
                lst = []
                token = self.lexer.token()
                while token is not None and token[0] == 'preproc' and \
                      token[1][0] != '#':
                    lst.append(token[1])
                    token = self.lexer.token()
                try:
                    name = string.split(name, '(') [0]
                except:
                    pass
                info = self.parseMacroComment(name, not self.is_header)
                self.index_add(name, self.filename, not self.is_header,
                                "macro", info)
                return token

        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        # We process conditionals (i.e. tokens from #ifdef, #ifndef,
        # #if, #else and #endif) for headers and mainline code,
        # store the ones from the header in libxml2-api.xml, and later
        # (in the routine merge_public) verify that the two (header and
        # mainline code) agree.
        #
        # There is a small problem with processing the headers. Some of
        # the variables are not concerned with enabling / disabling of
        # library functions (e.g. '__XML_PARSER_H__'), and we don't want
        # them to be included in libxml2-api.xml, or involved in
        # the check between the header and the mainline code.  To
        # accomplish this, we ignore any conditional which doesn't include
        # the string 'ENABLED'
        #
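        # For instance (hypothetical symbols): in a header,
        #     #ifdef WITH_EXAMPLE_ENABLED
        # pushes "defined(WITH_EXAMPLE_ENABLED)" onto self.conditionals,
        # whereas an include guard such as
        #     #ifndef __VIR_EXAMPLE_H__
        # is only recorded in self.defines and never becomes a conditional,
        # since its name does not contain the string 'ENABLED'.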
        if name == "#ifdef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append("defined(%s)" % apstr)
            except:
                pass
        elif name == "#ifndef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append("!defined(%s)" % apstr)
            except:
                pass
        elif name == "#if":
            apstr = ""
            for tok in self.lexer.tokens:
                if apstr != "":
                    apstr = apstr + " "
                apstr = apstr + tok[1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append(apstr)
            except:
                pass
        elif name == "#else":
            if self.conditionals != [] and \
               string.find(self.defines[-1], 'ENABLED') != -1:
                self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
        elif name == "#endif":
            if self.conditionals != [] and \
               string.find(self.defines[-1], 'ENABLED') != -1:
                self.conditionals = self.conditionals[:-1]
            self.defines = self.defines[:-1]
        token = self.lexer.token()
        while token is not None and token[0] == 'preproc' and \
            token[1][0] != '#':
            token = self.lexer.token()
        return token

     #
     # Token acquisition on top of the lexer: it handles preprocessor
     # directives and comments internally since they are logically not part of
     # the program structure.
     #
    def push(self, tok):
        self.lexer.push(tok)

    def token(self):
        global ignored_words

        token = self.lexer.token()
        while token is not None:
            if token[0] == 'comment':
                token = self.parseComment(token)
                continue
            elif token[0] == 'preproc':
                token = self.parsePreproc(token)
                continue
            elif token[0] == "name" and token[1] == "__const":
                token = ("name", "const")
                return token
            elif token[0] == "name" and token[1] == "__attribute":
                token = self.lexer.token()
                while token is not None and token[1] != ";":
                    token = self.lexer.token()
                return token
            elif token[0] == "name" and ignored_words.has_key(token[1]):
                (n, info) = ignored_words[token[1]]
                i = 0
                while i < n:
                    token = self.lexer.token()
                    i = i + 1
                token = self.lexer.token()
                continue
            else:
                if debug:
                    print "=> ", token
                return token
        return None

     #
     # Parse a typedef; it records the type and its name.
     #
    def parseTypedef(self, token):
        if token is None:
            return None
        token = self.parseType(token)
        if token is None:
            self.error("parsing typedef")
            return None
        base_type = self.type
        type = base_type
         #self.debug("end typedef type", token)
        while token is not None:
            if token[0] == "name":
                name = token[1]
                signature = self.signature
                if signature is not None:
                    type = string.split(type, '(')[0]
                    d = self.mergeFunctionComment(name,
                            ((type, None), signature), 1)
                    self.index_add(name, self.filename, not self.is_header,
                                    "functype", d)
                else:
                    if base_type == "struct":
                        self.index_add(name, self.filename, not self.is_header,
                                        "struct", type)
                        base_type = "struct " + name
                    else:
                        # TODO report missing or misformatted comments
                        info = self.parseTypeComment(name, 1)
                        self.index_add(name, self.filename, not self.is_header,
                                    "typedef", type, info)
                token = self.token()
            else:
                self.error("parsing typedef: expecting a name")
                return token
             #self.debug("end typedef", token)
            if token is not None and token[0] == 'sep' and token[1] == ',':
                type = base_type
                token = self.token()
                while token is not None and token[0] == "op":
                    type = type + token[1]
                    token = self.token()
            elif token is not None and token[0] == 'sep' and token[1] == ';':
                break
            elif token is not None and token[0] == 'name':
                type = base_type
                continue
            else:
                self.error("parsing typedef: expecting ';'", token)
                return token
        token = self.token()
        return token

     #
     # Parse a C code block, used for functions; it parses until
     # the balancing } inclusive
     #
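     # As a sketch, with reference collection enabled (collect_ref == 1),
     # a call such as the hypothetical
     #     virExampleReset(conn);
     # inside a function body is recorded via index_add_ref() as a
     # reference to the "vir"-prefixed function being used.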
    def parseBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.comment = None
                token = self.token()
                return token
            else:
                if self.collect_ref == 1:
                    oldtok = token
                    token = self.token()
                    if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
                        if token[0] == "sep" and token[1] == "(":
                            self.index_add_ref(oldtok[1], self.filename,
                                                0, "function")
                            token = self.token()
                        elif token[0] == "name":
                            token = self.token()
                            if token[0] == "sep" and (token[1] == ";" or
                               token[1] == "," or token[1] == "="):
                                self.index_add_ref(oldtok[1], self.filename,
                                                    0, "type")
                    elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                            0, "typedef")
                    elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                            0, "typedef")

                else:
                    token = self.token()
        return token

     #
     # Parse a C struct definition till the balancing }
     #
    def parseStruct(self, token):
        fields = []
         #self.debug("start parseStruct", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.struct_fields = fields
                 #self.debug("end parseStruct", token)
                 #print fields
                token = self.token()
                return token
            else:
                base_type = self.type
                 #self.debug("before parseType", token)
                token = self.parseType(token)
                 #self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        if self.type == "union":
                            fields.append((self.type, fname, self.comment,
                                           self.union_fields))
                            self.union_fields = []
                        else:
                            fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseStruct: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseStruct: expecting ;", token)
                else:
                    self.error("parseStruct: name", token)
                    token = self.token()
                self.type = base_type
        self.struct_fields = fields
         #self.debug("end parseStruct", token)
         #print fields
        return token

     #
     # Parse a C union definition till the balancing }
     #
    def parseUnion(self, token):
        fields = []
        # self.debug("start parseUnion", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.union_fields = fields
                # self.debug("end parseUnion", token)
                # print fields
                token = self.token()
                return token
            else:
                base_type = self.type
                # self.debug("before parseType", token)
                token = self.parseType(token)
                # self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseUnion: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseUnion: expecting ;", token)
                else:
                    self.error("parseUnion: name", token)
                    token = self.token()
                self.type = base_type
        self.union_fields = fields
        # self.debug("end parseUnion", token)
        # print fields
        return token

     #
     # Parse a C enum block, parsing until the balancing }
     #
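     # As an illustration (hypothetical enum), a block such as
     #     typedef enum {
     #         VIR_EXAMPLE_NONE = 0,  /* nothing set */
     #         VIR_EXAMPLE_FIRST,     /* first real value */
     #     } virExample;
     # leaves self.enums as roughly
     #     [('VIR_EXAMPLE_NONE', '0', 'nothing set'),
     #      ('VIR_EXAMPLE_FIRST', '1', 'first real value')]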
    def parseEnumBlock(self, token):
        self.enums = []
        name = None
        self.comment = None
        comment = ""
        value = "-1"
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                if name is not None:
                    self.cleanupComment()
                    if self.comment is not None:
                        comment = self.comment
                        self.comment = None
                    self.enums.append((name, value, comment))
                token = self.token()
                return token
            elif token[0] == "name":
1351
                    self.cleanupComment()
1352 1353
                    if name is not None:
                        if self.comment is not None:
1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
                            comment = string.strip(self.comment)
                            self.comment = None
                        self.enums.append((name, value, comment))
                    name = token[1]
                    comment = ""
                    token = self.token()
                    if token[0] == "op" and token[1][0] == "=":
                        value = ""
                        if len(token[1]) > 1:
                            value = token[1][1:]
                        token = self.token()
                        while token[0] != "sep" or (token[1] != ',' and
                              token[1] != '}'):
                            value = value + token[1]
                            token = self.token()
                    else:
                        try:
                            value = "%d" % (int(value) + 1)
                        except:
                            self.warning("Failed to compute value of enum %s" % (name))
                            value=""
                    if token[0] == "sep" and token[1] == ",":
                        token = self.token()
            else:
                token = self.token()
        return token
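
    # Illustrative note (example input is made up): parseEnumBlock() records
    # (name, value, comment) tuples and auto-increments values that are not
    # spelled out, so
    #     enum { VIR_EXAMPLE_A = 1, VIR_EXAMPLE_B };
    # yields self.enums == [("VIR_EXAMPLE_A", "1", ""), ("VIR_EXAMPLE_B", "2", "")]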

    def parseVirEnumDecl(self, token):
        if token[0] != "name":
            self.error("parsing VIR_ENUM_DECL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token
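
    # Illustrative note (type name is made up): this is entered once the
    # opening '(' of a VIR_ENUM_DECL use has been consumed, so it swallows
    # something like
    #     VIR_ENUM_DECL(virExampleType);
    # up to and including the trailing ';'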

    def parseVirEnumImpl(self, token):
        # First the type name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
        token = self.token()

        # Now the sentinel name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        token = self.token()

        # Now a list of strings (optional comments)
        while token is not None:
            isGettext = False
            # First a string, optionally with N_(...)
            if token[0] == 'name':
                if token[1] != 'N_':
                    self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
                token = self.token()
                if token[0] != "sep" or token[1] != '(':
                    self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
                token = self.token()
                isGettext = True

                if token[0] != "string":
                    self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
                token = self.token()
            elif token[0] == "string":
                token = self.token()
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting a string", token)

            # Then a separator
            if token[0] == "sep":
                if isGettext and token[1] == ')':
                    token = self.token()

                if token[1] == ',':
                    token = self.token()

                if token[1] == ')':
                    token = self.token()
                    break

            # Then an optional comment
            if token[0] == "comment":
                token = self.token()


        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token
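
    # Illustrative note (names and strings are made up): this consumes the
    # remainder of a VIR_ENUM_IMPL use, i.e. the type name, the sentinel and
    # the list of string literals, each optionally wrapped in N_() for
    # gettext, e.g.
    #     VIR_ENUM_IMPL(virExampleType, VIR_EXAMPLE_TYPE_LAST,
    #                   "none", N_("labelled value"));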

    def parseVirLogInit(self, token):
        if token[0] != "string":
            self.error("parsing VIR_LOG_INIT: expecting string", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token
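
    # Illustrative note (module name is made up): consumes the remainder of a
    # VIR_LOG_INIT use, i.e. one string literal plus the closing ')' and an
    # optional ';', e.g.
    #     VIR_LOG_INIT("example.module");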

     #
     # Parse a C definition block, used for structs or unions; it parses till
     # the balancing }
     #
    def parseTypeBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                token = self.token()
                return token
            else:
                token = self.token()
        return token

     #
     # Parse a type: the fact that the type name can either occur after
     #    the definition or within the definition makes it a little harder;
     #    if it occurs inside, the name token is pushed back before returning
     #
    def parseType(self, token):
        self.type = ""
        self.struct_fields = []
        self.union_fields = []
        self.signature = None
        if token is None:
            return token

        while token[0] == "name" and (
              token[1] == "const" or \
              token[1] == "unsigned" or \
              token[1] == "signed"):
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()

        if token[0] == "name" and token[1] == "long":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

            # some read ahead for long long
            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "long":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "int":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

        elif token[0] == "name" and token[1] == "short":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

        elif token[0] == "name" and token[1] == "struct":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseStruct(token)
            elif token is not None and token[0] == "op" and token[1] == "*":
                self.type = self.type + " " + nametok[1] + " *"
                token = self.token()
                while token is not None and token[0] == "op" and token[1] == "*":
                    self.type = self.type + " *"
                    token = self.token()
                if token[0] == "name":
                    nametok = token
                    token = self.token()
                else:
                    self.error("struct : expecting name", token)
                    return token
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "union":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseUnion(token)
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "enum":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            self.enums = []
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseEnumBlock(token)
            else:
                self.error("parsing enum: expecting '{'", token)
            enum_type = None
            if token is not None and token[0] != "name":
                self.lexer.push(token)
                token = ("name", "enum")
            else:
                enum_type = token[1]
            for enum in self.enums:
                self.index_add(enum[0], self.filename,
                               not self.is_header, "enum",
                               (enum[1], enum[2], enum_type))
            return token
        elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumDecl(token)
            else:
                self.error("parsing VIR_ENUM_DECL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumdecl")
            return token

        elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumImpl(token)
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumimpl")
            return token

        elif token[0] == "name" and token[1] == "VIR_LOG_INIT":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirLogInit(token)
            else:
                self.error("parsing VIR_LOG_INIT: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virloginit")
            return token

        elif token[0] == "name":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
        else:
            self.error("parsing type %s: expecting a name" % (self.type),
                       token)
            return token
        token = self.token()
        while token is not None and (token[0] == "op" or
              token[0] == "name" and token[1] == "const"):
            self.type = self.type + " " + token[1]
            token = self.token()

         #
         # if there is a parenthesis here, this means a function type
         #
        if token is not None and token[0] == "sep" and token[1] == '(':
            self.type = self.type + token[1]
            token = self.token()
            while token is not None and token[0] == "op" and token[1] == '*':
                self.type = self.type + token[1]
                token = self.token()
            if token is None or token[0] != "name" :
                self.error("parsing function type, name expected", token)
                return token
            self.type = self.type + token[1]
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == ')':
                self.type = self.type + token[1]
                token = self.token()
                if token is not None and token[0] == "sep" and token[1] == '(':
                    token = self.token()
                    type = self.type
                    token = self.parseSignature(token)
                    self.type = type
                else:
                    self.error("parsing function type, '(' expected", token)
                    return token
            else:
                self.error("parsing function type, ')' expected", token)
                return token
            self.lexer.push(token)
            token = nametok
            return token

         #
         # do some lookahead for arrays
         #
        if token is not None and token[0] == "name":
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == '[':
                self.type = self.type + " " + nametok[1]
                while token is not None and token[0] == "sep" and token[1] == '[':
                    self.type = self.type + token[1]
                    token = self.token()
                    while token is not None and token[0] != 'sep' and \
                          token[1] != ']' and token[1] != ';':
                        self.type = self.type + token[1]
                        token = self.token()
                if token is not None and token[0] == 'sep' and token[1] == ']':
                    self.type = self.type + token[1]
                    token = self.token()
                else:
                    self.error("parsing array type, ']' expected", token)
                    return token
            elif token is not None and token[0] == "sep" and token[1] == ':':
                 # remove :12 in case it's a limited int size
                token = self.token()
                token = self.token()
            self.lexer.push(token)
            token = nametok

        return token
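
    # Illustrative note (field name is made up): parseType() accumulates the
    # type string in self.type and hands the declarator name back as the
    # returned token, so for a field such as
    #     unsigned long long bytes;
    # self.type ends up as "unsigned long long" and the caller receives
    # ("name", "bytes") as the current token.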

     #
     # Parse a signature: '(' has been parsed and we scan the type definition
     #    up to and including the ')'
     #
    def parseSignature(self, token):
        signature = []
        if token is not None and token[0] == "sep" and token[1] == ')':
            self.signature = []
            token = self.token()
            return token
        while token is not None:
            token = self.parseType(token)
            if token is not None and token[0] == "name":
                signature.append((self.type, token[1], None))
                token = self.token()
            elif token is not None and token[0] == "sep" and token[1] == ',':
                token = self.token()
                continue
            elif token is not None and token[0] == "sep" and token[1] == ')':
                 # only the type was provided
                if self.type == "...":
                    signature.append((self.type, "...", None))
                else:
                    signature.append((self.type, None, None))
            if token is not None and token[0] == "sep":
                if token[1] == ',':
                    token = self.token()
                    continue
                elif token[1] == ')':
                    token = self.token()
                    break
        self.signature = signature
        return token
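
    # Illustrative note (function name is made up): self.signature becomes a
    # list of (type, name, comment) triples, so a declaration like
    #     int virExampleFunc(virConnectPtr conn, unsigned int flags);
    # produces [("virConnectPtr", "conn", None), ("unsigned int", "flags", None)]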

    # this dict contains the functions that are allowed to use [unsigned]
    # long for legacy reasons in their signature and return type. this list is
    # fixed. new procedures and public APIs have to use [unsigned] long long
    long_legacy_functions = \
      { "virGetVersion"                  : (False, ("libVer", "typeVer")),
        "virConnectGetLibVersion"        : (False, ("libVer")),
        "virConnectGetVersion"           : (False, ("hvVer")),
        "virDomainGetMaxMemory"          : (True,  ()),
        "virDomainMigrate"               : (False, ("flags", "bandwidth")),
        "virDomainMigrate2"              : (False, ("flags", "bandwidth")),
        "virDomainMigrateBegin3"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateConfirm3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateDirect"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateFinish"         : (False, ("flags")),
        "virDomainMigrateFinish2"        : (False, ("flags")),
        "virDomainMigrateFinish3"        : (False, ("flags")),
        "virDomainMigratePeer2Peer"      : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare2"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel"  : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel3" : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI"          : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI2"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion1"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion2"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateSetMaxSpeed"    : (False, ("bandwidth")),
        "virDomainSetMaxMemory"          : (False, ("memory")),
        "virDomainSetMemory"             : (False, ("memory")),
1814
        "virDomainSetMemoryFlags"        : (False, ("memory")),
E
Eric Blake 已提交
1815
        "virDomainBlockCommit"           : (False, ("bandwidth")),
1816
        "virDomainBlockJobSetSpeed"      : (False, ("bandwidth")),
1817
        "virDomainBlockPull"             : (False, ("bandwidth")),
1818
        "virDomainBlockRebase"           : (False, ("bandwidth")),
1819
        "virDomainMigrateGetMaxSpeed"    : (False, ("bandwidth")) }

    def checkLongLegacyFunction(self, name, return_type, signature):
        if "long" in return_type and "long long" not in return_type:
            try:
                if not CParser.long_legacy_functions[name][0]:
                    raise Exception()
            except:
                self.error(("function '%s' is not allowed to return long, "
                            "use long long instead") % (name))

        for param in signature:
            if "long" in param[0] and "long long" not in param[0]:
                try:
                    if param[1] not in CParser.long_legacy_functions[name][1]:
                        raise Exception()
                except:
                    self.error(("function '%s' is not allowed to take long "
                                "parameter '%s', use long long instead")
                               % (name, param[1]))

    # this dict contains the structs that are allowed to use [unsigned]
    # long for legacy reasons. this list is fixed. new structs have to use
    # [unsigned] long long
    long_legacy_struct_fields = \
      { "_virDomainInfo"                 : ("maxMem", "memory"),
        "_virNodeInfo"                   : ("memory",),
        "_virDomainBlockJobInfo"         : ("bandwidth",) }

    def checkLongLegacyStruct(self, name, fields):
        for field in fields:
            if "long" in field[0] and "long long" not in field[0]:
                try:
                    if field[1] not in CParser.long_legacy_struct_fields[name]:
                        raise Exception()
                except:
                    self.error(("struct '%s' is not allowed to contain long "
                                "field '%s', use long long instead") \
                               % (name, field[1]))

     #
     # Parse a global definition, be it a type, variable or function;
     # the extern "C" blocks are a bit nasty and require it to recurse.
     #
    def parseGlobal(self, token):
        static = 0
        if token[1] == 'extern':
            token = self.token()
            if token is None:
                return token
            if token[0] == 'string':
                if token[1] == 'C':
                    token = self.token()
                    if token is None:
                        return token
                    if token[0] == 'sep' and token[1] == "{":
                        token = self.token()
#                        print 'Entering extern "C" line ', self.lineno()
                        while token is not None and (token[0] != 'sep' or
                              token[1] != "}"):
                            if token[0] == 'name':
                                token = self.parseGlobal(token)
                            else:
                                self.error(
                                 "token %s %s unexpected at the top level" % (
                                        token[0], token[1]))
                                token = self.parseGlobal(token)
#                        print 'Exiting extern "C" line', self.lineno()
                        token = self.token()
                        return token
                else:
                    return token
        elif token[1] == 'static':
            static = 1
            token = self.token()
            if token is None or  token[0] != 'name':
                return token

        if token[1] == 'typedef':
            token = self.token()
            return self.parseTypedef(token)
        else:
            token = self.parseType(token)
            type_orig = self.type
        if token is None or token[0] != "name":
            return token
        type = type_orig
        self.name = token[1]
        token = self.token()
        while token is not None and (token[0] == "sep" or token[0] == "op"):
            if token[0] == "sep":
                if token[1] == "[":
                    type = type + token[1]
                    token = self.token()
                    while token is not None and (token[0] != "sep" or \
                          token[1] != ";"):
                        type = type + token[1]
                        token = self.token()

            if token is not None and token[0] == "op" and token[1] == "=":
                 #
                 # Skip the initialization of the variable
                 #
                token = self.token()
                if token[0] == 'sep' and token[1] == '{':
                    token = self.token()
                    token = self.parseBlock(token)
                else:
                    self.comment = None
                    while token is not None and (token[0] != "sep" or \
                          (token[1] != ';' and token[1] != ',')):
                            token = self.token()
                self.comment = None
                if token is None or token[0] != "sep" or (token[1] != ';' and
                   token[1] != ','):
                    self.error("missing ';' or ',' after value")

            if token is not None and token[0] == "sep":
                if token[1] == ";":
                    self.comment = None
                    token = self.token()
                    if type == "struct":
                        self.checkLongLegacyStruct(self.name, self.struct_fields)
                        self.index_add(self.name, self.filename,
                             not self.is_header, "struct", self.struct_fields)
                    else:
                        self.index_add(self.name, self.filename,
                             not self.is_header, "variable", type)
                    break
                elif token[1] == "(":
                    token = self.token()
                    token = self.parseSignature(token)
                    if token is None:
                        return None
                    if token[0] == "sep" and token[1] == ";":
                        self.checkLongLegacyFunction(self.name, type, self.signature)
                        d = self.mergeFunctionComment(self.name,
                                ((type, None), self.signature), 1)
                        self.index_add(self.name, self.filename, static,
                                        "function", d)
                        token = self.token()
                    elif token[0] == "sep" and token[1] == "{":
                        self.checkLongLegacyFunction(self.name, type, self.signature)
                        d = self.mergeFunctionComment(self.name,
                                ((type, None), self.signature), static)
                        self.index_add(self.name, self.filename, static,
                                        "function", d)
                        token = self.token()
                        token = self.parseBlock(token)
                elif token[1] == ',':
                    self.comment = None
                    self.index_add(self.name, self.filename, static,
                                    "variable", type)
                    type = type_orig
                    token = self.token()
                    while token is not None and token[0] == "sep":
                        type = type + token[1]
                        token = self.token()
                    if token is not None and token[0] == "name":
                        self.name = token[1]
                        token = self.token()
                else:
                    break

        return token

    def parse(self):
        if not quiet:
            print "Parsing %s" % (self.filename)
        token = self.token()
        while token is not None:
            if token[0] == 'name':
                token = self.parseGlobal(token)
            else:
                self.error("token %s %s unexpected at the top level" % (
                       token[0], token[1]))
                token = self.parseGlobal(token)
                return
        self.parseTopComment(self.top_comment)
        return self.index


class docBuilder:
    """A documentation builder"""
    def __init__(self, name, path='.', directories=['.'], includes=[]):
        self.name = name
        self.path = path
        self.directories = directories
        if name == "libvirt":
            self.includes = includes + included_files.keys()
        elif name == "libvirt-qemu":
            self.includes = includes + qemu_included_files.keys()
        elif name == "libvirt-lxc":
            self.includes = includes + lxc_included_files.keys()
        self.modules = {}
        self.headers = {}
        self.idx = index()
        self.xref = {}
        self.index = {}
        self.basename = name
        self.errors = 0

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print msg

    def error(self, msg):
        self.errors += 1
        print >>sys.stderr, "Error:", msg

    def indexString(self, id, str):
        if str is None:
            return
        str = string.replace(str, "'", ' ')
        str = string.replace(str, '"', ' ')
        str = string.replace(str, "/", ' ')
        str = string.replace(str, '*', ' ')
        str = string.replace(str, "[", ' ')
        str = string.replace(str, "]", ' ')
        str = string.replace(str, "(", ' ')
        str = string.replace(str, ")", ' ')
        str = string.replace(str, "<", ' ')
        str = string.replace(str, '>', ' ')
        str = string.replace(str, "&", ' ')
        str = string.replace(str, '#', ' ')
        str = string.replace(str, ",", ' ')
        str = string.replace(str, '.', ' ')
        str = string.replace(str, ';', ' ')
        tokens = string.split(str)
        for token in tokens:
            try:
                c = token[0]
                if string.find(string.letters, c) < 0:
                    pass
                elif len(token) < 3:
                    pass
                else:
                    lower = string.lower(token)
                    # TODO: generalize this a bit
                    if lower == 'and' or lower == 'the':
                        pass
                    elif self.xref.has_key(token):
                        self.xref[token].append(id)
                    else:
                        self.xref[token] = [id]
            except:
                pass
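
    # Illustrative note (the call shown is made up): indexString() records the
    # id under each word of the description that is at least three letters
    # long and is not "and"/"the", so
    #     self.indexString("virExampleFunc", "lookup the example object")
    # registers "virExampleFunc" under "lookup", "example" and "object".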

    def analyze(self):
        if not quiet:
            print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
        self.idx.analyze()

    def scanHeaders(self):
        for header in self.headers.keys():
            parser = CParser(header)
            idx = parser.parse()
            self.headers[header] = idx
            self.idx.merge(idx)

    def scanModules(self):
        for module in self.modules.keys():
            parser = CParser(module)
            idx = parser.parse()
            # idx.analyze()
            self.modules[module] = idx
            self.idx.merge_public(idx)

    def scan(self):
        for directory in self.directories:
            files = glob.glob(directory + "/*.c")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if string.find(file, incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.modules[file] = None
            files = glob.glob(directory + "/*.h")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if string.find(file, incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.headers[file] = None
        self.scanHeaders()
        self.scanModules()

    def modulename_file(self, file):
        module = os.path.basename(file)
        if module[-2:] == '.h':
            module = module[:-2]
        elif module[-2:] == '.c':
            module = module[:-2]
        return module
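
    # Descriptive note: only the basename without its .c/.h extension is
    # kept, e.g. "src/libvirt-domain.c" -> "libvirt-domain".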

    def serialize_enum(self, output, name):
        id = self.idx.enums[name]
        output.write("    <enum name='%s' file='%s'" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            info = id.info
            if info[0] is not None and info[0] != '':
                try:
                    val = eval(info[0])
                except:
                    val = info[0]
                output.write(" value='%s'" % (val))
            if info[2] is not None and info[2] != '':
                output.write(" type='%s'" % info[2])
            if info[1] is not None and info[1] != '':
                output.write(" info='%s'" % escape(info[1]))
        output.write("/>\n")

    def serialize_macro(self, output, name):
        id = self.idx.macros[name]
        output.write("    <macro name='%s' file='%s'>\n" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            try:
                (args, desc) = id.info
                if desc is not None and desc != "":
                    output.write("      <info><![CDATA[%s]]></info>\n" % (desc))
                    self.indexString(name, desc)
                for arg in args:
                    (name, desc) = arg
                    if desc is not None and desc != "":
                        output.write("      <arg name='%s' info='%s'/>\n" % (
                                     name, escape(desc)))
                        self.indexString(name, desc)
                    else:
                        output.write("      <arg name='%s'/>\n" % (name))
            except:
                pass
        output.write("    </macro>\n")

    def serialize_union(self, output, field, desc):
        output.write("      <field name='%s' type='union' info='%s'>\n" % (field[1] , desc))
        output.write("        <union>\n")
        for f in field[3]:
            desc = f[2]
            if desc is None:
                desc = ''
            else:
                desc = escape(desc)
            output.write("          <field name='%s' type='%s' info='%s'/>\n" % (f[1] , f[0], desc))

        output.write("        </union>\n")
        output.write("      </field>\n")

    def serialize_typedef(self, output, name):
        id = self.idx.typedefs[name]
        if id.info[0:7] == 'struct ':
            output.write("    <struct name='%s' file='%s' type='%s'" % (
                     name, self.modulename_file(id.header), id.info))
            name = id.info[7:]
            if self.idx.structs.has_key(name) and ( \
               type(self.idx.structs[name].info) == type(()) or
                type(self.idx.structs[name].info) == type([])):
                output.write(">\n")
                try:
                    for field in self.idx.structs[name].info:
                        desc = field[2]
                        self.indexString(name, desc)
                        if desc is None:
                            desc = ''
                        else:
                            desc = escape(desc)
                        if field[0] == "union":
                            self.serialize_union(output, field, desc)
                        else:
                            output.write("      <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
                except:
                    self.warning("Failed to serialize struct %s" % (name))
                output.write("    </struct>\n")
            else:
                output.write("/>\n")
        else :
            output.write("    <typedef name='%s' file='%s' type='%s'" % (
                         name, self.modulename_file(id.header), id.info))
            try:
                desc = id.extra
                if desc is not None and desc != "":
                    output.write(">\n      <info><![CDATA[%s]]></info>\n" % (desc))
                    output.write("    </typedef>\n")
                else:
                    output.write("/>\n")
            except:
                output.write("/>\n")

    def serialize_variable(self, output, name):
        id = self.idx.variables[name]
        if id.info is not None:
            output.write("    <variable name='%s' file='%s' type='%s'/>\n" % (
                    name, self.modulename_file(id.header), id.info))
        else:
            output.write("    <variable name='%s' file='%s'/>\n" % (
                    name, self.modulename_file(id.header)))

    def serialize_function(self, output, name):
        id = self.idx.functions[name]
        if name == debugsym and not quiet:
            print "=>", id

        output.write("    <%s name='%s' file='%s' module='%s'>\n" % (id.type,
                     name, self.modulename_file(id.header),
                     self.modulename_file(id.module)))
        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        if id.conditionals is not None:
            apstr = ""
            for cond in id.conditionals:
                if apstr != "":
                    apstr = apstr + " &amp;&amp; "
                apstr = apstr + cond
            output.write("      <cond>%s</cond>\n"% (apstr))
        try:
            (ret, params, desc) = id.info
            output.write("      <info><![CDATA[%s]]></info>\n" % (desc))
            self.indexString(name, desc)
            if ret[0] is not None:
                if ret[0] == "void":
                    output.write("      <return type='void'/>\n")
                elif (ret[1] is None or ret[1] == '') and not ignored_functions.has_key(name):
                    self.error("Missing documentation for return of function `%s'" % name)
                else:
                    output.write("      <return type='%s' info='%s'/>\n" % (
                             ret[0], escape(ret[1])))
                    self.indexString(name, ret[1])
            for param in params:
                if param[0] == 'void':
                    continue
                if (param[2] is None or param[2] == ''):
                    if ignored_functions.has_key(name):
                        output.write("      <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
                    else:
                        self.error("Missing documentation for arg `%s' of function `%s'" % (param[1], name))
                else:
                    output.write("      <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
                    self.indexString(name, param[2])
        except:
            print >>sys.stderr, "Exception:", sys.exc_info()[1]
            self.warning("Failed to save function %s info: %s" % (name, `id.info`))
        output.write("    </%s>\n" % (id.type))

    def serialize_exports(self, output, file):
        module = self.modulename_file(file)
        output.write("    <file name='%s'>\n" % (module))
        dict = self.headers[file]
        if dict.info is not None:
            for data in ('Summary', 'Description', 'Author'):
                try:
                    output.write("     <%s>%s</%s>\n" % (
                                 string.lower(data),
                                 escape(dict.info[data]),
                                 string.lower(data)))
                except:
                    self.warning("Header %s lacks a %s description" % (module, data))
            if dict.info.has_key('Description'):
                desc = dict.info['Description']
                if string.find(desc, "DEPRECATED") != -1:
                    output.write("     <deprecated/>\n")

        ids = dict.macros.keys()
        ids.sort()
        for id in uniq(ids):
            # Macros are sometime used to masquerade other types.
            if dict.functions.has_key(id):
                continue
            if dict.variables.has_key(id):
                continue
            if dict.typedefs.has_key(id):
                continue
            if dict.structs.has_key(id):
                continue
            if dict.unions.has_key(id):
                continue
            if dict.enums.has_key(id):
                continue
            output.write("     <exports symbol='%s' type='macro'/>\n" % (id))
        ids = dict.enums.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='enum'/>\n" % (id))
        ids = dict.typedefs.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='typedef'/>\n" % (id))
        ids = dict.structs.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='struct'/>\n" % (id))
        ids = dict.variables.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='variable'/>\n" % (id))
        ids = dict.functions.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='function'/>\n" % (id))
        output.write("    </file>\n")

    def serialize_xrefs_files(self, output):
        headers = self.headers.keys()
        headers.sort()
        for file in headers:
            module = self.modulename_file(file)
            output.write("    <file name='%s'>\n" % (module))
            dict = self.headers[file]
            ids = uniq(dict.functions.keys() + dict.variables.keys() + \
                  dict.macros.keys() + dict.typedefs.keys() + \
                  dict.structs.keys() + dict.enums.keys())
            ids.sort()
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </file>\n")
        pass

    def serialize_xrefs_functions(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                for param in params:
                    if param[0] == 'void':
                        continue
                    if funcs.has_key(param[0]):
                        funcs[param[0]].append(name)
                    else:
                        funcs[param[0]] = [name]
            except:
                pass
        typ = funcs.keys()
        typ.sort()
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *" :
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            pid = ''    # not sure why we have dups, but get rid of them!
            for id in ids:
                if id != pid:
                    output.write("      <ref name='%s'/>\n" % (id))
                    pid = id
            output.write("    </type>\n")

    def serialize_xrefs_constructors(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                if ret[0] == "void":
                    continue
                if funcs.has_key(ret[0]):
                    funcs[ret[0]].append(name)
                else:
                    funcs[ret[0]] = [name]
            except:
                pass
        typ = funcs.keys()
        typ.sort()
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *" :
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </type>\n")

    def serialize_xrefs_alpha(self, output):
        letter = None
        ids = self.idx.identifiers.keys()
        ids.sort()
        for id in ids:
            if id[0] != letter:
                if letter is not None:
                    output.write("    </letter>\n")
                letter = id[0]
                output.write("    <letter name='%s'>\n" % (letter))
            output.write("      <ref name='%s'/>\n" % (id))
        if letter is not None:
            output.write("    </letter>\n")

    def serialize_xrefs_references(self, output):
        typ = self.idx.identifiers.keys()
        typ.sort()
        for id in typ:
            idf = self.idx.identifiers[id]
            module = idf.header
            output.write("    <reference name='%s' href='%s'/>\n" % (id,
                         'html/' + self.basename + '-' +
                         self.modulename_file(module) + '.html#' +
                         id))

    def serialize_xrefs_index(self, output):
        index = self.xref
        typ = index.keys()
        typ.sort()
        letter = None
        count = 0
        chunk = 0
        chunks = []
        for id in typ:
            if len(index[id]) > 30:
                continue
            if id[0] != letter:
                if letter is None or count > 200:
                    if letter is not None:
                        output.write("      </letter>\n")
                        output.write("    </chunk>\n")
                        count = 0
                        chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
                    output.write("    <chunk name='chunk%s'>\n" % (chunk))
                    first_letter = id[0]
                    chunk = chunk + 1
                elif letter is not None:
                    output.write("      </letter>\n")
                letter = id[0]
                output.write("      <letter name='%s'>\n" % (letter))
            output.write("        <word name='%s'>\n" % (id))
            tokens = index[id]
            tokens.sort()
            tok = None
            for token in tokens:
                if tok == token:
                    continue
                tok = token
                output.write("          <ref name='%s'/>\n" % (token))
                count = count + 1
            output.write("        </word>\n")
        if letter is not None:
            output.write("      </letter>\n")
            output.write("    </chunk>\n")
            if count != 0:
                chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
            output.write("    <chunks>\n")
            for ch in chunks:
                output.write("      <chunk name='%s' start='%s' end='%s'/>\n" % (
                             ch[0], ch[1], ch[2]))
            output.write("    </chunks>\n")

    def serialize_xrefs(self, output):
        output.write("  <references>\n")
        self.serialize_xrefs_references(output)
        output.write("  </references>\n")
        output.write("  <alpha>\n")
        self.serialize_xrefs_alpha(output)
        output.write("  </alpha>\n")
        output.write("  <constructors>\n")
        self.serialize_xrefs_constructors(output)
        output.write("  </constructors>\n")
        output.write("  <functions>\n")
        self.serialize_xrefs_functions(output)
        output.write("  </functions>\n")
        output.write("  <files>\n")
        self.serialize_xrefs_files(output)
        output.write("  </files>\n")
        output.write("  <index>\n")
        self.serialize_xrefs_index(output)
        output.write("  </index>\n")

    def serialize(self):
        filename = "%s/%s-api.xml" % (self.path, self.name)
        if not quiet:
            print "Saving XML description %s" % (filename)
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<api name='%s'>\n" % self.name)
        output.write("  <files>\n")
        headers = self.headers.keys()
        headers.sort()
        for file in headers:
            self.serialize_exports(output, file)
        output.write("  </files>\n")
        output.write("  <symbols>\n")
        macros = self.idx.macros.keys()
        macros.sort()
        for macro in macros:
            self.serialize_macro(output, macro)
        enums = self.idx.enums.keys()
        enums.sort()
        for enum in enums:
            self.serialize_enum(output, enum)
        typedefs = self.idx.typedefs.keys()
        typedefs.sort()
        for typedef in typedefs:
            self.serialize_typedef(output, typedef)
        variables = self.idx.variables.keys()
        variables.sort()
        for variable in variables:
            self.serialize_variable(output, variable)
        functions = self.idx.functions.keys()
        functions.sort()
        for function in functions:
            self.serialize_function(output, function)
        output.write("  </symbols>\n")
        output.write("</api>\n")
        output.close()

        if self.errors > 0:
            print >>sys.stderr, "apibuild.py: %d error(s) encountered during generation" % self.errors
            sys.exit(3)

        filename = "%s/%s-refs.xml" % (self.path, self.name)
        if not quiet:
            print "Saving XML Cross References %s" % (filename)
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<apirefs name='%s'>\n" % self.name)
        self.serialize_xrefs(output)
        output.write("</apirefs>\n")
        output.close()


def rebuild(name):
    if name not in ["libvirt", "libvirt-qemu", "libvirt-lxc"]:
        self.warning("rebuild() failed, unknown module %s") % name
        return None
    builder = None
    srcdir = os.environ["srcdir"]
    if glob.glob(srcdir + "/../src/libvirt.c") != [] :
        if not quiet:
            print "Rebuilding API description for %s" % name
        dirs = [srcdir + "/../src",
                srcdir + "/../src/util",
                srcdir + "/../include/libvirt"]
        if glob.glob(srcdir + "/../include/libvirt/libvirt.h") == [] :
            dirs.append("../include/libvirt")
        builder = docBuilder(name, srcdir, dirs, [])
    elif glob.glob("src/libvirt.c") != [] :
        if not quiet:
            print "Rebuilding API description for %s" % name
        builder = docBuilder(name, srcdir,
                             ["src", "src/util", "include/libvirt"],
                             [])
    else:
        self.warning("rebuild() failed, unable to guess the module")
        return None
    builder.scan()
    builder.analyze()
    builder.serialize()
    return builder
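
# Illustrative note (invocation is shown for illustration only): rebuild()
# locates the sources through the "srcdir" environment variable set by the
# build, so a manual run would look something like
#     srcdir=docs python apibuild.py
# from the top of a libvirt checkout.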

#
# for debugging the parser
#
def parse(filename):
    parser = CParser(filename)
    idx = parser.parse()
    return idx

if __name__ == "__main__":
    if len(sys.argv) > 1:
        debug = 1
        parse(sys.argv[1])
    else:
        rebuild("libvirt")
        rebuild("libvirt-qemu")
        rebuild("libvirt-lxc")
    if warnings > 0:
        sys.exit(2)
    else:
        sys.exit(0)