#!/usr/bin/python -u
#
# This is the API builder: it parses the C sources and builds the
# formal API description in XML.
#
# See Copyright for the status of this software.
#
# daniel@veillard.com
#
import os, sys
import string
import glob
import re

quiet=True
warnings=0
debug=False
debugsym=None

#
# C parser analysis code
#
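# Files taken into account when building the main libvirt API, each mapped
# to a short description of its content.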
included_files = {
  "libvirt.h": "header with general libvirt API definitions",
  "libvirt-domain-snapshot.h": "header with general libvirt API definitions",
  "virterror.h": "header with error specific API definitions",
  "libvirt.c": "Main interfaces for the libvirt library",
  "libvirt-domain.c": "Domain interfaces for the libvirt library",
  "libvirt-domain-snapshot.c": "Domain snapshot interfaces for the libvirt library",
  "libvirt-host.c": "Host interfaces for the libvirt library",
  "libvirt-interface.c": "Interface interfaces for the libvirt library",
  "libvirt-network.c": "Network interfaces for the libvirt library",
  "libvirt-nodedev.c": "Node device interfaces for the libvirt library",
  "libvirt-nwfilter.c": "NWFilter interfaces for the libvirt library",
  "libvirt-secret.c": "Secret interfaces for the libvirt library",
  "libvirt-storage.c": "Storage interfaces for the libvirt library",
  "libvirt-stream.c": "Stream interfaces for the libvirt library",
  "virerror.c": "implements error handling and reporting code for libvirt",
  "virevent.c": "event loop for monitoring file handles",
  "virtypedparam.c": "virTypedParameters APIs",
}

qemu_included_files = {
  "libvirt-qemu.h": "header with QEMU specific API definitions",
  "libvirt-qemu.c": "Implementations for the QEMU specific APIs",
}

lxc_included_files = {
  "libvirt-lxc.h": "header with LXC specific API definitions",
  "libvirt-lxc.c": "Implementations for the LXC specific APIs",
}

ignored_words = {
  "ATTRIBUTE_UNUSED": (0, "macro keyword"),
  "ATTRIBUTE_SENTINEL": (0, "macro keyword"),
  "VIR_DEPRECATED": (0, "macro keyword"),
  "VIR_EXPORT_VAR": (0, "macro keyword"),
  "WINAPI": (0, "Windows keyword"),
  "__declspec": (3, "Windows keyword"),
  "__stdcall": (0, "Windows keyword"),
}

ignored_functions = {
  "virConnectSupportsFeature": "private function for remote access",
  "virDomainMigrateFinish": "private function for migration",
  "virDomainMigrateFinish2": "private function for migration",
  "virDomainMigratePerform": "private function for migration",
  "virDomainMigratePrepare": "private function for migration",
  "virDomainMigratePrepare2": "private function for migration",
  "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
  "virDomainMigrateBegin3": "private function for migration",
  "virDomainMigrateFinish3": "private function for migration",
  "virDomainMigratePerform3": "private function for migration",
  "virDomainMigratePrepare3": "private function for migration",
  "virDomainMigrateConfirm3": "private function for migration",
  "virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
  "DllMain": "specific function for Win32",
  "virTypedParamsValidate": "internal function in virtypedparam.c",
  "virTypedParameterValidateSet": "internal function in virtypedparam.c",
  "virTypedParameterAssign": "internal function in virtypedparam.c",
  "virTypedParameterAssignFromStr": "internal function in virtypedparam.c",
  "virTypedParameterToString": "internal function in virtypedparam.c",
  "virTypedParamsCheck": "internal function in virtypedparam.c",
  "virTypedParamsCopy": "internal function in virtypedparam.c",
  "virDomainMigrateBegin3Params": "private function for migration",
  "virDomainMigrateFinish3Params": "private function for migration",
  "virDomainMigratePerform3Params": "private function for migration",
  "virDomainMigratePrepare3Params": "private function for migration",
  "virDomainMigrateConfirm3Params": "private function for migration",
  "virDomainMigratePrepareTunnel3Params": "private function for tunnelled migration",
}

ignored_macros = {
  "_virSchedParameter": "backward compatibility macro for virTypedParameter",
  "_virBlkioParameter": "backward compatibility macro for virTypedParameter",
  "_virMemoryParameter": "backward compatibility macro for virTypedParameter",
}

def escape(raw):
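    # e.g. escape('a < b & "c"') -> 'a &lt; b &amp; &quot;c&quot;'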
    raw = string.replace(raw, '&', '&amp;')
    raw = string.replace(raw, '<', '&lt;')
    raw = string.replace(raw, '>', '&gt;')
    raw = string.replace(raw, "'", '&apos;')
    raw = string.replace(raw, '"', '&quot;')
    return raw

def uniq(items):
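    # e.g. uniq(['b', 'a', 'b']) -> ['a', 'b'] (duplicates dropped, keys sorted)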
    d = {}
    for item in items:
        d[item]=1
    k = d.keys()
    k.sort()
    return k

class identifier:
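    # Records a single symbol found while parsing (function, variable,
    # typedef, macro, ...) together with the header/module it comes from,
    # its documentation info and any preprocessor conditionals guarding it.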
    def __init__(self, name, header=None, module=None, type=None, lineno = 0,
                 info=None, extra=None, conditionals = None):
        self.name = name
        self.header = header
        self.module = module
        self.type = type
        self.info = info
        self.extra = extra
        self.lineno = lineno
        self.static = 0
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]
        if self.name == debugsym and not quiet:
            print "=> define %s : %s" % (debugsym, (module, type, info,
                                         extra, conditionals))

    def __repr__(self):
        r = "%s %s:" % (self.type, self.name)
        if self.static:
            r = r + " static"
        if self.module is not None:
            r = r + " from %s" % (self.module)
        if self.info is not None:
            r = r + " " +  `self.info`
        if self.extra is not None:
            r = r + " " + `self.extra`
        if self.conditionals is not None:
            r = r + " " + `self.conditionals`
        return r


    def set_header(self, header):
        self.header = header
    def set_module(self, module):
        self.module = module
    def set_type(self, type):
        self.type = type
    def set_info(self, info):
        self.info = info
    def set_extra(self, extra):
        self.extra = extra
    def set_lineno(self, lineno):
        self.lineno = lineno
    def set_static(self, static):
        self.static = static
    def set_conditionals(self, conditionals):
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]

    def get_name(self):
        return self.name
    def get_header(self):
        return self.header
    def get_module(self):
        return self.module
    def get_type(self):
        return self.type
    def get_info(self):
        return self.info
    def get_lineno(self):
        return self.lineno
    def get_extra(self):
        return self.extra
    def get_static(self):
        return self.static
    def get_conditionals(self):
        return self.conditionals

    def update(self, header, module, type = None, info = None, extra=None,
               conditionals=None):
        if self.name == debugsym and not quiet:
            print "=> update %s : %s" % (debugsym, (module, type, info,
                                         extra, conditionals))
        if header is not None and self.header is None:
            self.set_header(module)
        if module is not None and (self.module is None or self.header == self.module):
            self.set_module(module)
        if type is not None and self.type is None:
            self.set_type(type)
        if info is not None:
            self.set_info(info)
        if extra is not None:
            self.set_extra(extra)
        if conditionals is not None:
            self.set_conditionals(conditionals)

class index:
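    # Aggregates the identifiers found in one or more files, keyed by name
    # and grouped by kind (functions, variables, structs, unions, enums,
    # typedefs, macros, ...).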
    def __init__(self, name = "noname"):
        self.name = name
        self.identifiers = {}
        self.functions = {}
        self.variables = {}
        self.includes = {}
        self.structs = {}
        self.unions = {}
        self.enums = {}
        self.typedefs = {}
        self.macros = {}
        self.references = {}
        self.info = {}
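
    # add() and merge() below call self.warning(); an equivalent of
    # CParser.warning is assumed here, since none is visible in this
    # part of the file.
    def warning(self, msg, extra=None):
        global warnings
        warnings = warnings + 1
        if extra is not None:
            print msg, extra
        else:
            print msg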

    def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
           d = self.identifiers[name]
           d.update(header, module, type, lineno, info, extra, conditionals)
        except:
           d = identifier(name, header, module, type, lineno, info, extra, conditionals)
           self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            self.references[name] = d

        if name == debugsym and not quiet:
            print "New ref: %s" % (d)

        return d

    def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
           d = self.identifiers[name]
           d.update(header, module, type, lineno, info, extra, conditionals)
        except:
           d = identifier(name, header, module, type, lineno, info, extra, conditionals)
           self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            if type == "function":
                self.functions[name] = d
            elif type == "functype":
                self.functions[name] = d
            elif type == "variable":
                self.variables[name] = d
            elif type == "include":
                self.includes[name] = d
            elif type == "struct":
                self.structs[name] = d
            elif type == "union":
                self.unions[name] = d
            elif type == "enum":
                self.enums[name] = d
            elif type == "typedef":
                self.typedefs[name] = d
            elif type == "macro":
                self.macros[name] = d
            else:
                self.warning("Unable to register type ", type)

        if name == debugsym and not quiet:
            print "New symbol: %s" % (d)

        return d

    def merge(self, idx):
        for id in idx.functions.keys():
              #
              # a macro might be used to override function or variable
              # definitions
              #
             if self.macros.has_key(id):
                 del self.macros[id]
             if self.functions.has_key(id):
                 self.warning("function %s from %s redeclared in %s" % (
                    id, self.functions[id].header, idx.functions[id].header))
             else:
                 self.functions[id] = idx.functions[id]
                 self.identifiers[id] = idx.functions[id]
        for id in idx.variables.keys():
              #
              # a macro might be used to override function or variable
              # definitions
              #
             if self.macros.has_key(id):
                 del self.macros[id]
             if self.variables.has_key(id):
                 self.warning("variable %s from %s redeclared in %s" % (
                    id, self.variables[id].header, idx.variables[id].header))
             else:
                 self.variables[id] = idx.variables[id]
                 self.identifiers[id] = idx.variables[id]
        for id in idx.structs.keys():
             if self.structs.has_key(id):
                 self.warning("struct %s from %s redeclared in %s" % (
                    id, self.structs[id].header, idx.structs[id].header))
             else:
                 self.structs[id] = idx.structs[id]
                 self.identifiers[id] = idx.structs[id]
        for id in idx.unions.keys():
             if self.unions.has_key(id):
                 self.warning("union %s from %s redeclared in %s" % (
                    id, self.unions[id].header, idx.unions[id].header))
             else:
                 self.unions[id] = idx.unions[id]
                 self.identifiers[id] = idx.unions[id]
        for id in idx.typedefs.keys():
             if self.typedefs.has_key(id):
                 self.warning("typedef %s from %s redeclared in %s" % (
                    id, self.typedefs[id].header, idx.typedefs[id].header))
             else:
                 self.typedefs[id] = idx.typedefs[id]
                 self.identifiers[id] = idx.typedefs[id]
        for id in idx.macros.keys():
              #
              # a macro might be used to override function or variable
              # definitions
              #
             if self.variables.has_key(id):
                 continue
             if self.functions.has_key(id):
                 continue
             if self.enums.has_key(id):
                 continue
             if self.macros.has_key(id):
                 self.warning("macro %s from %s redeclared in %s" % (
                    id, self.macros[id].header, idx.macros[id].header))
             else:
                 self.macros[id] = idx.macros[id]
                 self.identifiers[id] = idx.macros[id]
        for id in idx.enums.keys():
             if self.enums.has_key(id):
                 self.warning("enum %s from %s redeclared in %s" % (
                    id, self.enums[id].header, idx.enums[id].header))
             else:
                 self.enums[id] = idx.enums[id]
                 self.identifiers[id] = idx.enums[id]

    def merge_public(self, idx):
        for id in idx.functions.keys():
             if self.functions.has_key(id):
                 # check that function condition agrees with header
                 if idx.functions[id].conditionals != \
                    self.functions[id].conditionals:
                     self.warning("Header condition differs from Function for %s:" \
                                      % id)
                     self.warning("  H: %s" % self.functions[id].conditionals)
                     self.warning("  C: %s" % idx.functions[id].conditionals)
                 up = idx.functions[id]
                 self.functions[id].update(None, up.module, up.type, up.info, up.extra)
         #     else:
         #         print "Function %s from %s is not declared in headers" % (
         #              id, idx.functions[id].module)
         # TODO: do the same for variables.

    def analyze_dict(self, type, dict):
        count = 0
        public = 0
        for name in dict.keys():
            id = dict[name]
            count = count + 1
            if id.static == 0:
                public = public + 1
        if count != public:
            print "  %d %s , %d public" % (count, type, public)
        elif count != 0:
            print "  %d public %s" % (count, type)


    def analyze(self):
        if not quiet:
            self.analyze_dict("functions", self.functions)
            self.analyze_dict("variables", self.variables)
            self.analyze_dict("structs", self.structs)
            self.analyze_dict("unions", self.unions)
            self.analyze_dict("typedefs", self.typedefs)
            self.analyze_dict("macros", self.macros)

class CLexer:
    """A lexer for the C language; tokenizes the input by reading and
       analyzing it line by line"""
    def __init__(self, input):
        self.input = input
        self.tokens = []
        self.line = ""
        self.lineno = 0

    def getline(self):
        line = ''
        while line == '':
            line = self.input.readline()
            if not line:
                return None
            self.lineno = self.lineno + 1
            line = string.lstrip(line)
            line = string.rstrip(line)
            if line == '':
                continue
            while line[-1] == '\\':
                line = line[:-1]
                n = self.input.readline()
                self.lineno = self.lineno + 1
                n = string.lstrip(n)
                n = string.rstrip(n)
                if not n:
                    break
                else:
                    line = line + n
        return line

    def getlineno(self):
        return self.lineno

    def push(self, token):
        self.tokens.insert(0, token)

    def debug(self):
        print "Last token: ", self.last
        print "Token queue: ", self.tokens
        print "Line %d end: " % (self.lineno), self.line

    def token(self):
        while self.tokens == []:
            if self.line == "":
                line = self.getline()
            else:
                line = self.line
                self.line = ""
            if line is None:
                return None

            if line[0] == '#':
                self.tokens = map((lambda x: ('preproc', x)),
                                  string.split(line))
                break
            l = len(line)
            if line[0] == '"' or line[0] == "'":
                end = line[0]
                line = line[1:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == end:
                            self.line = line[i+1:]
                            line = line[:i]
                            l = i
                            found = 1
                            break
                        if line[i] == '\\':
                            i = i + 1
                        i = i + 1
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('string', tok)
                return self.last

            if l >= 2 and line[0] == '/' and line[1] == '*':
                line = line[2:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == '*' and i+1 < l and line[i+1] == '/':
                            self.line = line[i+2:]
                            line = line[:i-1]
                            l = i
                            found = 1
                            break
                        i = i + 1
                    if tok != "":
                        tok = tok + "\n"
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('comment', tok)
                return self.last
            if l >= 2 and line[0] == '/' and line[1] == '/':
                line = line[2:]
                self.last = ('comment', line)
                return self.last
            i = 0
            while i < l:
                if line[i] == '/' and i+1 < l and line[i+1] == '/':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '/' and i+1 < l and line[i+1] == '*':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '"' or line[i] == "'":
                    self.line = line[i:]
                    line = line[:i]
                    break
                i = i + 1
            l = len(line)
            i = 0
            while i < l:
                if line[i] == ' ' or line[i] == '\t':
                    i = i + 1
                    continue
                o = ord(line[i])
                if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                   (o >= 48 and o <= 57):
                    s = i
                    while i < l:
                        o = ord(line[i])
                        if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                           (o >= 48 and o <= 57) or string.find(
                               " \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
                            i = i + 1
                        else:
                            break
                    self.tokens.append(('name', line[s:i]))
                    continue
                if string.find("(){}:;,[]", line[i]) != -1:
#                 if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
#                   line[i] == '}' or line[i] == ':' or line[i] == ';' or \
#                   line[i] == ',' or line[i] == '[' or line[i] == ']':
                    self.tokens.append(('sep', line[i]))
                    i = i + 1
                    continue
                if string.find("+-*><=/%&!|.", line[i]) != -1:
#                 if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
#                   line[i] == '>' or line[i] == '<' or line[i] == '=' or \
#                   line[i] == '/' or line[i] == '%' or line[i] == '&' or \
#                   line[i] == '!' or line[i] == '|' or line[i] == '.':
                    if line[i] == '.' and  i + 2 < l and \
                       line[i+1] == '.' and line[i+2] == '.':
                        self.tokens.append(('name', '...'))
                        i = i + 3
                        continue

                    j = i + 1
                    if j < l and (
                       string.find("+-*><=/%&!|", line[j]) != -1):
#                       line[j] == '+' or line[j] == '-' or line[j] == '*' or \
#                       line[j] == '>' or line[j] == '<' or line[j] == '=' or \
#                       line[j] == '/' or line[j] == '%' or line[j] == '&' or \
#                       line[j] == '!' or line[j] == '|'):
                        self.tokens.append(('op', line[i:j+1]))
                        i = j + 1
                    else:
                        self.tokens.append(('op', line[i]))
                        i = i + 1
                    continue
                s = i
                while i < l:
                    o = ord(line[i])
                    if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                       (o >= 48 and o <= 57) or (
                        string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
#                        line[i] != ' ' and line[i] != '\t' and
#                        line[i] != '(' and line[i] != ')' and
#                        line[i] != '{'  and line[i] != '}' and
#                        line[i] != ':' and line[i] != ';' and
#                        line[i] != ',' and line[i] != '+' and
#                        line[i] != '-' and line[i] != '*' and
#                        line[i] != '/' and line[i] != '%' and
#                        line[i] != '&' and line[i] != '!' and
#                        line[i] != '|' and line[i] != '[' and
#                        line[i] != ']' and line[i] != '=' and
#                        line[i] != '*' and line[i] != '>' and
#                        line[i] != '<'):
                        i = i + 1
                    else:
                        break
                self.tokens.append(('name', line[s:i]))

        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        self.last = tok
        return tok

class CParser:
    """The C module parser"""
    def __init__(self, filename, idx = None):
        self.filename = filename
        if len(filename) > 2 and filename[-2:] == '.h':
            self.is_header = 1
        else:
            self.is_header = 0
        self.input = open(filename)
        self.lexer = CLexer(self.input)
        if idx is None:
            self.index = index()
        else:
            self.index = idx
        self.top_comment = ""
        self.last_comment = ""
        self.comment = None
        self.collect_ref = 0
        self.no_error = 0
        self.conditionals = []
        self.defines = []

    def collect_references(self):
        self.collect_ref = 1

    def stop_error(self):
        self.no_error = 1

    def start_error(self):
        self.no_error = 0

    def lineno(self):
        return self.lexer.getlineno()

    def index_add(self, name, module, static, type, info=None, extra = None):
        if self.is_header == 1:
            self.index.add(name, module, module, static, type, self.lineno(),
                           info, extra, self.conditionals)
        else:
            self.index.add(name, None, module, static, type, self.lineno(),
                           info, extra, self.conditionals)

    def index_add_ref(self, name, module, static, type, info=None,
                      extra = None):
        if self.is_header == 1:
            self.index.add_ref(name, module, module, static, type,
                               self.lineno(), info, extra, self.conditionals)
        else:
            self.index.add_ref(name, None, module, static, type, self.lineno(),
                               info, extra, self.conditionals)

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        if self.no_error:
            return
        print msg

    def error(self, msg, token=-1):
        if self.no_error:
            return

        print "Parse Error: " + msg
        if token != -1:
            print "Got token ", token
        self.lexer.debug()
        sys.exit(1)

    def debug(self, msg, token=-1):
        print "Debug: " + msg
        if token != -1:
            print "Got token ", token
        self.lexer.debug()

    def parseTopComment(self, comment):
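        # The top comment of the file is turned into a dictionary, e.g. a
        # (hypothetical) header comment of the form
        #  * Summary: core host APIs
        #  * Description: main entry points of the library
        # ends up as {'Summary': ..., 'Description': ...} in self.index.info.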
        res = {}
        lines = string.split(comment, "\n")
        item = None
        for line in lines:
            line = line.lstrip().lstrip('*').lstrip()

            m = re.match('([_.a-zA-Z0-9]+):(.*)', line)
            if m:
                item = m.group(1)
                line = m.group(2).lstrip()

            if item:
                if res.has_key(item):
                    res[item] = res[item] + " " + line
                else:
                    res[item] = line
        self.index.info = res

    def strip_lead_star(self, line):
        l = len(line)
        i = 0
        while i < l:
            if line[i] == ' ' or line[i] == '\t':
                i += 1
            elif line[i] == '*':
                return line[:i] + line[i + 1:]
            else:
                 return line
        return line

    def cleanupComment(self):
        if type(self.comment) != type(""):
            return
        # remove the leading * on multi-line comments
        lines = self.comment.splitlines(True)
        com = ""
        for line in lines:
            com = com + self.strip_lead_star(line)
        self.comment = com.strip()

    def parseComment(self, token):
        com = token[1]
        if self.top_comment == "":
            self.top_comment = com
        if self.comment is None or com[0] == '*':
            self.comment = com
        else:
            self.comment = self.comment + com
        token = self.lexer.token()

        if string.find(self.comment, "DOC_DISABLE") != -1:
            self.stop_error()

        if string.find(self.comment, "DOC_ENABLE") != -1:
            self.start_error()

        return token

    #
    # Parse a comment block associated with a typedef
    #
    def parseTypeComment(self, name, quiet = 0):
        if name[0:2] == '__':
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for type %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in type comment for %s" % (name))
            return((args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted type comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = string.strip(l)
            desc = desc + " " + l
            del lines[0]

        desc = string.strip(desc)

        if quiet == 0:
            if desc == "":
                self.warning("Type comment for %s lacks description of the type" % (name))

        return(desc)
    #
    # Parse a comment block associated with a macro
    #
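    # The comment is expected to follow the usual layout, e.g. for a
    # hypothetical macro FOO_MAX:
    #  * FOO_MAX:
    #  * @n: description of the parameter (optional)
    #  *
    #  * Description of the macro itself.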
    def parseMacroComment(self, name, quiet = 0):
        global ignored_macros

        if name[0:2] == '__':
            quiet = 1
        if ignored_macros.has_key(name):
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for macro %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in macro comment for %s" % (name))
            return((args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted macro comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = string.split(l, ':', 1)
                desc=string.strip(desc)
                arg=string.strip(arg)
            except:
                if not quiet:
                    self.warning("Misformatted macro comment for %s" % (name))
                    self.warning("  problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = string.strip(lines[0])
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + string.strip(l)
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            args.append((arg, desc))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = string.strip(l)
            desc = desc + " " + l
            del lines[0]

        desc = string.strip(desc)

        if quiet == 0:
            if desc == "":
                self.warning("Macro comment for %s lacks description of the macro" % (name))

        return((args, desc))

     #
     # Parse a comment block and merge the information found in the
     # parameter descriptions, finally returning a block as complete
     # as possible
     #
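     # The result has the shape ((return_type, return_desc), args, desc),
     # where each args entry is a (type, name, description) tuple.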
    def mergeFunctionComment(self, name, description, quiet = 0):
        global ignored_functions

        if name == 'main':
            quiet = 1
        if name[0:2] == '__':
            quiet = 1
        if ignored_functions.has_key(name):
            quiet = 1

        (ret, args) = description
        desc = ""
        retdesc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for function %s" % (name))
            return(((ret[0], retdesc), args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in function comment for %s" % (name))
            return(((ret[0], retdesc), args, desc))
        lines = string.split(self.comment, '\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted function comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return(((ret[0], retdesc), args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        nbargs = len(args)
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = string.split(l, ':', 1)
                desc=string.strip(desc)
                arg=string.strip(arg)
            except:
                if not quiet:
                    self.warning("Misformatted function comment for %s" % (name))
                    self.warning("  problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = string.strip(lines[0])
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + string.strip(l)
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            i = 0
            while i < nbargs:
                if args[i][1] == arg:
                    args[i] = (args[i][0], arg, desc)
                    break
                i = i + 1
            if i >= nbargs:
                if not quiet:
                    self.warning("Unable to find arg %s from function comment for %s" % (
                       arg, name))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = None
        while len(lines) > 0:
            l = lines[0]
            i = 0
            # Remove all leading '*', followed by at most one ' ' character
            # since we need to preserve correct indentation of code examples
            while i < len(l) and l[i] == '*':
                i = i + 1
            if i > 0:
                if i < len(l) and l[i] == ' ':
                    i = i + 1
                l = l[i:]
            if len(l) >= 6 and l[0:7] == "Returns":
                try:
                    l = string.split(l, ' ', 1)[1]
                except:
                    l = ""
                retdesc = string.strip(l)
                del lines[0]
                while len(lines) > 0:
                    l = lines[0]
                    while len(l) > 0 and l[0] == '*':
                        l = l[1:]
                    l = string.strip(l)
                    retdesc = retdesc + " " + l
                    del lines[0]
            else:
                if desc is not None:
                    desc = desc + "\n" + l
                else:
                    desc = l
                del lines[0]

        if desc is None:
            desc = ""
        retdesc = string.strip(retdesc)
        desc = string.strip(desc)

        if quiet == 0:
             #
             # report missing comments
             #
            i = 0
            while i < nbargs:
                if args[i][2] is None and args[i][0] != "void" and args[i][1] is not None:
                    self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
                i = i + 1
            if retdesc == "" and ret[0] != "void":
                self.warning("Function comment for %s lacks description of return value" % (name))
            if desc == "":
                self.warning("Function comment for %s lacks description of the function" % (name))


        return(((ret[0], retdesc), args, desc))

    def parsePreproc(self, token):
        if debug:
            print "=> preproc ", token, self.lexer.tokens
        name = token[1]
        if name == "#include":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                self.index_add(token[1], self.filename, not self.is_header,
                                "include")
                return self.lexer.token()
            return token
        if name == "#define":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                 # TODO macros with arguments
                name = token[1]
                lst = []
                token = self.lexer.token()
                while token is not None and token[0] == 'preproc' and \
                      token[1][0] != '#':
                    lst.append(token[1])
                    token = self.lexer.token()
                try:
                    name = string.split(name, '(') [0]
                except:
                    pass
                info = self.parseMacroComment(name, not self.is_header)
                self.index_add(name, self.filename, not self.is_header,
                                "macro", info)
                return token

        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        # We process conditionals (i.e. tokens from #ifdef, #ifndef,
        # #if, #else and #endif) for headers and mainline code,
        # store the ones from the header in libxml2-api.xml, and later
        # (in the routine merge_public) verify that the two (header and
        # mainline code) agree.
        #
        # There is a small problem with processing the headers. Some of
        # the variables are not concerned with enabling / disabling of
        # library functions (e.g. '__XML_PARSER_H__'), and we don't want
        # them to be included in libxml2-api.xml, or involved in
        # the check between the header and the mainline code.  To
        # accomplish this, we ignore any conditional which doesn't include
        # the string 'ENABLED'
        #
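        #
        # For example (hypothetical symbols): "#ifdef WITH_FOO_ENABLED" pushes
        # "defined(WITH_FOO_ENABLED)" onto self.conditionals, while a plain
        # include guard such as "LIBVIRT_FOO_H" is only tracked in
        # self.defines and never recorded with the symbols.
        #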
        if name == "#ifdef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append("defined(%s)" % apstr)
            except:
                pass
        elif name == "#ifndef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append("!defined(%s)" % apstr)
            except:
                pass
        elif name == "#if":
            apstr = ""
            for tok in self.lexer.tokens:
                if apstr != "":
                    apstr = apstr + " "
                apstr = apstr + tok[1]
            try:
                self.defines.append(apstr)
                if string.find(apstr, 'ENABLED') != -1:
                    self.conditionals.append(apstr)
            except:
                pass
        elif name == "#else":
            if self.conditionals != [] and \
               string.find(self.defines[-1], 'ENABLED') != -1:
                self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
        elif name == "#endif":
            if self.conditionals != [] and \
               string.find(self.defines[-1], 'ENABLED') != -1:
                self.conditionals = self.conditionals[:-1]
            self.defines = self.defines[:-1]
        token = self.lexer.token()
        while token is not None and token[0] == 'preproc' and \
            token[1][0] != '#':
            token = self.lexer.token()
        return token

     #
     # Token acquisition on top of the lexer: it handles preprocessor
     # directives and comments internally since they are logically not
     # part of the program structure.
     #
    def push(self, tok):
        self.lexer.push(tok)

    def token(self):
        global ignored_words

        token = self.lexer.token()
        while token is not None:
            if token[0] == 'comment':
                token = self.parseComment(token)
                continue
            elif token[0] == 'preproc':
                token = self.parsePreproc(token)
                continue
            elif token[0] == "name" and token[1] == "__const":
                token = ("name", "const")
                return token
            elif token[0] == "name" and token[1] == "__attribute":
                token = self.lexer.token()
                while token is not None and token[1] != ";":
                    token = self.lexer.token()
                return token
            elif token[0] == "name" and ignored_words.has_key(token[1]):
                (n, info) = ignored_words[token[1]]
                i = 0
                while i < n:
                    token = self.lexer.token()
                    i = i + 1
                token = self.lexer.token()
                continue
            else:
                if debug:
                    print "=> ", token
                return token
        return None

     #
     # Parse a typedef, it records the type and its name.
     #
    def parseTypedef(self, token):
        if token is None:
            return None
        token = self.parseType(token)
        if token is None:
            self.error("parsing typedef")
            return None
        base_type = self.type
        type = base_type
         #self.debug("end typedef type", token)
        while token is not None:
            if token[0] == "name":
                name = token[1]
                signature = self.signature
                if signature is not None:
                    type = string.split(type, '(')[0]
                    d = self.mergeFunctionComment(name,
                            ((type, None), signature), 1)
                    self.index_add(name, self.filename, not self.is_header,
                                    "functype", d)
                else:
                    if base_type == "struct":
                        self.index_add(name, self.filename, not self.is_header,
                                        "struct", type)
                        base_type = "struct " + name
                    else:
                        # TODO report missing or misformatted comments
                        info = self.parseTypeComment(name, 1)
                        self.index_add(name, self.filename, not self.is_header,
                                    "typedef", type, info)
                token = self.token()
            else:
                self.error("parsing typedef: expecting a name")
                return token
             #self.debug("end typedef", token)
            if token is not None and token[0] == 'sep' and token[1] == ',':
                type = base_type
                token = self.token()
                while token is not None and token[0] == "op":
                    type = type + token[1]
                    token = self.token()
            elif token is not None and token[0] == 'sep' and token[1] == ';':
                break
            elif token is not None and token[0] == 'name':
                type = base_type
                continue
            else:
                self.error("parsing typedef: expecting ';'", token)
                return token
        token = self.token()
        return token

     #
     # Parse a C code block; used for function bodies, it parses up to and
     # including the balancing }
     #
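     # While scanning, if collect_ref is set, "vir"-prefixed names used inside
     # the block are also registered as references (function calls or types).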
    def parseBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.comment = None
                token = self.token()
                return token
            else:
                if self.collect_ref == 1:
                    oldtok = token
                    token = self.token()
                    if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
                        if token[0] == "sep" and token[1] == "(":
                            self.index_add_ref(oldtok[1], self.filename,
                                                0, "function")
                            token = self.token()
                        elif token[0] == "name":
                            token = self.token()
                            if token[0] == "sep" and (token[1] == ";" or
                               token[1] == "," or token[1] == "="):
                                self.index_add_ref(oldtok[1], self.filename,
                                                    0, "type")
                    elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                            0, "typedef")
                    elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                            0, "typedef")

                else:
                    token = self.token()
        return token

     #
     # Parse a C struct definition till the balancing }
     #
    def parseStruct(self, token):
        fields = []
         #self.debug("start parseStruct", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.struct_fields = fields
                 #self.debug("end parseStruct", token)
                 #print fields
                token = self.token()
                return token
            else:
                base_type = self.type
                 #self.debug("before parseType", token)
                token = self.parseType(token)
                 #self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        if self.type == "union":
                            fields.append((self.type, fname, self.comment,
                                           self.union_fields))
                            self.union_fields = []
                        else:
                            fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseStruct: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseStruct: expecting ;", token)
                else:
                    self.error("parseStruct: name", token)
                    token = self.token()
                self.type = base_type
        self.struct_fields = fields
         #self.debug("end parseStruct", token)
         #print fields
        return token

     #
     # Parse a C union definition till the balancing }
     #
    def parseUnion(self, token):
        fields = []
        # self.debug("start parseUnion", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.union_fields = fields
                # self.debug("end parseUnion", token)
                # print fields
                token = self.token()
                return token
            else:
                base_type = self.type
                # self.debug("before parseType", token)
                token = self.parseType(token)
                # self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseUnion: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseUnion: expecting ;", token)
                else:
                    self.error("parseUnion: name", token)
                    token = self.token()
                self.type = base_type
        self.union_fields = fields
        # self.debug("end parseUnion", token)
        # print fields
        return token

     #
     # Parse a C enum block, parse till the balancing }
     #
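     # Each enumerator is collected as a (name, value, comment) tuple; when no
     # explicit "= value" is given the previous value is incremented (value
     # starts at "-1", so the first implicit value is 0).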
    def parseEnumBlock(self, token):
        self.enums = []
        name = None
        self.comment = None
        comment = ""
        value = "-1"
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                if name is not None:
                    self.cleanupComment()
                    if self.comment is not None:
                        comment = self.comment
                        self.comment = None
                    self.enums.append((name, value, comment))
                token = self.token()
                return token
            elif token[0] == "name":
                    self.cleanupComment()
                    if name is not None:
                        if self.comment is not None:
                            comment = string.strip(self.comment)
                            self.comment = None
                        self.enums.append((name, value, comment))
                    name = token[1]
                    comment = ""
                    token = self.token()
                    if token[0] == "op" and token[1][0] == "=":
                        value = ""
                        if len(token[1]) > 1:
                            value = token[1][1:]
                        token = self.token()
                        while token[0] != "sep" or (token[1] != ',' and
                              token[1] != '}'):
                            value = value + token[1]
                            token = self.token()
                    else:
                        try:
                            value = "%d" % (int(value) + 1)
                        except:
                            self.warning("Failed to compute value of enum %s" % (name))
                            value=""
                    if token[0] == "sep" and token[1] == ",":
                        token = self.token()
            else:
                token = self.token()
        return token

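     #
     # Parse the body of a VIR_ENUM_DECL(...) macro invocation; in the C
     # sources this looks roughly like (illustrative only):
     #     VIR_ENUM_DECL(virExample)
     # Only the name, the closing ')' and an optional ';' are consumed;
     # nothing is added to the index for these declarations.
     #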
    def parseVirEnumDecl(self, token):
        if token[0] != "name":
            self.error("parsing VIR_ENUM_DECL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

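     #
     # Parse the body of a VIR_ENUM_IMPL(...) macro invocation, e.g.
     # (illustrative only):
     #     VIR_ENUM_IMPL(virExample, VIR_EXAMPLE_LAST,
     #                   "plain string", N_("translatable string"))
     # i.e. a type name, a sentinel name, then a list of strings, each
     # optionally wrapped in N_() for gettext.
     #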
    def parseVirEnumImpl(self, token):
        # First the type name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
        token = self.token()

        # Now the sentinel name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        token = self.token()

        # Now a list of strings (optional comments)
        while token is not None:
            isGettext = False
            # First a string, optionally with N_(...)
            if token[0] == 'name':
                if token[1] != 'N_':
                    self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
                token = self.token()
                if token[0] != "sep" or token[1] != '(':
                    self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
                token = self.token()
                isGettext = True

                if token[0] != "string":
                    self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
                token = self.token()
            elif token[0] == "string":
                token = self.token()
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting a string", token)

            # Then a separator
            if token[0] == "sep":
                if isGettext and token[1] == ')':
                    token = self.token()

                if token[1] == ',':
                    token = self.token()

                if token[1] == ')':
                    token = self.token()
                    break

            # Then an optional comment
            if token[0] == "comment":
                token = self.token()


        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

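     #
     # Parse the body of a VIR_LOG_INIT("...") macro invocation: a single
     # string literal followed by the closing ')' and an optional ';'.
     #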
    def parseVirLogInit(self, token):
        if token[0] != "string":
            self.error("parsing VIR_LOG_INIT: expecting string", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

     #
     # Parse a C definition block, used for structs or unions; it parses till
     # the balancing }
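     #    The block content itself is discarded, only brace balancing
     #    matters; nested '{' ... '}' pairs are consumed recursively.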
     #
    def parseTypeBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                token = self.token()
                return token
            else:
                token = self.token()
        return token

     #
     # Parse a type: the fact that the type name can either occur after
     #    the definition or within the definition makes it a little harder;
     #    if inside, the name token is pushed back before returning
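     #    On return self.type holds the accumulated textual type, for
     #    instance "const char *" (illustrative example).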
     #
    def parseType(self, token):
        self.type = ""
        self.struct_fields = []
        self.union_fields = []
        self.signature = None
        if token is None:
            return token

        while token[0] == "name" and (
              token[1] == "const" or \
              token[1] == "unsigned" or \
              token[1] == "signed"):
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()

        if token[0] == "name" and token[1] == "long":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

            # some read ahead for long long
            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "long":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "int":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

        elif token[0] == "name" and token[1] == "short":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

        elif token[0] == "name" and token[1] == "struct":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseStruct(token)
            elif token is not None and token[0] == "op" and token[1] == "*":
                self.type = self.type + " " + nametok[1] + " *"
                token = self.token()
                while token is not None and token[0] == "op" and token[1] == "*":
                    self.type = self.type + " *"
                    token = self.token()
                if token[0] == "name":
                    nametok = token
                    token = self.token()
                else:
                    self.error("struct : expecting name", token)
                    return token
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "union":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseUnion(token)
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "enum":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            self.enums = []
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseEnumBlock(token)
            else:
                self.error("parsing enum: expecting '{'", token)
            enum_type = None
            if token is not None and token[0] != "name":
                self.lexer.push(token)
                token = ("name", "enum")
            else:
                enum_type = token[1]
            for enum in self.enums:
                self.index_add(enum[0], self.filename,
                               not self.is_header, "enum",
                               (enum[1], enum[2], enum_type))
            return token
        elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumDecl(token)
            else:
                self.error("parsing VIR_ENUM_DECL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumdecl")
            return token

        elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumImpl(token)
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumimpl")
            return token

        elif token[0] == "name" and token[1] == "VIR_LOG_INIT":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirLogInit(token)
            else:
                self.error("parsing VIR_LOG_INIT: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virloginit")
            return token

        elif token[0] == "name":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
        else:
            self.error("parsing type %s: expecting a name" % (self.type),
                       token)
            return token
        token = self.token()
        while token is not None and (token[0] == "op" or
              token[0] == "name" and token[1] == "const"):
            self.type = self.type + " " + token[1]
            token = self.token()

         #
         # if there is a parenthesis here, this means a function type
         #
        if token is not None and token[0] == "sep" and token[1] == '(':
            self.type = self.type + token[1]
            token = self.token()
            while token is not None and token[0] == "op" and token[1] == '*':
                self.type = self.type + token[1]
                token = self.token()
            if token is None or token[0] != "name" :
                self.error("parsing function type, name expected", token)
                return token
            self.type = self.type + token[1]
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == ')':
                self.type = self.type + token[1]
                token = self.token()
                if token is not None and token[0] == "sep" and token[1] == '(':
                    token = self.token()
                    type = self.type
                    token = self.parseSignature(token)
                    self.type = type
                else:
                    self.error("parsing function type, '(' expected", token)
                    return token
            else:
                self.error("parsing function type, ')' expected", token)
                return token
            self.lexer.push(token)
            token = nametok
            return token

         #
         # do some lookahead for arrays
         #
        if token is not None and token[0] == "name":
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == '[':
                self.type = self.type + " " + nametok[1]
                while token is not None and token[0] == "sep" and token[1] == '[':
                    self.type = self.type + token[1]
                    token = self.token()
                    while token is not None and token[0] != 'sep' and \
                          token[1] != ']' and token[1] != ';':
                        self.type = self.type + token[1]
                        token = self.token()
                if token is not None and token[0] == 'sep' and token[1] == ']':
                    self.type = self.type + token[1]
                    token = self.token()
                else:
                    self.error("parsing array type, ']' expected", token)
                    return token
            elif token is not None and token[0] == "sep" and token[1] == ':':
                 # remove :12 in case it's a limited int size
                token = self.token()
                token = self.token()
            self.lexer.push(token)
            token = nametok

        return token

     #
     # Parse a signature: '(' has been parsed and we scan the type definition
     #    up to the ')' included
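     #    Each parameter is recorded as a (type, name, comment) tuple in
     #    self.signature; an empty parameter list "()" yields an empty list.
     #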
    def parseSignature(self, token):
        signature = []
        if token is not None and token[0] == "sep" and token[1] == ')':
            self.signature = []
            token = self.token()
            return token
        while token is not None:
            token = self.parseType(token)
            if token is not None and token[0] == "name":
                signature.append((self.type, token[1], None))
                token = self.token()
            elif token is not None and token[0] == "sep" and token[1] == ',':
                token = self.token()
                continue
            elif token is not None and token[0] == "sep" and token[1] == ')':
                 # only the type was provided
                if self.type == "...":
                    signature.append((self.type, "...", None))
                else:
                    signature.append((self.type, None, None))
            if token is not None and token[0] == "sep":
                if token[1] == ',':
                    token = self.token()
                    continue
                elif token[1] == ')':
                    token = self.token()
                    break
        self.signature = signature
        return token

    # this dict contains the functions that are allowed to use [unsigned]
    # long for legacy reasons in their signature and return type. this list is
    # fixed. new procedures and public APIs have to use [unsigned] long long
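    # each value below is a (return_may_use_long, names_of_long_parameters)
    # pair that checkLongLegacyFunction() consults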
    long_legacy_functions = \
      { "virGetVersion"                  : (False, ("libVer", "typeVer")),
        "virConnectGetLibVersion"        : (False, ("libVer")),
        "virConnectGetVersion"           : (False, ("hvVer")),
        "virDomainGetMaxMemory"          : (True,  ()),
        "virDomainMigrate"               : (False, ("flags", "bandwidth")),
        "virDomainMigrate2"              : (False, ("flags", "bandwidth")),
        "virDomainMigrateBegin3"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateConfirm3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateDirect"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateFinish"         : (False, ("flags")),
        "virDomainMigrateFinish2"        : (False, ("flags")),
        "virDomainMigrateFinish3"        : (False, ("flags")),
        "virDomainMigratePeer2Peer"      : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare2"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel"  : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel3" : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI"          : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI2"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion1"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion2"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateSetMaxSpeed"    : (False, ("bandwidth")),
        "virDomainSetMaxMemory"          : (False, ("memory")),
        "virDomainSetMemory"             : (False, ("memory")),
        "virDomainSetMemoryFlags"        : (False, ("memory")),
        "virDomainBlockCommit"           : (False, ("bandwidth")),
        "virDomainBlockJobSetSpeed"      : (False, ("bandwidth")),
        "virDomainBlockPull"             : (False, ("bandwidth")),
        "virDomainBlockRebase"           : (False, ("bandwidth")),
        "virDomainMigrateGetMaxSpeed"    : (False, ("bandwidth")) }

    def checkLongLegacyFunction(self, name, return_type, signature):
        if "long" in return_type and "long long" not in return_type:
            try:
                if not CParser.long_legacy_functions[name][0]:
                    raise Exception()
            except:
                self.error(("function '%s' is not allowed to return long, "
                            "use long long instead") % (name))

        for param in signature:
            if "long" in param[0] and "long long" not in param[0]:
                try:
                    if param[1] not in CParser.long_legacy_functions[name][1]:
                        raise Exception()
                except:
                    self.error(("function '%s' is not allowed to take long "
                                "parameter '%s', use long long instead")
                               % (name, param[1]))

    # this dict contains the structs that are allowed to use [unsigned]
    # long for legacy reasons. this list is fixed. new structs have to use
    # [unsigned] long long
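    # each value lists the field names that may keep the legacy long type,
    # as enforced by checkLongLegacyStruct() below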
    long_legacy_struct_fields = \
      { "_virDomainInfo"                 : ("maxMem", "memory"),
        "_virNodeInfo"                   : ("memory"),
        "_virDomainBlockJobInfo"         : ("bandwidth") }

    def checkLongLegacyStruct(self, name, fields):
        for field in fields:
            if "long" in field[0] and "long long" not in field[0]:
                try:
                    if field[1] not in CParser.long_legacy_struct_fields[name]:
                        raise Exception()
                except:
                    self.error(("struct '%s' is not allowed to contain long "
                                "field '%s', use long long instead") \
                               % (name, field[1]))

     #
     # Parse a global definition, be it a type, variable or function;
     # the extern "C" blocks are a bit nasty and require it to recurse.
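     # Depending on what follows, the symbol is indexed as a "struct",
     # "variable", "function" or "typedef"; function bodies and variable
     # initializers are skipped.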
     #
    def parseGlobal(self, token):
        static = 0
        if token[1] == 'extern':
            token = self.token()
            if token is None:
                return token
            if token[0] == 'string':
                if token[1] == 'C':
                    token = self.token()
                    if token is None:
                        return token
                    if token[0] == 'sep' and token[1] == "{":
                        token = self.token()
#                        print 'Entering extern "C" line ', self.lineno()
                        while token is not None and (token[0] != 'sep' or
                              token[1] != "}"):
                            if token[0] == 'name':
                                token = self.parseGlobal(token)
                            else:
                                self.error(
                                 "token %s %s unexpected at the top level" % (
                                        token[0], token[1]))
                                token = self.parseGlobal(token)
#                        print 'Exiting extern "C" line', self.lineno()
                        token = self.token()
                        return token
                else:
                    return token
        elif token[1] == 'static':
            static = 1
            token = self.token()
            if token is None or  token[0] != 'name':
                return token

        if token[1] == 'typedef':
            token = self.token()
            return self.parseTypedef(token)
        else:
            token = self.parseType(token)
            type_orig = self.type
        if token is None or token[0] != "name":
            return token
        type = type_orig
        self.name = token[1]
        token = self.token()
        while token is not None and (token[0] == "sep" or token[0] == "op"):
            if token[0] == "sep":
                if token[1] == "[":
                    type = type + token[1]
                    token = self.token()
                    while token is not None and (token[0] != "sep" or \
                          token[1] != ";"):
                        type = type + token[1]
                        token = self.token()

            if token is not None and token[0] == "op" and token[1] == "=":
                 #
                 # Skip the initialization of the variable
                 #
                token = self.token()
                if token[0] == 'sep' and token[1] == '{':
                    token = self.token()
                    token = self.parseBlock(token)
                else:
                    self.comment = None
                    while token is not None and (token[0] != "sep" or \
                          (token[1] != ';' and token[1] != ',')):
                            token = self.token()
                self.comment = None
                if token is None or token[0] != "sep" or (token[1] != ';' and
                   token[1] != ','):
                    self.error("missing ';' or ',' after value")

            if token is not None and token[0] == "sep":
                if token[1] == ";":
                    self.comment = None
                    token = self.token()
                    if type == "struct":
                        self.checkLongLegacyStruct(self.name, self.struct_fields)
                        self.index_add(self.name, self.filename,
                             not self.is_header, "struct", self.struct_fields)
                    else:
                        self.index_add(self.name, self.filename,
                             not self.is_header, "variable", type)
                    break
                elif token[1] == "(":
                    token = self.token()
                    token = self.parseSignature(token)
                    if token is None:
                        return None
                    if token[0] == "sep" and token[1] == ";":
                        self.checkLongLegacyFunction(self.name, type, self.signature)
                        d = self.mergeFunctionComment(self.name,
                                ((type, None), self.signature), 1)
                        self.index_add(self.name, self.filename, static,
                                        "function", d)
                        token = self.token()
                    elif token[0] == "sep" and token[1] == "{":
                        self.checkLongLegacyFunction(self.name, type, self.signature)
                        d = self.mergeFunctionComment(self.name,
                                ((type, None), self.signature), static)
                        self.index_add(self.name, self.filename, static,
                                        "function", d)
                        token = self.token()
                        token = self.parseBlock(token)
                elif token[1] == ',':
                    self.comment = None
                    self.index_add(self.name, self.filename, static,
                                    "variable", type)
                    type = type_orig
                    token = self.token()
                    while token is not None and token[0] == "sep":
                        type = type + token[1]
                        token = self.token()
                    if token is not None and token[0] == "name":
                        self.name = token[1]
                        token = self.token()
                else:
                    break

        return token

    def parse(self):
        if not quiet:
            print "Parsing %s" % (self.filename)
        token = self.token()
        while token is not None:
            if token[0] == 'name':
                token = self.parseGlobal(token)
            else:
                self.error("token %s %s unexpected at the top level" % (
                       token[0], token[1]))
                token = self.parseGlobal(token)
                return
        self.parseTopComment(self.top_comment)
        return self.index


class docBuilder:
    """A documentation builder"""
    def __init__(self, name, path='.', directories=['.'], includes=[]):
        self.name = name
        self.path = path
        self.directories = directories
        if name == "libvirt":
            self.includes = includes + included_files.keys()
        elif name == "libvirt-qemu":
            self.includes = includes + qemu_included_files.keys()
        elif name == "libvirt-lxc":
            self.includes = includes + lxc_included_files.keys()
        self.modules = {}
        self.headers = {}
        self.idx = index()
        self.xref = {}
        self.index = {}
        self.basename = name
        self.errors = 0

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print msg

    def error(self, msg):
        self.errors += 1
        print >>sys.stderr, "Error:", msg

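    # Record, for the cross-reference index, which symbol ids mention each
    # word of a documentation string; punctuation is stripped and very short
    # or trivial words ("and", "the") are ignored.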
    def indexString(self, id, str):
        if str is None:
            return
        str = string.replace(str, "'", ' ')
        str = string.replace(str, '"', ' ')
        str = string.replace(str, "/", ' ')
        str = string.replace(str, '*', ' ')
        str = string.replace(str, "[", ' ')
        str = string.replace(str, "]", ' ')
        str = string.replace(str, "(", ' ')
        str = string.replace(str, ")", ' ')
        str = string.replace(str, "<", ' ')
        str = string.replace(str, '>', ' ')
        str = string.replace(str, "&", ' ')
        str = string.replace(str, '#', ' ')
        str = string.replace(str, ",", ' ')
        str = string.replace(str, '.', ' ')
        str = string.replace(str, ';', ' ')
        tokens = string.split(str)
        for token in tokens:
            try:
                c = token[0]
                if string.find(string.letters, c) < 0:
                    pass
                elif len(token) < 3:
                    pass
                else:
                    lower = string.lower(token)
                    # TODO: generalize this a bit
                    if lower == 'and' or lower == 'the':
                        pass
                    elif self.xref.has_key(token):
                        self.xref[token].append(id)
                    else:
                        self.xref[token] = [id]
            except:
                pass

    def analyze(self):
        if not quiet:
            print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
        self.idx.analyze()

    def scanHeaders(self):
        for header in self.headers.keys():
            parser = CParser(header)
            idx = parser.parse()
            self.headers[header] = idx
            self.idx.merge(idx)

    def scanModules(self):
        for module in self.modules.keys():
            parser = CParser(module)
            idx = parser.parse()
            # idx.analyze()
            self.modules[module] = idx
            self.idx.merge_public(idx)

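    # Collect the .c and .h files from the configured directories, keeping
    # only those whose path matches one of self.includes, then parse them.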
    def scan(self):
        for directory in self.directories:
            files = glob.glob(directory + "/*.c")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if string.find(file, incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.modules[file] = None
            files = glob.glob(directory + "/*.h")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if string.find(file, incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.headers[file] = None
        self.scanHeaders()
        self.scanModules()

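    # Strip the directory part and the .c/.h extension to obtain the module
    # name used in the generated XML.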
    def modulename_file(self, file):
        module = os.path.basename(file)
        if module[-2:] == '.h':
            module = module[:-2]
        elif module[-2:] == '.c':
            module = module[:-2]
        return module

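    # Emit one <enum> element; the recorded value is eval()ed when it is a
    # constant expression, otherwise the raw text is written out.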
    def serialize_enum(self, output, name):
        id = self.idx.enums[name]
        output.write("    <enum name='%s' file='%s'" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            info = id.info
            if info[0] is not None and info[0] != '':
                try:
                    val = eval(info[0])
                except:
                    val = info[0]
                output.write(" value='%s'" % (val))
            if info[2] is not None and info[2] != '':
                output.write(" type='%s'" % info[2])
            if info[1] is not None and info[1] != '':
                output.write(" info='%s'" % escape(info[1]))
        output.write("/>\n")

    def serialize_macro(self, output, name):
        id = self.idx.macros[name]
        output.write("    <macro name='%s' file='%s'>\n" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            try:
                (args, desc) = id.info
                if desc is not None and desc != "":
                    output.write("      <info><![CDATA[%s]]></info>\n" % (desc))
                    self.indexString(name, desc)
                for arg in args:
                    (name, desc) = arg
                    if desc is not None and desc != "":
                        output.write("      <arg name='%s' info='%s'/>\n" % (
                                     name, escape(desc)))
                        self.indexString(name, desc)
                    else:
                        output.write("      <arg name='%s'/>\n" % (name))
            except:
                pass
        output.write("    </macro>\n")

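    # Emit a <field type='union'> element with one nested <field> entry per
    # union member.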
    def serialize_union(self, output, field, desc):
        output.write("      <field name='%s' type='union' info='%s'>\n" % (field[1] , desc))
        output.write("        <union>\n")
        for f in field[3]:
            desc = f[2]
            if desc is None:
                desc = ''
            else:
                desc = escape(desc)
            output.write("          <field name='%s' type='%s' info='%s'/>\n" % (f[1] , f[0], desc))

        output.write("        </union>\n")
        output.write("      </field>\n")

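    # Typedefs of structs are emitted as <struct> elements with their fields
    # expanded; any other typedef becomes a plain <typedef> element.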
    def serialize_typedef(self, output, name):
        id = self.idx.typedefs[name]
        if id.info[0:7] == 'struct ':
            output.write("    <struct name='%s' file='%s' type='%s'" % (
                     name, self.modulename_file(id.header), id.info))
            name = id.info[7:]
            if self.idx.structs.has_key(name) and ( \
               type(self.idx.structs[name].info) == type(()) or
                type(self.idx.structs[name].info) == type([])):
                output.write(">\n")
                try:
                    for field in self.idx.structs[name].info:
                        desc = field[2]
                        self.indexString(name, desc)
                        if desc is None:
                            desc = ''
                        else:
                            desc = escape(desc)
                        if field[0] == "union":
                            self.serialize_union(output, field, desc)
                        else:
                            output.write("      <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
                except:
                    self.warning("Failed to serialize struct %s" % (name))
                output.write("    </struct>\n")
            else:
                output.write("/>\n")
        else :
            output.write("    <typedef name='%s' file='%s' type='%s'" % (
                         name, self.modulename_file(id.header), id.info))
            try:
                desc = id.extra
                if desc is not None and desc != "":
                    output.write(">\n      <info><![CDATA[%s]]></info>\n" % (desc))
                    output.write("    </typedef>\n")
                else:
                    output.write("/>\n")
            except:
                output.write("/>\n")

    def serialize_variable(self, output, name):
        id = self.idx.variables[name]
        if id.info is not None:
            output.write("    <variable name='%s' file='%s' type='%s'/>\n" % (
                    name, self.modulename_file(id.header), id.info))
        else:
            output.write("    <variable name='%s' file='%s'/>\n" % (
                    name, self.modulename_file(id.header)))

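    # Emit the XML element for one function-like symbol (the element name is
    # taken from id.type); missing documentation for the return value or an
    # argument is reported as an error unless the symbol is listed in
    # ignored_functions.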
    def serialize_function(self, output, name):
        id = self.idx.functions[name]
        if name == debugsym and not quiet:
            print "=>", id

        output.write("    <%s name='%s' file='%s' module='%s'>\n" % (id.type,
                     name, self.modulename_file(id.header),
                     self.modulename_file(id.module)))
        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        if id.conditionals is not None:
            apstr = ""
            for cond in id.conditionals:
                if apstr != "":
                    apstr = apstr + " &amp;&amp; "
                apstr = apstr + cond
            output.write("      <cond>%s</cond>\n"% (apstr))
        try:
            (ret, params, desc) = id.info
            output.write("      <info><![CDATA[%s]]></info>\n" % (desc))
            self.indexString(name, desc)
            if ret[0] is not None:
                if ret[0] == "void":
                    output.write("      <return type='void'/>\n")
                elif (ret[1] is None or ret[1] == '') and not ignored_functions.has_key(name):
                    self.error("Missing documentation for return of function `%s'" % name)
                else:
                    output.write("      <return type='%s' info='%s'/>\n" % (
                             ret[0], escape(ret[1])))
                    self.indexString(name, ret[1])
            for param in params:
                if param[0] == 'void':
                    continue
                if (param[2] is None or param[2] == ''):
                    if ignored_functions.has_key(name):
                        output.write("      <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
                    else:
                        self.error("Missing documentation for arg `%s' of function `%s'" % (param[1], name))
                else:
                    output.write("      <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
                    self.indexString(name, param[2])
        except:
            print >>sys.stderr, "Exception:", sys.exc_info()[1]
            self.warning("Failed to save function %s info: %s" % (name, `id.info`))
        output.write("    </%s>\n" % (id.type))

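    # Write the <file> block for one header: its Summary/Description/Author
    # metadata and one <exports> line per macro, enum, typedef, struct,
    # variable and function it defines.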
    def serialize_exports(self, output, file):
        module = self.modulename_file(file)
        output.write("    <file name='%s'>\n" % (module))
        dict = self.headers[file]
        if dict.info is not None:
            for data in ('Summary', 'Description', 'Author'):
                try:
                    output.write("     <%s>%s</%s>\n" % (
                                 string.lower(data),
                                 escape(dict.info[data]),
                                 string.lower(data)))
                except:
                    self.warning("Header %s lacks a %s description" % (module, data))
            if dict.info.has_key('Description'):
                desc = dict.info['Description']
                if string.find(desc, "DEPRECATED") != -1:
                    output.write("     <deprecated/>\n")

        ids = dict.macros.keys()
        ids.sort()
        for id in uniq(ids):
            # Macros are sometime used to masquerade other types.
            if dict.functions.has_key(id):
                continue
            if dict.variables.has_key(id):
                continue
            if dict.typedefs.has_key(id):
                continue
            if dict.structs.has_key(id):
                continue
            if dict.unions.has_key(id):
                continue
            if dict.enums.has_key(id):
                continue
            output.write("     <exports symbol='%s' type='macro'/>\n" % (id))
        ids = dict.enums.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='enum'/>\n" % (id))
        ids = dict.typedefs.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='typedef'/>\n" % (id))
        ids = dict.structs.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='struct'/>\n" % (id))
        ids = dict.variables.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='variable'/>\n" % (id))
        ids = dict.functions.keys()
        ids.sort()
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='function'/>\n" % (id))
        output.write("    </file>\n")

    def serialize_xrefs_files(self, output):
        headers = self.headers.keys()
        headers.sort()
        for file in headers:
            module = self.modulename_file(file)
            output.write("    <file name='%s'>\n" % (module))
            dict = self.headers[file]
            ids = uniq(dict.functions.keys() + dict.variables.keys() + \
                  dict.macros.keys() + dict.typedefs.keys() + \
                  dict.structs.keys() + dict.enums.keys())
            ids.sort()
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </file>\n")
        pass

    def serialize_xrefs_functions(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                for param in params:
                    if param[0] == 'void':
                        continue
                    if funcs.has_key(param[0]):
                        funcs[param[0]].append(name)
                    else:
                        funcs[param[0]] = [name]
            except:
                pass
        typ = funcs.keys()
        typ.sort()
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *" :
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            pid = ''    # not sure why we have dups, but get rid of them!
            for id in ids:
                if id != pid:
                    output.write("      <ref name='%s'/>\n" % (id))
                    pid = id
            output.write("    </type>\n")

    def serialize_xrefs_constructors(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                if ret[0] == "void":
                    continue
                if funcs.has_key(ret[0]):
                    funcs[ret[0]].append(name)
                else:
                    funcs[ret[0]] = [name]
            except:
                pass
        typ = funcs.keys()
        typ.sort()
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *" :
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </type>\n")

    def serialize_xrefs_alpha(self, output):
        letter = None
        ids = self.idx.identifiers.keys()
        ids.sort()
        for id in ids:
            if id[0] != letter:
                if letter is not None:
                    output.write("    </letter>\n")
                letter = id[0]
                output.write("    <letter name='%s'>\n" % (letter))
            output.write("      <ref name='%s'/>\n" % (id))
        if letter is not None:
            output.write("    </letter>\n")

    def serialize_xrefs_references(self, output):
        typ = self.idx.identifiers.keys()
        typ.sort()
        for id in typ:
            idf = self.idx.identifiers[id]
            module = idf.header
            output.write("    <reference name='%s' href='%s'/>\n" % (id,
                         'html/' + self.basename + '-' +
                         self.modulename_file(module) + '.html#' +
                         id))

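    # Emit the word index, split into <chunk> groups of roughly 200
    # references so the generated pages stay small; words referenced more
    # than 30 times are considered too common and skipped.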
    def serialize_xrefs_index(self, output):
        index = self.xref
        typ = index.keys()
        typ.sort()
        letter = None
        count = 0
        chunk = 0
        chunks = []
        for id in typ:
            if len(index[id]) > 30:
                continue
            if id[0] != letter:
                if letter is None or count > 200:
                    if letter is not None:
                        output.write("      </letter>\n")
                        output.write("    </chunk>\n")
                        count = 0
                        chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
                    output.write("    <chunk name='chunk%s'>\n" % (chunk))
                    first_letter = id[0]
                    chunk = chunk + 1
                elif letter is not None:
                    output.write("      </letter>\n")
                letter = id[0]
                output.write("      <letter name='%s'>\n" % (letter))
            output.write("        <word name='%s'>\n" % (id))
            tokens = index[id]
            tokens.sort()
            tok = None
            for token in tokens:
                if tok == token:
                    continue
                tok = token
                output.write("          <ref name='%s'/>\n" % (token))
                count = count + 1
            output.write("        </word>\n")
        if letter is not None:
            output.write("      </letter>\n")
            output.write("    </chunk>\n")
            if count != 0:
                chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
            output.write("    <chunks>\n")
            for ch in chunks:
                output.write("      <chunk name='%s' start='%s' end='%s'/>\n" % (
                             ch[0], ch[1], ch[2]))
            output.write("    </chunks>\n")

    def serialize_xrefs(self, output):
        output.write("  <references>\n")
        self.serialize_xrefs_references(output)
        output.write("  </references>\n")
        output.write("  <alpha>\n")
        self.serialize_xrefs_alpha(output)
        output.write("  </alpha>\n")
        output.write("  <constructors>\n")
        self.serialize_xrefs_constructors(output)
        output.write("  </constructors>\n")
        output.write("  <functions>\n")
        self.serialize_xrefs_functions(output)
        output.write("  </functions>\n")
        output.write("  <files>\n")
        self.serialize_xrefs_files(output)
        output.write("  </files>\n")
        output.write("  <index>\n")
        self.serialize_xrefs_index(output)
        output.write("  </index>\n")

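    # Write <name>-api.xml with the exported files and symbols, then
    # <name>-refs.xml with the cross-reference data; exit with status 3 if
    # any errors were recorded.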
    def serialize(self):
        filename = "%s/%s-api.xml" % (self.path, self.name)
        if not quiet:
            print "Saving XML description %s" % (filename)
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<api name='%s'>\n" % self.name)
        output.write("  <files>\n")
        headers = self.headers.keys()
        headers.sort()
        for file in headers:
            self.serialize_exports(output, file)
        output.write("  </files>\n")
        output.write("  <symbols>\n")
        macros = self.idx.macros.keys()
        macros.sort()
        for macro in macros:
            self.serialize_macro(output, macro)
        enums = self.idx.enums.keys()
        enums.sort()
        for enum in enums:
            self.serialize_enum(output, enum)
        typedefs = self.idx.typedefs.keys()
        typedefs.sort()
        for typedef in typedefs:
            self.serialize_typedef(output, typedef)
        variables = self.idx.variables.keys()
        variables.sort()
        for variable in variables:
            self.serialize_variable(output, variable)
        functions = self.idx.functions.keys()
        functions.sort()
        for function in functions:
            self.serialize_function(output, function)
        output.write("  </symbols>\n")
        output.write("</api>\n")
        output.close()

        if self.errors > 0:
            print >>sys.stderr, "apibuild.py: %d error(s) encountered during generation" % self.errors
            sys.exit(3)

        filename = "%s/%s-refs.xml" % (self.path, self.name)
        if not quiet:
            print "Saving XML Cross References %s" % (filename)
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<apirefs name='%s'>\n" % self.name)
        self.serialize_xrefs(output)
        output.write("</apirefs>\n")
        output.close()


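#
# Rebuild the API description for one of the known modules, locating the
# sources either via the $srcdir environment variable or relative to the
# current directory.
#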
def rebuild(name):
    if name not in ["libvirt", "libvirt-qemu", "libvirt-lxc"]:
        self.warning("rebuild() failed, unknown module %s") % name
2539
        return None
    builder = None
    # fall back to the current directory when $srcdir is not exported
    srcdir = os.environ.get("srcdir", ".")
    if glob.glob(srcdir + "/../src/libvirt.c") != [] :
        if not quiet:
            print "Rebuilding API description for %s" % name
        dirs = [srcdir + "/../src",
                srcdir + "/../src/util",
                srcdir + "/../include/libvirt"]
        if glob.glob(srcdir + "/../include/libvirt/libvirt.h") == [] :
            dirs.append("../include/libvirt")
        builder = docBuilder(name, srcdir, dirs, [])
    elif glob.glob("src/libvirt.c") != [] :
        if not quiet:
            print "Rebuilding API description for %s" % name
        builder = docBuilder(name, srcdir,
                             ["src", "src/util", "include/libvirt"],
                             [])
    else:
        self.warning("rebuild() failed, unable to guess the module")
2559
        return None
    builder.scan()
    builder.analyze()
    builder.serialize()
    return builder

#
# for debugging the parser
#
def parse(filename):
    parser = CParser(filename)
    idx = parser.parse()
    return idx

if __name__ == "__main__":
    if len(sys.argv) > 1:
        debug = 1
        parse(sys.argv[1])
    else:
        rebuild("libvirt")
        rebuild("libvirt-qemu")
        rebuild("libvirt-lxc")
    if warnings > 0:
        sys.exit(2)
    else:
        sys.exit(0)