#!/usr/bin/env python
#
# This is the API builder, it parses the C sources and builds the
# API formal description in XML.
#
# See Copyright for the status of this software.
#
# daniel@veillard.com
#

from __future__ import print_function

import os, sys
import glob
import re

quiet=True
warnings=0
debug=False
debugsym=None

#
# C parser analysis code
#
included_files = {
  "libvirt-common.h": "header with general libvirt API definitions",
  "libvirt-domain.h": "header with general libvirt API definitions",
  "libvirt-domain-snapshot.h": "header with general libvirt API definitions",
  "libvirt-event.h": "header with general libvirt API definitions",
  "libvirt-host.h": "header with general libvirt API definitions",
  "libvirt-interface.h": "header with general libvirt API definitions",
  "libvirt-network.h": "header with general libvirt API definitions",
  "libvirt-nodedev.h": "header with general libvirt API definitions",
  "libvirt-nwfilter.h": "header with general libvirt API definitions",
  "libvirt-secret.h": "header with general libvirt API definitions",
  "libvirt-storage.h": "header with general libvirt API definitions",
  "libvirt-stream.h": "header with general libvirt API definitions",
  "virterror.h": "header with error specific API definitions",
  "libvirt.c": "Main interfaces for the libvirt library",
  "libvirt-domain.c": "Domain interfaces for the libvirt library",
  "libvirt-domain-snapshot.c": "Domain snapshot interfaces for the libvirt library",
  "libvirt-host.c": "Host interfaces for the libvirt library",
  "libvirt-interface.c": "Interface interfaces for the libvirt library",
  "libvirt-network.c": "Network interfaces for the libvirt library",
  "libvirt-nodedev.c": "Node device interfaces for the libvirt library",
  "libvirt-nwfilter.c": "NWFilter interfaces for the libvirt library",
  "libvirt-secret.c": "Secret interfaces for the libvirt library",
  "libvirt-storage.c": "Storage interfaces for the libvirt library",
  "libvirt-stream.c": "Stream interfaces for the libvirt library",
  "virerror.c": "implements error handling and reporting code for libvirt",
  "virevent.c": "event loop for monitoring file handles",
  "virtypedparam.c": "virTypedParameters APIs",
}

qemu_included_files = {
  "libvirt-qemu.h": "header with QEMU specific API definitions",
  "libvirt-qemu.c": "Implementations for the QEMU specific APIs",
}

lxc_included_files = {
  "libvirt-lxc.h": "header with LXC specific API definitions",
  "libvirt-lxc.c": "Implementations for the LXC specific APIs",
}

admin_included_files = {
  "libvirt-admin.h": "header with admin specific API definitions",
  "libvirt-admin.c": "Implementations for the admin specific APIs",
}

ignored_words = {
  "ATTRIBUTE_UNUSED": (0, "macro keyword"),
  "ATTRIBUTE_SENTINEL": (0, "macro keyword"),
  "VIR_DEPRECATED": (0, "macro keyword"),
  "VIR_EXPORT_VAR": (0, "macro keyword"),
  "WINAPI": (0, "Windows keyword"),
  "__declspec": (3, "Windows keyword"),
  "__stdcall": (0, "Windows keyword"),
}

ignored_functions = {
  "virConnectSupportsFeature": "private function for remote access",
  "virDomainMigrateFinish": "private function for migration",
  "virDomainMigrateFinish2": "private function for migration",
  "virDomainMigratePerform": "private function for migration",
  "virDomainMigratePrepare": "private function for migration",
  "virDomainMigratePrepare2": "private function for migration",
  "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
  "virDomainMigrateBegin3": "private function for migration",
  "virDomainMigrateFinish3": "private function for migration",
  "virDomainMigratePerform3": "private function for migration",
  "virDomainMigratePrepare3": "private function for migration",
  "virDomainMigrateConfirm3": "private function for migration",
  "virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
  "DllMain": "specific function for Win32",
  "virTypedParamsValidate": "internal function in virtypedparam.c",
  "virTypedParameterValidateSet": "internal function in virtypedparam.c",
  "virTypedParameterAssign": "internal function in virtypedparam.c",
  "virTypedParameterAssignFromStr": "internal function in virtypedparam.c",
  "virTypedParameterToString": "internal function in virtypedparam.c",
  "virTypedParamsCheck": "internal function in virtypedparam.c",
  "virTypedParamsCopy": "internal function in virtypedparam.c",
  "virDomainMigrateBegin3Params": "private function for migration",
  "virDomainMigrateFinish3Params": "private function for migration",
  "virDomainMigratePerform3Params": "private function for migration",
  "virDomainMigratePrepare3Params": "private function for migration",
  "virDomainMigrateConfirm3Params": "private function for migration",
  "virDomainMigratePrepareTunnel3Params": "private function for tunnelled migration",
  "virErrorCopyNew": "private",
}

ignored_macros = {
  "_virSchedParameter": "backward compatibility macro for virTypedParameter",
  "_virBlkioParameter": "backward compatibility macro for virTypedParameter",
  "_virMemoryParameter": "backward compatibility macro for virTypedParameter",
}

# macros that should be completely skipped
hidden_macros = {
  "VIR_DEPRECATED": "internal macro to mark deprecated apis",
  "VIR_EXPORT_VAR": "internal macro to mark exported vars",
}

def escape(raw):
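    """Escape the XML special characters in a string."""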
    raw = raw.replace('&', '&amp;')
    raw = raw.replace('<', '&lt;')
    raw = raw.replace('>', '&gt;')
    raw = raw.replace("'", '&apos;')
    raw = raw.replace('"', '&quot;')
    return raw

def uniq(items):
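    """Return the sorted list of unique items."""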
    d = {}
    for item in items:
        d[item]=1
    k = sorted(d.keys())
    return k

class identifier:
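    """One indexed symbol (function, variable, typedef, macro, ...) along
       with the header/module it came from, its line number, its parsed
       documentation info and any preprocessor conditionals guarding it."""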
    def __init__(self, name, header=None, module=None, type=None, lineno = 0,
                 info=None, extra=None, conditionals = None):
        self.name = name
        self.header = header
        self.module = module
        self.type = type
        self.info = info
        self.extra = extra
        self.lineno = lineno
        self.static = 0
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]
        if self.name == debugsym and not quiet:
            print("=> define %s : %s" % (debugsym, (module, type, info,
                                         extra, conditionals)))

    def __repr__(self):
        r = "%s %s:" % (self.type, self.name)
        if self.static:
            r = r + " static"
        if self.module is not None:
            r = r + " from %s" % (self.module)
        if self.info is not None:
            r = r + " " + repr(self.info)
        if self.extra is not None:
            r = r + " " + repr(self.extra)
        if self.conditionals is not None:
            r = r + " " + repr(self.conditionals)
        return r


    def set_header(self, header):
        self.header = header
    def set_module(self, module):
        self.module = module
    def set_type(self, type):
        self.type = type
    def set_info(self, info):
        self.info = info
    def set_extra(self, extra):
        self.extra = extra
    def set_lineno(self, lineno):
        self.lineno = lineno
    def set_static(self, static):
        self.static = static
    def set_conditionals(self, conditionals):
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]

    def get_name(self):
        return self.name
    def get_header(self):
        return self.header
    def get_module(self):
        return self.module
    def get_type(self):
        return self.type
    def get_info(self):
        return self.info
    def get_lineno(self):
        return self.lineno
    def get_extra(self):
        return self.extra
    def get_static(self):
        return self.static
    def get_conditionals(self):
        return self.conditionals

    def update(self, header, module, type = None, info = None, extra=None,
               conditionals=None):
        if self.name == debugsym and not quiet:
            print("=> update %s : %s" % (debugsym, (module, type, info,
                                         extra, conditionals)))
        if header is not None and self.header is None:
            self.set_header(module)
        if module is not None and (self.module is None or self.header == self.module):
            self.set_module(module)
        if type is not None and self.type is None:
            self.set_type(type)
        if info is not None:
            self.set_info(info)
        if extra is not None:
            self.set_extra(extra)
        if conditionals is not None:
            self.set_conditionals(conditionals)

class index:
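    """Collection of all symbols parsed from a set of files, kept in a
       global identifiers dict as well as per-kind dicts (functions,
       variables, structs, unions, enums, typedefs, macros, ...)."""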
    def __init__(self, name = "noname"):
        self.name = name
        self.identifiers = {}
        self.functions = {}
        self.variables = {}
        self.includes = {}
        self.structs = {}
        self.unions = {}
        self.enums = {}
        self.typedefs = {}
        self.macros = {}
        self.references = {}
        self.info = {}

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print(msg)

    def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
           d = self.identifiers[name]
           d.update(header, module, type, info, extra, conditionals)
        except:
           d = identifier(name, header, module, type, lineno, info, extra, conditionals)
           self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            self.references[name] = d

        if name == debugsym and not quiet:
            print("New ref: %s" % (d))

        return d

    def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
        if name[0:2] == '__':
            return None
        d = None
        try:
           d = self.identifiers[name]
           d.update(header, module, type, info, extra, conditionals)
        except:
           d = identifier(name, header, module, type, lineno, info, extra, conditionals)
           self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            if type == "function":
                self.functions[name] = d
            elif type == "functype":
                self.functions[name] = d
            elif type == "variable":
                self.variables[name] = d
            elif type == "include":
                self.includes[name] = d
            elif type == "struct":
                self.structs[name] = d
            elif type == "union":
                self.unions[name] = d
            elif type == "enum":
                self.enums[name] = d
            elif type == "typedef":
                self.typedefs[name] = d
            elif type == "macro":
                self.macros[name] = d
            else:
                self.warning("Unable to register type %s" % type)

        if name == debugsym and not quiet:
            print("New symbol: %s" % (d))

        return d

    def merge(self, idx):
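        """Merge another index into this one, dropping macros that are
           overridden by real definitions and warning about symbols that
           are declared more than once."""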
        for id in idx.functions.keys():
              #
              # macro might be used to override functions or variables
              # definitions
              #
             if id in self.macros:
                 del self.macros[id]
             if id in self.functions:
                 self.warning("function %s from %s redeclared in %s" % (
                    id, self.functions[id].header, idx.functions[id].header))
             else:
                 self.functions[id] = idx.functions[id]
                 self.identifiers[id] = idx.functions[id]
        for id in idx.variables.keys():
              #
              # macro might be used to override functions or variables
              # definitions
              #
             if id in self.macros:
                 del self.macros[id]
             if id in self.variables:
                 self.warning("variable %s from %s redeclared in %s" % (
                    id, self.variables[id].header, idx.variables[id].header))
             else:
                 self.variables[id] = idx.variables[id]
                 self.identifiers[id] = idx.variables[id]
        for id in idx.structs.keys():
             if id in self.structs:
                 self.warning("struct %s from %s redeclared in %s" % (
                    id, self.structs[id].header, idx.structs[id].header))
             else:
                 self.structs[id] = idx.structs[id]
                 self.identifiers[id] = idx.structs[id]
        for id in idx.unions.keys():
             if id in self.unions:
                 print("union %s from %s redeclared in %s" % (
                    id, self.unions[id].header, idx.unions[id].header))
             else:
                 self.unions[id] = idx.unions[id]
                 self.identifiers[id] = idx.unions[id]
        for id in idx.typedefs.keys():
             if id in self.typedefs:
                 self.warning("typedef %s from %s redeclared in %s" % (
                    id, self.typedefs[id].header, idx.typedefs[id].header))
             else:
                 self.typedefs[id] = idx.typedefs[id]
                 self.identifiers[id] = idx.typedefs[id]
        for id in idx.macros.keys():
              #
              # macro might be used to override functions or variables
              # definitions
              #
             if id in self.variables:
                 continue
             if id in self.functions:
                 continue
             if id in self.enums:
                 continue
             if id in self.macros:
                 self.warning("macro %s from %s redeclared in %s" % (
                    id, self.macros[id].header, idx.macros[id].header))
             else:
                 self.macros[id] = idx.macros[id]
                 self.identifiers[id] = idx.macros[id]
        for id in idx.enums.keys():
             if id in self.enums:
                 self.warning("enum %s from %s redeclared in %s" % (
                    id, self.enums[id].header, idx.enums[id].header))
             else:
                 self.enums[id] = idx.enums[id]
                 self.identifiers[id] = idx.enums[id]

    def merge_public(self, idx):
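        """Merge the information gathered from the C modules into the
           public symbols collected from the headers, checking that the
           preprocessor conditionals of both sides agree."""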
        for id in idx.functions.keys():
             if id in self.functions:
                 # check that function condition agrees with header
                 if idx.functions[id].conditionals != \
                    self.functions[id].conditionals:
                     self.warning("Header condition differs from Function for %s:" \
                                      % id)
                     self.warning("  H: %s" % self.functions[id].conditionals)
                     self.warning("  C: %s" % idx.functions[id].conditionals)
                 up = idx.functions[id]
                 self.functions[id].update(None, up.module, up.type, up.info, up.extra)
         #     else:
         #         print("Function %s from %s is not declared in headers" % (
         #               id, idx.functions[id].module))
         # TODO: do the same for variables.

    def analyze_dict(self, type, dict):
        count = 0
        public = 0
        for name in dict.keys():
            id = dict[name]
            count = count + 1
            if id.static == 0:
                public = public + 1
        if count != public:
            print("  %d %s , %d public" % (count, type, public))
        elif count != 0:
            print("  %d public %s" % (count, type))


    def analyze(self):
        if not quiet:
            self.analyze_dict("functions", self.functions)
            self.analyze_dict("variables", self.variables)
            self.analyze_dict("structs", self.structs)
            self.analyze_dict("unions", self.unions)
            self.analyze_dict("typedefs", self.typedefs)
            self.analyze_dict("macros", self.macros)

class CLexer:
    """A lexer for the C language, tokenizing the input by reading and
       analyzing it line by line"""
    def __init__(self, input):
        self.input = input
        self.tokens = []
        self.line = ""
        self.lineno = 0

    def getline(self):
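        """Return the next non-empty line, stripped of surrounding
           whitespace and with backslash-continued lines joined."""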
        line = ''
        while line == '':
            line = self.input.readline()
            if not line:
                return None
            self.lineno = self.lineno + 1
            line = line.lstrip()
            line = line.rstrip()
            if line == '':
                continue
            while line[-1] == '\\':
                line = line[:-1]
                n = self.input.readline()
                self.lineno = self.lineno + 1
                n = n.lstrip()
                n = n.rstrip()
                if not n:
                    break
                else:
                    line = line + n
        return line

    def getlineno(self):
        return self.lineno

    def push(self, token):
        self.tokens.insert(0, token)

    def debug(self):
        print("Last token: ", self.last)
        print("Token queue: ", self.tokens)
        print("Line %d end: " % (self.lineno), self.line)

    def token(self):
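        """Return the next token as a (kind, value) tuple, where kind is
           one of 'preproc', 'string', 'comment', 'sep', 'op' or 'name',
           or None at end of input."""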
        while self.tokens == []:
            if self.line == "":
                line = self.getline()
            else:
                line = self.line
                self.line = ""
            if line is None:
                return None

            if line[0] == '#':
                self.tokens = list(map((lambda x: ('preproc', x)),
                                       line.split()))

                # We might have whitespace between the '#' and preproc
                # macro name, so instead of having a single token element
                # of '#define' we might end up with '#' and 'define'. This
                # merges them back together
                if self.tokens[0][1] == "#":
                    self.tokens[0] = ('preproc', self.tokens[0][1] + self.tokens[1][1])
                    self.tokens = self.tokens[:1] + self.tokens[2:]
                break
            l = len(line)
            if line[0] == '"' or line[0] == "'":
                end = line[0]
                line = line[1:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == end:
                            self.line = line[i+1:]
                            line = line[:i]
                            l = i
                            found = 1
                            break
                        if line[i] == '\\':
                            i = i + 1
                        i = i + 1
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('string', tok)
                return self.last

            if l >= 2 and line[0] == '/' and line[1] == '*':
                line = line[2:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    l = len(line)
                    while i < l:
                        if line[i] == '*' and i+1 < l and line[i+1] == '/':
                            self.line = line[i+2:]
                            line = line[:i-1]
                            l = i
                            found = 1
                            break
                        i = i + 1
                    if tok != "":
                        tok = tok + "\n"
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('comment', tok)
                return self.last
            if l >= 2 and line[0] == '/' and line[1] == '/':
                line = line[2:]
                self.last = ('comment', line)
                return self.last
            i = 0
            while i < l:
                if line[i] == '/' and i+1 < l and line[i+1] == '/':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '/' and i+1 < l and line[i+1] == '*':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '"' or line[i] == "'":
                    self.line = line[i:]
                    line = line[:i]
                    break
                i = i + 1
            l = len(line)
            i = 0
            while i < l:
                if line[i] == ' ' or line[i] == '\t':
                    i = i + 1
                    continue
                o = ord(line[i])
                if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                   (o >= 48 and o <= 57):
                    s = i
                    while i < l:
                        o = ord(line[i])
                        if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                           (o >= 48 and o <= 57) or \
                           (" \t(){}:;,+-*/%&!|[]=><".find(line[i]) == -1):
                            i = i + 1
                        else:
                            break
                    self.tokens.append(('name', line[s:i]))
                    continue
                if "(){}:;,[]".find(line[i]) != -1:
#                 if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
#                   line[i] == '}' or line[i] == ':' or line[i] == ';' or \
#                   line[i] == ',' or line[i] == '[' or line[i] == ']':
                    self.tokens.append(('sep', line[i]))
                    i = i + 1
                    continue
                if "+-*><=/%&!|.".find(line[i]) != -1:
#                 if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
#                   line[i] == '>' or line[i] == '<' or line[i] == '=' or \
#                   line[i] == '/' or line[i] == '%' or line[i] == '&' or \
#                   line[i] == '!' or line[i] == '|' or line[i] == '.':
                    if line[i] == '.' and  i + 2 < l and \
                       line[i+1] == '.' and line[i+2] == '.':
                        self.tokens.append(('name', '...'))
                        i = i + 3
                        continue

                    j = i + 1
                    if j < l and (
                       "+-*><=/%&!|".find(line[j]) != -1):
#                       line[j] == '+' or line[j] == '-' or line[j] == '*' or \
#                       line[j] == '>' or line[j] == '<' or line[j] == '=' or \
#                       line[j] == '/' or line[j] == '%' or line[j] == '&' or \
#                       line[j] == '!' or line[j] == '|'):
                        self.tokens.append(('op', line[i:j+1]))
                        i = j + 1
                    else:
                        self.tokens.append(('op', line[i]))
                        i = i + 1
                    continue
                s = i
                while i < l:
                    o = ord(line[i])
                    if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
                       (o >= 48 and o <= 57) or \
                       (" \t(){}:;,+-*/%&!|[]=><".find(line[i]) == -1):
#                        line[i] != ' ' and line[i] != '\t' and
#                        line[i] != '(' and line[i] != ')' and
#                        line[i] != '{'  and line[i] != '}' and
#                        line[i] != ':' and line[i] != ';' and
#                        line[i] != ',' and line[i] != '+' and
#                        line[i] != '-' and line[i] != '*' and
#                        line[i] != '/' and line[i] != '%' and
#                        line[i] != '&' and line[i] != '!' and
#                        line[i] != '|' and line[i] != '[' and
#                        line[i] != ']' and line[i] != '=' and
#                        line[i] != '*' and line[i] != '>' and
#                        line[i] != '<'):
                        i = i + 1
                    else:
                        break
                self.tokens.append(('name', line[s:i]))

        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        self.last = tok
        return tok

class CParser:
    """The C module parser"""
    def __init__(self, filename, idx = None):
        self.filename = filename
        if len(filename) > 2 and filename[-2:] == '.h':
            self.is_header = 1
        else:
            self.is_header = 0
        self.input = open(filename)
        self.lexer = CLexer(self.input)
        if idx is None:
            self.index = index()
        else:
            self.index = idx
        self.top_comment = ""
        self.last_comment = ""
        self.comment = None
        self.collect_ref = 0
        self.no_error = 0
        self.conditionals = []
        self.defines = []

    def collect_references(self):
        self.collect_ref = 1

    def stop_error(self):
        self.no_error = 1

    def start_error(self):
        self.no_error = 0

    def lineno(self):
        return self.lexer.getlineno()

    def index_add(self, name, module, static, type, info=None, extra = None):
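        # Symbols found in headers are registered with the header as both
        # header and module; symbols found in C files keep only the module.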
        if self.is_header == 1:
            self.index.add(name, module, module, static, type, self.lineno(),
                           info, extra, self.conditionals)
        else:
            self.index.add(name, None, module, static, type, self.lineno(),
                           info, extra, self.conditionals)

    def index_add_ref(self, name, module, static, type, info=None,
                      extra = None):
        if self.is_header == 1:
            self.index.add_ref(name, module, module, static, type,
                               self.lineno(), info, extra, self.conditionals)
        else:
            self.index.add_ref(name, None, module, static, type, self.lineno(),
                               info, extra, self.conditionals)

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        if self.no_error:
            return
        print(msg)

    def error(self, msg, token=-1):
        if self.no_error:
            return

        print("Parse Error: " + msg)
        if token != -1:
            print("Got token ", token)
        self.lexer.debug()
        sys.exit(1)

    def debug(self, msg, token=-1):
        print("Debug: " + msg)
        if token != -1:
            print("Got token ", token)
        self.lexer.debug()

    def parseTopComment(self, comment):
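        """Parse the file's top comment block into a dict of 'tag: value'
           entries and store it in self.index.info."""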
        res = {}
        lines = comment.split("\n")
        item = None
        for line in lines:
            line = line.lstrip().lstrip('*').lstrip()

            m = re.match('([_.a-zA-Z0-9]+):(.*)', line)
            if m:
                item = m.group(1)
                line = m.group(2).lstrip()

            if item:
                if item in res:
                    res[item] = res[item] + " " + line
                else:
                    res[item] = line
        self.index.info = res

    def strip_lead_star(self, line):
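        """Drop the first '*' (after optional leading whitespace) from a comment line."""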
        l = len(line)
        i = 0
        while i < l:
            if line[i] == ' ' or line[i] == '\t':
                i += 1
            elif line[i] == '*':
                return line[:i] + line[i + 1:]
            else:
                 return line
        return line

    def cleanupComment(self):
        if not isinstance(self.comment, str):
            return
        # remove the leading * on multi-line comments
        lines = self.comment.splitlines(True)
        com = ""
        for line in lines:
            com = com + self.strip_lead_star(line)
        self.comment = com.strip()

    def parseComment(self, token):
        com = token[1]
        if self.top_comment == "":
            self.top_comment = com
        if self.comment is None or com[0] == '*':
            self.comment = com
        else:
            self.comment = self.comment + com
        token = self.lexer.token()

        if self.comment.find("DOC_DISABLE") != -1:
            self.stop_error()

        if self.comment.find("DOC_ENABLE") != -1:
            self.start_error()

        return token

    #
    # Parse a comment block associated with a typedef
    #
    def parseTypeComment(self, name, quiet = 0):
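        """Parse the comment block attached to a typedef and return the
           description found there."""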
        if name[0:2] == '__':
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for type %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in type comment for %s" % (name))
            return((args, desc))
        lines = self.comment.split('\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted type comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = l.strip()
            desc = desc + " " + l
            del lines[0]

        desc = desc.strip()

        if quiet == 0:
            if desc == "":
                self.warning("Type comment for %s lacks description of the type" % (name))

        return(desc)
    #
    # Parse a comment block associated with a macro
    #
    def parseMacroComment(self, name, quiet = 0):
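        """Parse the comment block attached to a macro and return an
           (args, desc) tuple."""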
        global ignored_macros

        if name[0:2] == '__':
            quiet = 1
        if name in ignored_macros:
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for macro %s" % (name))
            return((args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in macro comment for %s" % (name))
            return((args, desc))
        lines = self.comment.split('\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted macro comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return((args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = l.split(':', 1)
                desc = desc.strip()
                arg = arg.strip()
            except:
                if not quiet:
                    self.warning("Misformatted macro comment for %s" % (name))
                    self.warning("  problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = lines[0].strip()
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + l.strip()
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            args.append((arg, desc))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = ""
        while len(lines) > 0:
            l = lines[0]
            while len(l) > 0 and l[0] == '*':
                l = l[1:]
            l = l.strip()
            desc = desc + " " + l
            del lines[0]

        desc = desc.strip()

        if quiet == 0:
            if desc == "":
                self.warning("Macro comment for %s lacks description of the macro" % (name))

        return((args, desc))

     #
     # Parse a comment block and merge the information found in the
     # parameter descriptions, finally returning a block as complete
     # as possible
     #
    def mergeFunctionComment(self, name, description, quiet = 0):
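        """Merge the (return, arguments) signature parsed from the code
           with the documentation comment and return a
           ((type, retdesc), args, desc) triple."""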
        global ignored_functions

        if name == 'main':
            quiet = 1
        if name[0:2] == '__':
            quiet = 1
        if name in ignored_functions:
            quiet = 1

        (ret, args) = description
        desc = ""
        retdesc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for function %s" % (name))
            return(((ret[0], retdesc), args, desc))
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in function comment for %s" % (name))
            return(((ret[0], retdesc), args, desc))
        lines = self.comment.split('\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % (name):
            if not quiet:
                self.warning("Misformatted function comment for %s" % (name))
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return(((ret[0], retdesc), args, desc))
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        nbargs = len(args)
        while len(lines) > 0 and lines[0][0:3] == '* @':
            l = lines[0][3:]
            try:
                (arg, desc) = l.split(':', 1)
                desc = desc.strip()
                arg = arg.strip()
            except:
                if not quiet:
                    self.warning("Misformatted function comment for %s" % (name))
                    self.warning("  problem with '%s'" % (lines[0]))
                del lines[0]
                continue
            del lines[0]
            l = lines[0].strip()
            while len(l) > 2 and l[0:3] != '* @':
                while l[0] == '*':
                    l = l[1:]
                desc = desc + ' ' + l.strip()
                del lines[0]
                if len(lines) == 0:
                    break
                l = lines[0]
            i = 0
            while i < nbargs:
                if args[i][1] == arg:
                    args[i] = (args[i][0], arg, desc)
                    break
                i = i + 1
            if i >= nbargs:
                if not quiet:
                    self.warning("Unable to find arg %s from function comment for %s" % (
                       arg, name))
        while len(lines) > 0 and lines[0] == '*':
            del lines[0]
        desc = None
        while len(lines) > 0:
            l = lines[0]
            i = 0
            # Remove all leading '*', followed by at most one ' ' character
            # since we need to preserve correct indentation of code examples
            while i < len(l) and l[i] == '*':
                i = i + 1
            if i > 0:
                if i < len(l) and l[i] == ' ':
                    i = i + 1
                l = l[i:]
            if len(l) >= 6 and l[0:7] == "Returns":
                try:
                    l = l.split(' ', 1)[1]
                except:
                    l = ""
                retdesc = l.strip()
                del lines[0]
                while len(lines) > 0:
                    l = lines[0]
                    while len(l) > 0 and l[0] == '*':
                        l = l[1:]
                    l = l.strip()
                    retdesc = retdesc + " " + l
                    del lines[0]
            else:
                if desc is not None:
                    desc = desc + "\n" + l
                else:
                    desc = l
                del lines[0]

        if desc is None:
            desc = ""
        retdesc = retdesc.strip()
        desc = desc.strip()

        if quiet == 0:
             #
             # report missing comments
             #
            i = 0
            while i < nbargs:
                if args[i][2] is None and args[i][0] != "void" and args[i][1] is not None:
                    self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
                i = i + 1
            if retdesc == "" and ret[0] != "void":
                self.warning("Function comment for %s lacks description of return value" % (name))
            if desc == "":
                self.warning("Function comment for %s lacks description of the function" % (name))


        return(((ret[0], retdesc), args, desc))

    def parsePreproc(self, token):
        if debug:
            print("=> preproc ", token, self.lexer.tokens)
        name = token[1]
        if name == "#include":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                self.index_add(token[1], self.filename, not self.is_header,
                                "include")
                return self.lexer.token()
            return token
        if name == "#define":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                 # TODO macros with arguments
                name = token[1]
                lst = []
                token = self.lexer.token()
                while token is not None and token[0] == 'preproc' and \
                      token[1][0] != '#':
                    lst.append(token[1])
                    token = self.lexer.token()
                try:
                    name = name.split('(') [0]
                except:
                    pass

                # skip hidden macros
                if name in hidden_macros:
                    return token

                strValue = None
                if len(lst) == 1 and lst[0][0] == '"' and lst[0][-1] == '"':
                    strValue = lst[0][1:-1]
                (args, desc) = self.parseMacroComment(name, not self.is_header)
                self.index_add(name, self.filename, not self.is_header,
                               "macro", (args, desc, strValue))
                return token

        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        # We process conditionals (i.e. tokens from #ifdef, #ifndef,
        # #if, #else and #endif) for headers and mainline code,
        # store the ones from the header in libxml2-api.xml, and later
        # (in the routine merge_public) verify that the two (header and
        # mainline code) agree.
        #
        # There is a small problem with processing the headers. Some of
        # the variables are not concerned with enabling / disabling of
        # library functions (e.g. '__XML_PARSER_H__'), and we don't want
        # them to be included in libxml2-api.xml, or involved in
        # the check between the header and the mainline code.  To
        # accomplish this, we ignore any conditional which doesn't include
        # the string 'ENABLED'
        #
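        # For example (hypothetical guard and symbol names), a header
        # fragment like:
        #     #ifdef WITH_FOO_ENABLED
        #     int virFooBar(void);
        #     #endif
        # records the conditional "defined(WITH_FOO_ENABLED)" for
        # virFooBar, while a guard without 'ENABLED' in its name is
        # simply ignored for this check.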
        if name == "#ifdef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append("defined(%s)" % apstr)
            except:
                pass
        elif name == "#ifndef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append("!defined(%s)" % apstr)
            except:
                pass
        elif name == "#if":
            apstr = ""
            for tok in self.lexer.tokens:
                if apstr != "":
                    apstr = apstr + " "
                apstr = apstr + tok[1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append(apstr)
            except:
                pass
        elif name == "#else":
            if self.conditionals != [] and \
               self.defines[-1].find('ENABLED') != -1:
                self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
        elif name == "#endif":
            if self.conditionals != [] and \
               self.defines[-1].find('ENABLED') != -1:
                self.conditionals = self.conditionals[:-1]
            self.defines = self.defines[:-1]
        token = self.lexer.token()
        while token is not None and token[0] == 'preproc' and \
            token[1][0] != '#':
            token = self.lexer.token()
        return token

     #
     # token acquisition on top of the lexer; it handles preprocessor
     # directives and comments internally since they are logically not part of
     # the program structure.
     #
    def push(self, tok):
        self.lexer.push(tok)

    def token(self):
        global ignored_words

        token = self.lexer.token()
        while token is not None:
            if token[0] == 'comment':
                token = self.parseComment(token)
                continue
            elif token[0] == 'preproc':
                token = self.parsePreproc(token)
                continue
            elif token[0] == "name" and token[1] == "__const":
                token = ("name", "const")
                return token
            elif token[0] == "name" and token[1] == "__attribute":
                token = self.lexer.token()
                while token is not None and token[1] != ";":
                    token = self.lexer.token()
                return token
            elif token[0] == "name" and token[1] in ignored_words:
                (n, info) = ignored_words[token[1]]
                i = 0
                while i < n:
                    token = self.lexer.token()
                    i = i + 1
                token = self.lexer.token()
                continue
            else:
                if debug:
                    print("=> ", token)
                return token
        return None

     #
     # Parse a typedef, it records the type and its name.
     #
    def parseTypedef(self, token):
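        # Function pointer typedefs are registered as 'functype', struct
        # typedefs as 'struct', anything else as a plain 'typedef'.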
        if token is None:
            return None
        token = self.parseType(token)
        if token is None:
            self.error("parsing typedef")
            return None
        base_type = self.type
        type = base_type
         #self.debug("end typedef type", token)
        while token is not None:
            if token[0] == "name":
                name = token[1]
                signature = self.signature
                if signature is not None:
                    type = type.split('(')[0]
                    d = self.mergeFunctionComment(name,
                            ((type, None), signature), 1)
                    self.index_add(name, self.filename, not self.is_header,
                                    "functype", d)
                else:
                    if base_type == "struct":
                        self.index_add(name, self.filename, not self.is_header,
                                        "struct", type)
                        base_type = "struct " + name
                    else:
                        # TODO report missing or misformatted comments
                        info = self.parseTypeComment(name, 1)
                        self.index_add(name, self.filename, not self.is_header,
                                    "typedef", type, info)
                token = self.token()
            else:
                self.error("parsing typedef: expecting a name")
                return token
             #self.debug("end typedef", token)
            if token is not None and token[0] == 'sep' and token[1] == ',':
                type = base_type
                token = self.token()
                while token is not None and token[0] == "op":
                    type = type + token[1]
                    token = self.token()
            elif token is not None and token[0] == 'sep' and token[1] == ';':
                break
            elif token is not None and token[0] == 'name':
                type = base_type
                continue
            else:
                self.error("parsing typedef: expecting ';'", token)
                return token
        token = self.token()
        return token

     #
     # Parse a C code block, used for functions; it parses till
     # the balancing } included
     #
    def parseBlock(self, token):
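        # When collect_references() was requested, uses of vir*, XEN_* and
        # LIBXEN_* symbols inside the block are also recorded as references.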
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.comment = None
                token = self.token()
                return token
            else:
                if self.collect_ref == 1:
                    oldtok = token
                    token = self.token()
                    if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
                        if token[0] == "sep" and token[1] == "(":
                            self.index_add_ref(oldtok[1], self.filename,
                                                0, "function")
                            token = self.token()
                        elif token[0] == "name":
                            token = self.token()
                            if token[0] == "sep" and (token[1] == ";" or
                               token[1] == "," or token[1] == "="):
                                self.index_add_ref(oldtok[1], self.filename,
                                                    0, "type")
                    elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                            0, "typedef")
                    elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
                        self.index_add_ref(oldtok[1], self.filename,
                                            0, "typedef")

                else:
                    token = self.token()
        return token

     #
     # Parse a C struct definition till the balancing }
     #
    def parseStruct(self, token):
        fields = []
         #self.debug("start parseStruct", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.struct_fields = fields
                 #self.debug("end parseStruct", token)
                 #print(fields)
                token = self.token()
                return token
            else:
                base_type = self.type
                 #self.debug("before parseType", token)
                token = self.parseType(token)
                 #self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        if self.type == "union":
                            fields.append((self.type, fname, self.comment,
                                           self.union_fields))
                            self.union_fields = []
                        else:
                            fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseStruct: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseStruct: expecting ;", token)
                else:
                    self.error("parseStruct: name", token)
                    token = self.token()
                self.type = base_type
        self.struct_fields = fields
         #self.debug("end parseStruct", token)
         #print(fields)
        return token

     #
     # Parse a C union definition till the balancing }
     #
    def parseUnion(self, token):
        fields = []
        # self.debug("start parseUnion", token)
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                self.union_fields = fields
                # self.debug("end parseUnion", token)
                # print(fields)
                token = self.token()
                return token
            else:
                base_type = self.type
                # self.debug("before parseType", token)
                token = self.parseType(token)
                # self.debug("after parseType", token)
                if token is not None and token[0] == "name":
                    fname = token[1]
                    token = self.token()
                    if token[0] == "sep" and token[1] == ";":
                        self.comment = None
                        token = self.token()
                        self.cleanupComment()
                        fields.append((self.type, fname, self.comment))
                        self.comment = None
                    else:
                        self.error("parseUnion: expecting ;", token)
                elif token is not None and token[0] == "sep" and token[1] == "{":
                    token = self.token()
                    token = self.parseTypeBlock(token)
                    if token is not None and token[0] == "name":
                        token = self.token()
                    if token is not None and token[0] == "sep" and token[1] == ";":
                        token = self.token()
                    else:
                        self.error("parseUnion: expecting ;", token)
                else:
                    self.error("parseUnion: name", token)
                    token = self.token()
                self.type = base_type
        self.union_fields = fields
        # self.debug("end parseUnion", token)
        # print(fields)
        return token

     #
     # Parse a C enum block, parse till the balancing }
     #
    def parseEnumBlock(self, token):
        self.enums = []
        name = None
        comment = ""
        value = "-1"
        commentsBeforeVal = self.comment is not None
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                if name is not None:
                    self.cleanupComment()
                    if self.comment is not None:
                        comment = self.comment
                        self.comment = None
                    self.enums.append((name, value, comment))
                token = self.token()
                return token
            elif token[0] == "name":
                self.cleanupComment()
                if name is not None:
                    if self.comment is not None:
                        comment = self.comment.strip()
                        self.comment = None
                    self.enums.append((name, value, comment))
                name = token[1]
                comment = ""
                token = self.token()
                if token[0] == "op" and token[1][0] == "=":
                    value = ""
                    if len(token[1]) > 1:
                        value = token[1][1:]
                    token = self.token()
                    while token[0] != "sep" or (token[1] != ',' and
                          token[1] != '}'):
                        # We might be dealing with '1U << 12' here
                        value = value + re.sub(r"^(\d+)U$", "\\1", token[1])
                        token = self.token()
                else:
                    try:
                        value = "%d" % (int(value) + 1)
                    except:
                        self.warning("Failed to compute value of enum %s" % (name))
                        value=""
                if token[0] == "sep" and token[1] == ",":
                    if commentsBeforeVal:
                        self.cleanupComment()
                        self.enums.append((name, value, self.comment))
                        name = comment = self.comment = None
                    token = self.token()
            else:
                token = self.token()
        return token

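     #
     # Parse the rest of a VIR_ENUM_DECL() invocation.  By the time we get
     # here the macro name and the opening '(' have already been consumed
     # by parseType(), so only a single name, the closing ')' and an
     # optional ';' remain, e.g. (illustrative): VIR_ENUM_DECL(virExample);
     #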
    def parseVirEnumDecl(self, token):
        if token[0] != "name":
            self.error("parsing VIR_ENUM_DECL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_ENUM_DECL: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

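     #
     # Skip over the body of a VIR_ENUM_IMPL() invocation: the type name,
     # the sentinel name, then a list of strings each optionally wrapped
     # in N_() for translation, up to the closing ')'.  For example
     # (illustrative only):
     #   VIR_ENUM_IMPL(virExample, VIR_EXAMPLE_LAST,
     #                 "first", N_("second"));
     #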
    def parseVirEnumImpl(self, token):
        # First the type name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
        token = self.token()

        # Now the sentinel name
        if token[0] != "name":
            self.error("parsing VIR_ENUM_IMPL: expecting name", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        if token[1] != ',':
            self.error("parsing VIR_ENUM_IMPL: expecting ','", token)

        token = self.token()

        # Now a list of strings (optional comments)
        while token is not None:
            isGettext = False
            # First a string, optionally with N_(...)
            if token[0] == 'name':
                if token[1] != 'N_':
                    self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
                token = self.token()
                if token[0] != "sep" or token[1] != '(':
                    self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
                token = self.token()
                isGettext = True

                if token[0] != "string":
                    self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
                token = self.token()
            elif token[0] == "string":
                token = self.token()
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting a string", token)

            # Then a separator
            if token[0] == "sep":
                if isGettext and token[1] == ')':
                    token = self.token()

                if token[1] == ',':
                    token = self.token()

                if token[1] == ')':
                    token = self.token()
                    break

            # Then an optional comment
            if token[0] == "comment":
                token = self.token()


        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

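     #
     # Skip over a VIR_LOG_INIT() invocation: a single string literal
     # followed by the closing ')' and an optional ';', e.g.
     # (illustrative): VIR_LOG_INIT("example.module");
     #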
    def parseVirLogInit(self, token):
        if token[0] != "string":
            self.error("parsing VIR_LOG_INIT: expecting string", token)

        token = self.token()

        if token[0] != "sep":
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        if token[1] != ')':
            self.error("parsing VIR_LOG_INIT: expecting ')'", token)

        token = self.token()
        if token[0] == "sep" and token[1] == ';':
            token = self.token()

        return token

     #
     # Parse a C definition block, used for structs or unions; it parses
     # till the balancing }
     #
    def parseTypeBlock(self, token):
        while token is not None:
            if token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseTypeBlock(token)
            elif token[0] == "sep" and token[1] == "}":
                token = self.token()
                return token
            else:
                token = self.token()
        return token

     #
     # Parse a type: the fact that the type name can either occur after
     #    the definition or within the definition makes it a little harder;
     #    if inside, the name token is pushed back before returning
     #
    def parseType(self, token):
        self.type = ""
        self.struct_fields = []
        self.union_fields = []
        self.signature = None
        if token is None:
            return token

        while token[0] == "name" and (
              token[1] == "const" or \
              token[1] == "unsigned" or \
              token[1] == "signed"):
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()

        if token[0] == "name" and token[1] == "long":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

            # some read ahead for long long
            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "long":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

            oldtmp = token
            token = self.token()
            if token[0] == "name" and token[1] == "int":
                self.type = self.type + " " + token[1]
            else:
                self.push(token)
                token = oldtmp

        elif token[0] == "name" and token[1] == "short":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]

        elif token[0] == "name" and token[1] == "struct":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseStruct(token)
            elif token is not None and token[0] == "op" and token[1] == "*":
                self.type = self.type + " " + nametok[1] + " *"
                token = self.token()
                while token is not None and token[0] == "op" and token[1] == "*":
                    self.type = self.type + " *"
                    token = self.token()
                if token[0] == "name":
                    nametok = token
                    token = self.token()
                else:
                    self.error("struct : expecting name", token)
                    return token
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "union":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            token = self.token()
            nametok = None
            if token[0] == "name":
                nametok = token
                token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                token = self.token()
                token = self.parseUnion(token)
            elif token is not None and token[0] == "name" and nametok is not None:
                self.type = self.type + " " + nametok[1]
                return token

            if nametok is not None:
                self.lexer.push(token)
                token = nametok
            return token

        elif token[0] == "name" and token[1] == "enum":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
            self.enums = []
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "{":
                # drop comments before the enum block
                self.comment = None
                token = self.token()
                token = self.parseEnumBlock(token)
            else:
                self.error("parsing enum: expecting '{'", token)
            enum_type = None
            if token is not None and token[0] != "name":
                self.lexer.push(token)
                token = ("name", "enum")
            else:
                enum_type = token[1]
            for enum in self.enums:
                self.index_add(enum[0], self.filename,
                               not self.is_header, "enum",
                               (enum[1], enum[2], enum_type))
            return token
        elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumDecl(token)
            else:
                self.error("parsing VIR_ENUM_DECL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumdecl")
            return token

        elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirEnumImpl(token)
            else:
                self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virenumimpl")
            return token

        elif token[0] == "name" and token[1] == "VIR_LOG_INIT":
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == "(":
                token = self.token()
                token = self.parseVirLogInit(token)
            else:
                self.error("parsing VIR_LOG_INIT: expecting '('", token)
            if token is not None:
                self.lexer.push(token)
                token = ("name", "virloginit")
            return token

        elif token[0] == "name":
            if self.type == "":
                self.type = token[1]
            else:
                self.type = self.type + " " + token[1]
        else:
            self.error("parsing type %s: expecting a name" % (self.type),
                       token)
            return token
        token = self.token()
        while token is not None and (token[0] == "op" or
              token[0] == "name" and token[1] == "const"):
            self.type = self.type + " " + token[1]
            token = self.token()

         #
         # if there is a parenthesis here, this means a function type
         #
        if token is not None and token[0] == "sep" and token[1] == '(':
            self.type = self.type + token[1]
            token = self.token()
            while token is not None and token[0] == "op" and token[1] == '*':
                self.type = self.type + token[1]
                token = self.token()
            if token is None or token[0] != "name" :
                self.error("parsing function type, name expected", token)
                return token
            self.type = self.type + token[1]
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == ')':
                self.type = self.type + token[1]
                token = self.token()
                if token is not None and token[0] == "sep" and token[1] == '(':
                    token = self.token()
                    type = self.type
                    token = self.parseSignature(token)
                    self.type = type
                else:
                    self.error("parsing function type, '(' expected", token)
                    return token
            else:
                self.error("parsing function type, ')' expected", token)
                return token
            self.lexer.push(token)
            token = nametok
            return token

         #
         # do some lookahead for arrays
         #
        if token is not None and token[0] == "name":
            nametok = token
            token = self.token()
            if token is not None and token[0] == "sep" and token[1] == '[':
                self.type = self.type + " " + nametok[1]
                while token is not None and token[0] == "sep" and token[1] == '[':
                    self.type = self.type + token[1]
                    token = self.token()
                    while token is not None and token[0] != 'sep' and \
                          token[1] != ']' and token[1] != ';':
                        self.type = self.type + token[1]
                        token = self.token()
                if token is not None and token[0] == 'sep' and token[1] == ']':
                    self.type = self.type + token[1]
                    token = self.token()
                else:
                    self.error("parsing array type, ']' expected", token)
                    return token
            elif token is not None and token[0] == "sep" and token[1] == ':':
                 # remove :12 in case it's a limited int size
                token = self.token()
                token = self.token()
            self.lexer.push(token)
            token = nametok

        return token

     #
     # Parse a signature: '(' has been parsed and we scan the type definition
     #    up to and including the ')'
    def parseSignature(self, token):
        signature = []
        if token is not None and token[0] == "sep" and token[1] == ')':
            self.signature = []
            token = self.token()
            return token
        while token is not None:
            token = self.parseType(token)
            if token is not None and token[0] == "name":
                signature.append((self.type, token[1], None))
                token = self.token()
            elif token is not None and token[0] == "sep" and token[1] == ',':
                token = self.token()
                continue
            elif token is not None and token[0] == "sep" and token[1] == ')':
                 # only the type was provided
                if self.type == "...":
                    signature.append((self.type, "...", None))
                else:
                    signature.append((self.type, None, None))
            if token is not None and token[0] == "sep":
                if token[1] == ',':
                    token = self.token()
                    continue
                elif token[1] == ')':
                    token = self.token()
                    break
        self.signature = signature
        return token

    # this dict contains the functions that are allowed to use [unsigned]
    # long for legacy reasons in their signature and return type. this list is
    # fixed. new procedures and public APIs have to use [unsigned] long long
    long_legacy_functions = \
      { "virGetVersion"                  : (False, ("libVer", "typeVer")),
        "virConnectGetLibVersion"        : (False, ("libVer")),
        "virConnectGetVersion"           : (False, ("hvVer")),
        "virDomainGetMaxMemory"          : (True,  ()),
        "virDomainMigrate"               : (False, ("flags", "bandwidth")),
        "virDomainMigrate2"              : (False, ("flags", "bandwidth")),
        "virDomainMigrateBegin3"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateConfirm3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateDirect"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateFinish"         : (False, ("flags")),
        "virDomainMigrateFinish2"        : (False, ("flags")),
        "virDomainMigrateFinish3"        : (False, ("flags")),
        "virDomainMigratePeer2Peer"      : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePerform3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare"        : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare2"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepare3"       : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel"  : (False, ("flags", "bandwidth")),
        "virDomainMigratePrepareTunnel3" : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI"          : (False, ("flags", "bandwidth")),
        "virDomainMigrateToURI2"         : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion1"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion2"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateVersion3"       : (False, ("flags", "bandwidth")),
        "virDomainMigrateSetMaxSpeed"    : (False, ("bandwidth")),
        "virDomainSetMaxMemory"          : (False, ("memory")),
        "virDomainSetMemory"             : (False, ("memory")),
        "virDomainSetMemoryFlags"        : (False, ("memory")),
        "virDomainBlockCommit"           : (False, ("bandwidth")),
        "virDomainBlockJobSetSpeed"      : (False, ("bandwidth")),
        "virDomainBlockPull"             : (False, ("bandwidth")),
        "virDomainBlockRebase"           : (False, ("bandwidth")),
        "virDomainMigrateGetMaxSpeed"    : (False, ("bandwidth")) }

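    # report an error for any plain 'long' in the return type or the
    # parameters of a function, unless long_legacy_functions above
    # explicitly allows it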
    def checkLongLegacyFunction(self, name, return_type, signature):
        if "long" in return_type and "long long" not in return_type:
            try:
                if not CParser.long_legacy_functions[name][0]:
                    raise Exception()
            except:
                self.error(("function '%s' is not allowed to return long, "
                            "use long long instead") % (name))

        for param in signature:
            if "long" in param[0] and "long long" not in param[0]:
                try:
                    if param[1] not in CParser.long_legacy_functions[name][1]:
                        raise Exception()
                except:
                    self.error(("function '%s' is not allowed to take long "
                                "parameter '%s', use long long instead")
                               % (name, param[1]))

    # this dict contains the structs that are allowed to use [unsigned]
    # long for legacy reasons. this list is fixed. new structs have to use
    # [unsigned] long long
    long_legacy_struct_fields = \
      { "_virDomainInfo"                 : ("maxMem", "memory"),
        "_virNodeInfo"                   : ("memory"),
        "_virDomainBlockJobInfo"         : ("bandwidth") }

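    # same check as above, but applied to struct fields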
    def checkLongLegacyStruct(self, name, fields):
        for field in fields:
            if "long" in field[0] and "long long" not in field[0]:
                try:
                    if field[1] not in CParser.long_legacy_struct_fields[name]:
                        raise Exception()
                except:
                    self.error(("struct '%s' is not allowed to contain long "
                                "field '%s', use long long instead") \
                               % (name, field[1]))

     #
     # Parse a global definition, be it a type, variable or function.
     # The extern "C" blocks are a bit nasty and require it to recurse.
     #
    def parseGlobal(self, token):
        static = 0
        if token[1] == 'extern':
            token = self.token()
            if token is None:
                return token
            if token[0] == 'string':
                if token[1] == 'C':
                    token = self.token()
                    if token is None:
                        return token
                    if token[0] == 'sep' and token[1] == "{":
                        token = self.token()
#                        print('Entering extern "C line ', self.lineno())
                        while token is not None and (token[0] != 'sep' or
                              token[1] != "}"):
                            if token[0] == 'name':
                                token = self.parseGlobal(token)
                            else:
                                self.error(
                                 "token %s %s unexpected at the top level" % (
                                        token[0], token[1]))
                                token = self.parseGlobal(token)
#                        print('Exiting extern "C" line', self.lineno())
                        token = self.token()
                        return token
                else:
                    return token
        elif token[1] == 'static':
            static = 1
            token = self.token()
            if token is None or  token[0] != 'name':
                return token

        if token[1] == 'typedef':
            token = self.token()
            return self.parseTypedef(token)
        else:
            token = self.parseType(token)
            type_orig = self.type
        if token is None or token[0] != "name":
            return token
        type = type_orig
        self.name = token[1]
        token = self.token()
        while token is not None and (token[0] == "sep" or token[0] == "op"):
            if token[0] == "sep":
                if token[1] == "[":
                    type = type + token[1]
                    token = self.token()
                    while token is not None and (token[0] != "sep" or \
                          token[1] != ";"):
                        type = type + token[1]
                        token = self.token()

            if token is not None and token[0] == "op" and token[1] == "=":
                 #
                 # Skip the initialization of the variable
                 #
                token = self.token()
                if token[0] == 'sep' and token[1] == '{':
                    token = self.token()
                    token = self.parseBlock(token)
                else:
                    self.comment = None
                    while token is not None and (token[0] != "sep" or \
                          (token[1] != ';' and token[1] != ',')):
                            token = self.token()
                self.comment = None
                if token is None or token[0] != "sep" or (token[1] != ';' and
                   token[1] != ','):
                    self.error("missing ';' or ',' after value")

            if token is not None and token[0] == "sep":
                if token[1] == ";":
                    self.comment = None
                    token = self.token()
                    if type == "struct":
                        self.checkLongLegacyStruct(self.name, self.struct_fields)
                        self.index_add(self.name, self.filename,
                             not self.is_header, "struct", self.struct_fields)
                    else:
                        self.index_add(self.name, self.filename,
                             not self.is_header, "variable", type)
                    break
                elif token[1] == "(":
                    token = self.token()
                    token = self.parseSignature(token)
                    if token is None:
                        return None
                    if token[0] == "sep" and token[1] == ";":
                        self.checkLongLegacyFunction(self.name, type, self.signature)
                        d = self.mergeFunctionComment(self.name,
                                ((type, None), self.signature), 1)
                        self.index_add(self.name, self.filename, static,
                                        "function", d)
                        token = self.token()
                    elif token[0] == "sep" and token[1] == "{":
                        self.checkLongLegacyFunction(self.name, type, self.signature)
                        d = self.mergeFunctionComment(self.name,
                                ((type, None), self.signature), static)
                        self.index_add(self.name, self.filename, static,
                                        "function", d)
                        token = self.token()
                        token = self.parseBlock(token)
                elif token[1] == ',':
                    self.comment = None
                    self.index_add(self.name, self.filename, static,
                                    "variable", type)
                    type = type_orig
                    token = self.token()
                    while token is not None and token[0] == "sep":
                        type = type + token[1]
                        token = self.token()
                    if token is not None and token[0] == "name":
                        self.name = token[1]
                        token = self.token()
                else:
                    break

        return token

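     #
     # Entry point of the parser: keep feeding top-level tokens to
     # parseGlobal() until the file is exhausted, then process the file's
     # top comment and return the index that was built.
     #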
    def parse(self):
        if not quiet:
            print("Parsing %s" % (self.filename))
        token = self.token()
        while token is not None:
            if token[0] == 'name':
                token = self.parseGlobal(token)
            else:
                self.error("token %s %s unexpected at the top level" % (
                       token[0], token[1]))
                token = self.parseGlobal(token)
                return
        self.parseTopComment(self.top_comment)
        return self.index


class docBuilder:
    """A documentation builder"""
    def __init__(self, name, path='.', directories=['.'], includes=[]):
        self.name = name
        self.path = path
        self.directories = directories
        if name == "libvirt":
            self.includes = includes + list(included_files.keys())
        elif name == "libvirt-qemu":
            self.includes = includes + list(qemu_included_files.keys())
        elif name == "libvirt-lxc":
            self.includes = includes + list(lxc_included_files.keys())
        elif name == "libvirt-admin":
            self.includes = includes + list(admin_included_files.keys())
        self.modules = {}
        self.headers = {}
        self.idx = index()
        self.xref = {}
        self.index = {}
        self.basename = name
        self.errors = 0

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print(msg)

    def error(self, msg):
        self.errors += 1
        print("Error:", msg, file=sys.stderr)

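    # Record the interesting words of a documentation string in self.xref
    # so they can be emitted later as the cross-reference index;
    # punctuation is stripped and very short or overly common words are
    # ignored.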
    def indexString(self, id, str):
        if str is None:
            return
        str = str.replace("'", ' ')
        str = str.replace('"', ' ')
        str = str.replace("/", ' ')
        str = str.replace('*', ' ')
        str = str.replace("[", ' ')
        str = str.replace("]", ' ')
        str = str.replace("(", ' ')
        str = str.replace(")", ' ')
        str = str.replace("<", ' ')
        str = str.replace('>', ' ')
        str = str.replace("&", ' ')
        str = str.replace('#', ' ')
        str = str.replace(",", ' ')
        str = str.replace('.', ' ')
        str = str.replace(';', ' ')
        tokens = str.split()
        for token in tokens:
            c = token[0]
            if not re.match(r"[a-zA-Z]", c):
                pass
            elif len(token) < 3:
                pass
            else:
                lower = token.lower()
                # TODO: generalize this a bit
                if lower == 'and' or lower == 'the':
                    pass
                elif token in self.xref:
                    self.xref[token].append(id)
                else:
                    self.xref[token] = [id]

    def analyze(self):
        if not quiet:
            print("Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys())))
        self.idx.analyze()

    def scanHeaders(self):
        for header in self.headers.keys():
            parser = CParser(header)
            idx = parser.parse()
            self.headers[header] = idx
            self.idx.merge(idx)

    def scanModules(self):
        for module in self.modules.keys():
            parser = CParser(module)
            idx = parser.parse()
            # idx.analyze()
            self.modules[module] = idx
            self.idx.merge_public(idx)

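    # Collect the .c and .h files to process: every file found under the
    # configured directories whose name matches one of self.includes is
    # queued, then parsed by scanHeaders() and scanModules().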
    def scan(self):
        for directory in self.directories:
            files = glob.glob(directory + "/*.c")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if file.find(incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.modules[file] = None
            files = glob.glob(directory + "/*.h")
            for file in files:
                skip = 1
                for incl in self.includes:
                    if file.find(incl) != -1:
                        skip = 0
                        break
                if skip == 0:
                    self.headers[file] = None
        self.scanHeaders()
        self.scanModules()

    def modulename_file(self, file):
        module = os.path.basename(file)
        if module[-2:] == '.h':
            module = module[:-2]
        elif module[-2:] == '.c':
            module = module[:-2]
        return module

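    # Emit a single <enum/> element; the value is evaluated with eval()
    # when possible so that symbolic expressions are reduced to numbers,
    # falling back to the raw text otherwise.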
    def serialize_enum(self, output, name):
        id = self.idx.enums[name]
        output.write("    <enum name='%s' file='%s'" % (name,
                     self.modulename_file(id.header)))
        if id.info is not None:
            info = id.info
            if info[0] is not None and info[0] != '':
                try:
                    val = eval(info[0])
                except:
                    val = info[0]
                output.write(" value='%s'" % (val))
            if info[2] is not None and info[2] != '':
                output.write(" type='%s'" % info[2])
            if info[1] is not None and info[1] != '':
                output.write(" info='%s'" % escape(info[1]))
        output.write("/>\n")

    def serialize_macro(self, output, name):
        id = self.idx.macros[name]
        output.write("    <macro name='%s' file='%s'" % (name,
                     self.modulename_file(id.header)))
        if id.info is None:
            args = []
            desc = None
            strValue = None
        else:
            (args, desc, strValue) = id.info

        if strValue is not None:
            output.write(" string='%s'" % strValue)
        output.write(">\n")

        if desc is not None and desc != "":
            output.write("      <info><![CDATA[%s]]></info>\n" % (desc))
            self.indexString(name, desc)
        for arg in args:
            (name, desc) = arg
            if desc is not None and desc != "":
                output.write("      <arg name='%s' info='%s'/>\n" % (
                             name, escape(desc)))
                self.indexString(name, desc)
            else:
                output.write("      <arg name='%s'/>\n" % (name))
        output.write("    </macro>\n")

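    # Emit a nested <union> element for a struct field that is itself a
    # union, with one <field/> entry per union member.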
    def serialize_union(self, output, field, desc):
        output.write("      <field name='%s' type='union' info='%s'>\n" % (field[1] , desc))
        output.write("        <union>\n")
        for f in field[3]:
            desc = f[2]
            if desc is None:
                desc = ''
            else:
                desc = escape(desc)
            output.write("          <field name='%s' type='%s' info='%s'/>\n" % (f[1] , f[0], desc))

        output.write("        </union>\n")
        output.write("      </field>\n")

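    # Typedefs of structs are emitted as <struct> elements with their
    # fields expanded; any other typedef becomes a plain <typedef>
    # element.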
    def serialize_typedef(self, output, name):
        id = self.idx.typedefs[name]
        if id.info[0:7] == 'struct ':
            output.write("    <struct name='%s' file='%s' type='%s'" % (
                     name, self.modulename_file(id.header), id.info))
            name = id.info[7:]
            if (name in self.idx.structs and
                    isinstance(self.idx.structs[name].info, (list, tuple))):
                output.write(">\n")
                try:
                    for field in self.idx.structs[name].info:
                        desc = field[2]
                        self.indexString(name, desc)
                        if desc is None:
                            desc = ''
                        else:
                            desc = escape(desc)
                        if field[0] == "union":
                            self.serialize_union(output, field, desc)
                        else:
                            output.write("      <field name='%s' type='%s' info='%s'/>\n" % (field[1] , field[0], desc))
                except:
                    self.warning("Failed to serialize struct %s" % (name))
                output.write("    </struct>\n")
            else:
                output.write("/>\n")
        else :
            output.write("    <typedef name='%s' file='%s' type='%s'" % (
                         name, self.modulename_file(id.header), id.info))
            try:
                desc = id.extra
                if desc is not None and desc != "":
                    output.write(">\n      <info><![CDATA[%s]]></info>\n" % (desc))
                    output.write("    </typedef>\n")
                else:
                    output.write("/>\n")
            except:
                output.write("/>\n")

    def serialize_variable(self, output, name):
        id = self.idx.variables[name]
        if id.info is not None:
            output.write("    <variable name='%s' file='%s' type='%s'/>\n" % (
                    name, self.modulename_file(id.header), id.info))
        else:
            output.write("    <variable name='%s' file='%s'/>\n" % (
                    name, self.modulename_file(id.header)))

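    # Emit the XML element describing a single function: its conditionals,
    # return value and arguments.  Missing documentation is reported as an
    # error unless the function is listed in ignored_functions.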
    def serialize_function(self, output, name):
        id = self.idx.functions[name]
        if name == debugsym and not quiet:
            print("=>", id)

        # NB: this is consumed by a regex in 'getAPIFilenames' in hvsupport.pl
        output.write("    <%s name='%s' file='%s' module='%s'>\n" % (id.type,
                     name, self.modulename_file(id.header),
                     self.modulename_file(id.module)))
        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        if id.conditionals is not None:
            apstr = ""
            for cond in id.conditionals:
                if apstr != "":
                    apstr = apstr + " &amp;&amp; "
                apstr = apstr + cond
            output.write("      <cond>%s</cond>\n"% (apstr))
        try:
            (ret, params, desc) = id.info
            output.write("      <info><![CDATA[%s]]></info>\n" % (desc))
            self.indexString(name, desc)
            if ret[0] is not None:
                if ret[0] == "void":
                    output.write("      <return type='void'/>\n")
                elif (ret[1] is None or ret[1] == '') and name not in ignored_functions:
                    self.error("Missing documentation for return of function `%s'" % name)
                else:
                    output.write("      <return type='%s' info='%s'/>\n" % (
                             ret[0], escape(ret[1])))
                    self.indexString(name, ret[1])
            for param in params:
                if param[0] == 'void':
                    continue
                if (param[2] is None or param[2] == ''):
                    if name in ignored_functions:
                        output.write("      <arg name='%s' type='%s' info=''/>\n" % (param[1], param[0]))
                    else:
                        self.error("Missing documentation for arg `%s' of function `%s'" % (param[1], name))
                else:
                    output.write("      <arg name='%s' type='%s' info='%s'/>\n" % (param[1], param[0], escape(param[2])))
                    self.indexString(name, param[2])
        except:
            print("Exception:", sys.exc_info()[1], file=sys.stderr)
            self.warning("Failed to save function %s info: %s" % (name, repr(id.info)))
        output.write("    </%s>\n" % (id.type))

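    # Emit the <file> element for a header: the Summary, Description and
    # Author taken from its top comment, followed by an <exports/> line
    # for every symbol it defines.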
    def serialize_exports(self, output, file):
        module = self.modulename_file(file)
        output.write("    <file name='%s'>\n" % (module))
        dict = self.headers[file]
        if dict.info is not None:
            for data in ('Summary', 'Description', 'Author'):
                try:
                    output.write("     <%s>%s</%s>\n" % (
                                 data.lower(),
                                 escape(dict.info[data]),
                                 data.lower()))
                except KeyError:
                    self.warning("Header %s lacks a %s description" % (module, data))
            if 'Description' in dict.info:
                desc = dict.info['Description']
                if desc.find("DEPRECATED") != -1:
                    output.write("     <deprecated/>\n")

        ids = sorted(dict.macros.keys())
        for id in uniq(ids):
            # Macros are sometimes used to masquerade as other types.
            if id in dict.functions:
                continue
            if id in dict.variables:
                continue
            if id in dict.typedefs:
                continue
            if id in dict.structs:
                continue
            if id in dict.unions:
                continue
            if id in dict.enums:
                continue
            output.write("     <exports symbol='%s' type='macro'/>\n" % (id))
        ids = sorted(dict.enums.keys())
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='enum'/>\n" % (id))
        ids = sorted(dict.typedefs.keys())
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='typedef'/>\n" % (id))
        ids = sorted(dict.structs.keys())
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='struct'/>\n" % (id))
        ids = sorted(dict.variables.keys())
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='variable'/>\n" % (id))
        ids = sorted(dict.functions.keys())
        for id in uniq(ids):
            output.write("     <exports symbol='%s' type='function'/>\n" % (id))
        output.write("    </file>\n")

    def serialize_xrefs_files(self, output):
        headers = sorted(self.headers.keys())
        for file in headers:
            module = self.modulename_file(file)
            output.write("    <file name='%s'>\n" % (module))
            dict = self.headers[file]
            ids = uniq(list(dict.functions.keys()) + \
                       list(dict.variables.keys()) + \
                       list(dict.macros.keys()) + \
                       list(dict.typedefs.keys()) + \
                       list(dict.structs.keys()) + \
                       list(dict.enums.keys()))
            ids.sort()
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </file>\n")
        pass

    def serialize_xrefs_functions(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                for param in params:
                    if param[0] == 'void':
                        continue
                    if param[0] in funcs:
                        funcs[param[0]].append(name)
                    else:
                        funcs[param[0]] = [name]
            except:
                pass
        typ = sorted(funcs.keys())
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *" :
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = funcs[type]
            ids.sort()
            pid = ''    # not sure why we have dups, but get rid of them!
            for id in ids:
                if id != pid:
                    output.write("      <ref name='%s'/>\n" % (id))
                    pid = id
            output.write("    </type>\n")

    def serialize_xrefs_constructors(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                if ret[0] == "void":
                    continue
                if ret[0] in funcs:
                    funcs[ret[0]].append(name)
                else:
                    funcs[ret[0]] = [name]
            except:
                pass
        typ = sorted(funcs.keys())
        for type in typ:
            if type == '' or type == 'void' or type == "int" or \
               type == "char *" or type == "const char *" :
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = sorted(funcs[type])
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </type>\n")

    def serialize_xrefs_alpha(self, output):
        letter = None
        ids = sorted(self.idx.identifiers.keys())
        for id in ids:
            if id[0] != letter:
                if letter is not None:
                    output.write("    </letter>\n")
                letter = id[0]
                output.write("    <letter name='%s'>\n" % (letter))
            output.write("      <ref name='%s'/>\n" % (id))
        if letter is not None:
            output.write("    </letter>\n")

    def serialize_xrefs_references(self, output):
        typ = sorted(self.idx.identifiers.keys())
        for id in typ:
            idf = self.idx.identifiers[id]
            module = idf.header
            output.write("    <reference name='%s' href='%s'/>\n" % (id,
                         'html/' + self.basename + '-' +
                         self.modulename_file(module) + '.html#' +
                         id))

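    # Emit the word index built by indexString(): words with more than 30
    # references are skipped as too common, and a new <chunk> is started
    # at a letter boundary once more than 200 references have accumulated.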
    def serialize_xrefs_index(self, output):
        index = self.xref
        typ = sorted(index.keys())
        letter = None
        count = 0
        chunk = 0
        chunks = []
        for id in typ:
            if len(index[id]) > 30:
                continue
            if id[0] != letter:
                if letter is None or count > 200:
                    if letter is not None:
                        output.write("      </letter>\n")
                        output.write("    </chunk>\n")
                        count = 0
                        chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
                    output.write("    <chunk name='chunk%s'>\n" % (chunk))
                    first_letter = id[0]
                    chunk = chunk + 1
                elif letter is not None:
                    output.write("      </letter>\n")
                letter = id[0]
                output.write("      <letter name='%s'>\n" % (letter))
            output.write("        <word name='%s'>\n" % (id))
            tokens = index[id]
            tokens.sort()
            tok = None
            for token in tokens:
                if tok == token:
                    continue
                tok = token
                output.write("          <ref name='%s'/>\n" % (token))
                count = count + 1
            output.write("        </word>\n")
        if letter is not None:
            output.write("      </letter>\n")
            output.write("    </chunk>\n")
            if count != 0:
                chunks.append(["chunk%s" % (chunk -1), first_letter, letter])
            output.write("    <chunks>\n")
            for ch in chunks:
                output.write("      <chunk name='%s' start='%s' end='%s'/>\n" % (
                             ch[0], ch[1], ch[2]))
            output.write("    </chunks>\n")

    def serialize_xrefs(self, output):
        output.write("  <references>\n")
        self.serialize_xrefs_references(output)
        output.write("  </references>\n")
        output.write("  <alpha>\n")
        self.serialize_xrefs_alpha(output)
        output.write("  </alpha>\n")
        output.write("  <constructors>\n")
        self.serialize_xrefs_constructors(output)
        output.write("  </constructors>\n")
        output.write("  <functions>\n")
        self.serialize_xrefs_functions(output)
        output.write("  </functions>\n")
        output.write("  <files>\n")
        self.serialize_xrefs_files(output)
        output.write("  </files>\n")
        output.write("  <index>\n")
        self.serialize_xrefs_index(output)
        output.write("  </index>\n")

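    # Write the two output documents: <name>-api.xml with the full API
    # description and <name>-refs.xml with the cross-reference data.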
    def serialize(self):
        filename = "%s/%s-api.xml" % (self.path, self.name)
        if not quiet:
            print("Saving XML description %s" % (filename))
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<api name='%s'>\n" % self.name)
        output.write("  <files>\n")
        headers = sorted(self.headers.keys())
        for file in headers:
            self.serialize_exports(output, file)
        output.write("  </files>\n")
        output.write("  <symbols>\n")
        macros = sorted(self.idx.macros.keys())
        for macro in macros:
            self.serialize_macro(output, macro)
        enums = sorted(self.idx.enums.keys())
        for enum in enums:
            self.serialize_enum(output, enum)
        typedefs = sorted(self.idx.typedefs.keys())
        for typedef in typedefs:
            self.serialize_typedef(output, typedef)
        variables = sorted(self.idx.variables.keys())
        for variable in variables:
            self.serialize_variable(output, variable)
        functions = sorted(self.idx.functions.keys())
        for function in functions:
            self.serialize_function(output, function)
        output.write("  </symbols>\n")
        output.write("</api>\n")
        output.close()

        if self.errors > 0:
            print("apibuild.py: %d error(s) encountered during generation" % self.errors, file=sys.stderr)
            sys.exit(3)

        filename = "%s/%s-refs.xml" % (self.path, self.name)
        if not quiet:
            print("Saving XML Cross References %s" % (filename))
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<apirefs name='%s'>\n" % self.name)
        self.serialize_xrefs(output)
        output.write("</apirefs>\n")
        output.close()


class app:
    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print(msg)

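    # Rebuild the API description for one of the known modules; the source
    # tree is located through the srcdir/builddir environment variables,
    # with a fallback to paths relative to the current directory.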
    def rebuild(self, name):
        if name not in ["libvirt", "libvirt-qemu", "libvirt-lxc", "libvirt-admin"]:
            self.warning("rebuild() failed, unknown module %s" % name)
            return None
        builder = None
        srcdir = os.path.abspath((os.environ["srcdir"]))
        builddir = os.path.abspath((os.environ["builddir"]))
        if srcdir == builddir:
            builddir = None
        if glob.glob(srcdir + "/../src/libvirt.c") != [] :
            if not quiet:
                print("Rebuilding API description for %s" % name)
            dirs = [srcdir + "/../src",
                    srcdir + "/../src/util",
                    srcdir + "/../include/libvirt"]
            if (builddir and
                not os.path.exists(srcdir + "/../include/libvirt/libvirt-common.h")):
                dirs.append(builddir + "/../include/libvirt")
            builder = docBuilder(name, srcdir, dirs, [])
        elif glob.glob("src/libvirt.c") != [] :
            if not quiet:
                print("Rebuilding API description for %s" % name)
            builder = docBuilder(name, srcdir,
                                 ["src", "src/util", "include/libvirt"],
                                 [])
        else:
            self.warning("rebuild() failed, unable to guess the module")
            return None
        builder.scan()
        builder.analyze()
        builder.serialize()
        return builder

    #
    # for debugging the parser
    #
    def parse(self, filename):
        parser = CParser(filename)
        idx = parser.parse()
        return idx


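# With a file argument, parse just that file in debug mode; with no
# arguments, rebuild the API descriptions for all known modules (this
# needs the srcdir and builddir environment variables to be set).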
if __name__ == "__main__":
    app = app()
    if len(sys.argv) > 1:
        debug = 1
        app.parse(sys.argv[1])
    else:
        app.rebuild("libvirt")
        app.rebuild("libvirt-qemu")
        app.rebuild("libvirt-lxc")
        app.rebuild("libvirt-admin")
    if warnings > 0:
        sys.exit(2)
    else:
        sys.exit(0)