/*
 * xen_internal.c: direct access to Xen hypervisor level
 *
 * Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * See COPYING.LIB for the License of this software
 *
 * Daniel Veillard <veillard@redhat.com>
 */

#ifdef WITH_XEN

#include <stdio.h>
#include <string.h>
/* required for uint8_t, uint32_t, etc ... */
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <limits.h>
#include <regex.h>
#include <errno.h>
#include <sys/utsname.h>

/* required for dom0_getdomaininfo_t */
#include <xen/dom0_ops.h>
#include <xen/version.h>
#include <xen/xen.h>
#include <xen/linux/privcmd.h>

/* required for shutdown flags */
#include <xen/sched.h>

#include "xml.h"

/* #define DEBUG */
/*
 * So far there are 2 versions of the structures usable for making
 * hypervisor calls.
 */
/* the old one */
typedef struct v0_hypercall_struct {
    unsigned long op;
    unsigned long arg[5];
} v0_hypercall_t;
#define XEN_V0_IOCTL_HYPERCALL_CMD \
        _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))

/* the new one */
typedef struct v1_hypercall_struct
{
    uint64_t op;
    uint64_t arg[5];
} v1_hypercall_t;
#define XEN_V1_IOCTL_HYPERCALL_CMD                  \
    _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
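
/*
 * Note (illustrative, not from the original sources): the _IOC() ioctl
 * number encodes the size of the hypercall structure, so the v0 and v1
 * commands differ on 32-bit builds where sizeof(v0_hypercall_t) and
 * sizeof(v1_hypercall_t) are not equal; xenHypervisorInit() below probes
 * both commands to detect which interface the running kernel expects.
 */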
typedef v1_hypercall_t hypercall_t;

#ifndef __HYPERVISOR_sysctl
#define __HYPERVISOR_sysctl 35
#endif
#ifndef __HYPERVISOR_domctl
#define __HYPERVISOR_domctl 36
#endif

static int xen_ioctl_hypercall_cmd = 0;
static int initialized = 0;
static int in_init = 0;
static int hv_version = 0;
static int hypervisor_version = 2;
static int sys_interface_version = -1;
static int dom_interface_version = -1;
static int kb_per_pages = 0;

/* Regular expressions used by xenHypervisorGetCapabilities, and
 * compiled once by xenHypervisorInit.  Note that these are POSIX.2
 * extended regular expressions (regex(7)).
 */
static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
static regex_t flags_hvm_rec;
static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
static regex_t flags_pae_rec;
static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
static regex_t xen_cap_rec;

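/*
 * For illustration (examples, not an exhaustive list): xen_cap_re is
 * meant to match capability tokens such as "xen-3.0-x86_32p" or
 * "hvm-3.0-x86_64" as found in /sys/hypervisor/properties/capabilities.
 */
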
/*
 * The content of the structures for a getdomaininfolist system hypercall
 */
#ifndef DOMFLAGS_DYING
#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
#define DOMFLAGS_CPUSHIFT       8
#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
#define DOMFLAGS_SHUTDOWNSHIFT 16
#endif

/*
 * These flags explain why a domain is in the "shutdown" state.  Normally,
 * they are defined in xen/sched.h.
 */
#ifndef SHUTDOWN_poweroff
#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
#endif
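
/*
 * Illustrative sketch (not part of the original code): a domain's
 * shutdown reason is packed into the getdomaininfo flags field, so it
 * can be recovered with the masks above:
 *
 *     int reason = (flags >> DOMFLAGS_SHUTDOWNSHIFT) & DOMFLAGS_SHUTDOWNMASK;
 *     if (reason == SHUTDOWN_reboot)
 *         ... the guest asked for a clean restart ...
 */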

#define XEN_V0_OP_GETDOMAININFOLIST	38
#define XEN_V1_OP_GETDOMAININFOLIST	38
#define XEN_V2_OP_GETDOMAININFOLIST	6

struct xen_v0_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see above */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    unsigned long shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;

struct xen_v2_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see above */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    uint64_t shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;

union xen_getdomaininfo {
    struct xen_v0_getdomaininfo v0;
    struct xen_v2_getdomaininfo v2;
};
typedef union xen_getdomaininfo xen_getdomaininfo;

union xen_getdomaininfolist {
    struct xen_v0_getdomaininfo *v0;
    struct xen_v2_getdomaininfo *v2;
};
typedef union xen_getdomaininfolist xen_getdomaininfolist;

#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size)                      \
    (hypervisor_version < 2 ?                                           \
     ((domlist.v0 = malloc(sizeof(xen_v0_getdomaininfo)*(size))) != NULL) : \
     ((domlist.v2 = malloc(sizeof(xen_v2_getdomaininfo)*(size))) != NULL))

#define XEN_GETDOMAININFOLIST_FREE(domlist)     \
    (hypervisor_version < 2 ?                   \
     free(domlist.v0) :                         \
     free(domlist.v2))

#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size)                  \
    (hypervisor_version < 2 ?                                       \
     memset(domlist.v0, 0, sizeof(xen_v0_getdomaininfo) * size) :   \
     memset(domlist.v2, 0, sizeof(xen_v2_getdomaininfo) * size))

#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n)    \
    (hypervisor_version < 2 ?                       \
     domlist.v0[n].domain :                         \
     domlist.v2[n].domain)

#define XEN_GETDOMAININFOLIST_DATA(domlist)     \
    (hypervisor_version < 2 ?                   \
     (void*)(domlist->v0) :                     \
     (void*)(domlist->v2))
#define XEN_GETDOMAININFO_SIZE                  \
    (hypervisor_version < 2 ?                   \
     sizeof(xen_v0_getdomaininfo) :             \
     sizeof(xen_v2_getdomaininfo))

#define XEN_GETDOMAININFO_CLEAR(dominfo)                        \
    (hypervisor_version < 2 ?                                   \
     memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) :   \
     memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)))

#define XEN_GETDOMAININFO_DOMAIN(dominfo)       \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.domain :                        \
     dominfo.v2.domain)

#define XEN_GETDOMAININFO_CPUTIME(dominfo)      \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.cpu_time :                      \
     dominfo.v2.cpu_time)

#define XEN_GETDOMAININFO_CPUCOUNT(dominfo)     \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.nr_online_vcpus :               \
     dominfo.v2.nr_online_vcpus)

#define XEN_GETDOMAININFO_MAXCPUID(dominfo)  \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.max_vcpu_id :                   \
     dominfo.v2.max_vcpu_id)

#define XEN_GETDOMAININFO_FLAGS(dominfo)        \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.flags :                         \
     dominfo.v2.flags)

#define XEN_GETDOMAININFO_TOT_PAGES(dominfo)    \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.tot_pages :                     \
     dominfo.v2.tot_pages)

#define XEN_GETDOMAININFO_MAX_PAGES(dominfo)    \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.max_pages :                     \
     dominfo.v2.max_pages)
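
/*
 * Illustrative sketch (error handling elided) of how the
 * version-independent macros above are used together:
 *
 *     xen_getdomaininfolist dominfos;
 *     int i, ret, id;
 *
 *     if (XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids)) {
 *         XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
 *         ret = virXen_getdomaininfolist(handle, 0, maxids, &dominfos);
 *         for (i = 0; i < ret; i++)
 *             id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
 *         XEN_GETDOMAININFOLIST_FREE(dominfos);
 *     }
 */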



struct xen_v0_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v0_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;


struct xen_v2_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v2_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;



struct xen_v0_domainop {
    domid_t   domain;
};
typedef struct xen_v0_domainop xen_v0_domainop;

/*
 * The information for a destroydomain system hypercall
 */
#define XEN_V0_OP_DESTROYDOMAIN	9
#define XEN_V1_OP_DESTROYDOMAIN	9
#define XEN_V2_OP_DESTROYDOMAIN	2

/*
 * The information for a pausedomain system hypercall
 */
#define XEN_V0_OP_PAUSEDOMAIN	10
#define XEN_V1_OP_PAUSEDOMAIN	10
#define XEN_V2_OP_PAUSEDOMAIN	3

/*
 * The information for an unpausedomain system hypercall
 */
#define XEN_V0_OP_UNPAUSEDOMAIN	11
#define XEN_V1_OP_UNPAUSEDOMAIN	11
#define XEN_V2_OP_UNPAUSEDOMAIN	4

/*
 * The information for a setmaxmem system hypercall
 */
#define XEN_V0_OP_SETMAXMEM	28
#define XEN_V1_OP_SETMAXMEM	28
#define XEN_V2_OP_SETMAXMEM	11

struct xen_v0_setmaxmem {
    domid_t	domain;
    uint64_t	maxmem;
};
typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;

struct xen_v2_setmaxmem {
    uint64_t	maxmem;
};
typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;

/*
 * The information for a setmaxvcpu system hypercall
 */
#define XEN_V0_OP_SETMAXVCPU	41
#define XEN_V1_OP_SETMAXVCPU	41
#define XEN_V2_OP_SETMAXVCPU	15

struct xen_v0_setmaxvcpu {
    domid_t	domain;
    uint32_t	maxvcpu;
};
typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;

struct xen_v2_setmaxvcpu {
    uint32_t	maxvcpu;
};
typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;

/*
 * The information for a setvcpumap system hypercall.
 * Note that between v1 and v2 the limitation to 64 physical CPUs was
 * lifted, hence the difference in structures.
 */
#define XEN_V0_OP_SETVCPUMAP	20
#define XEN_V1_OP_SETVCPUMAP	20
#define XEN_V2_OP_SETVCPUMAP	9

struct xen_v0_setvcpumap {
    domid_t	domain;
    uint32_t	vcpu;
    cpumap_t    cpumap;
};
typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;

struct xen_v2_cpumap {
    uint8_t    *bitmap;
    uint32_t    nr_cpus;
};
struct xen_v2_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2_cpumap cpumap;
};
typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;
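
/*
 * Illustrative sketch (assumed values): pinning vcpu 0 of a domain to
 * physical CPUs 0-3 with the v2 interface means passing a one-byte
 * bitmap to virXen_setvcpumap() below:
 *
 *     unsigned char map = 0x0f;              // CPUs 0, 1, 2 and 3
 *     virXen_setvcpumap(handle, id, 0, &map, 1);
 *
 * which gets wired into xen_v2_cpumap as bitmap = &map and
 * nr_cpus = maplen * 8.
 */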

/*
 * The information for a vcpuinfo system hypercall
 */
#define XEN_V0_OP_GETVCPUINFO   43
#define XEN_V1_OP_GETVCPUINFO	43
#define XEN_V2_OP_GETVCPUINFO   14

struct xen_v0_vcpuinfo {
    domid_t	domain;		/* owner's domain */
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as online */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanoseconds of CPU used */
    uint32_t	cpu;		/* current mapping */
    cpumap_t	cpumap;		/* deprecated in V2 */
};
typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;

struct xen_v2_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as online */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanoseconds of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;

/*
 * From V2 on, the pinning of a vcpu is read with a separate call
 */
#define XEN_V2_OP_GETVCPUMAP	25
typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;

/*
 * The hypercall operation structures also changed in
 * changeset 86d26e6ec89b
 */
/* the old structure */
struct xen_op_v0 {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v0_getdomaininfolistop getdomaininfolist;
        xen_v0_domainop          domain;
        xen_v0_setmaxmem         setmaxmem;
        xen_v0_setmaxvcpu        setmaxvcpu;
        xen_v0_setvcpumap        setvcpumap;
        xen_v0_vcpuinfo          getvcpuinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v0 xen_op_v0;
typedef struct xen_op_v0 xen_op_v1;

/* the new structure for systems operations */
struct xen_op_v2_sys {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v2_getdomaininfolistop getdomaininfolist;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_sys xen_op_v2_sys;

/* the new structure for domains operation */
struct xen_op_v2_dom {
    uint32_t cmd;
    uint32_t interface_version;
    domid_t  domain;
    union {
        xen_v2_setmaxmem         setmaxmem;
        xen_v2_setmaxvcpu        setmaxvcpu;
        xen_v2_setvcpumap        setvcpumap;
        xen_v2_vcpuinfo          getvcpuinfo;
        xen_v2_getvcpumap        getvcpumap;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_dom xen_op_v2_dom;

#include "internal.h"
#include "driver.h"
#include "xen_unified.h"
#include "xen_internal.h"

#define XEN_HYPERVISOR_SOCKET "/proc/xen/privcmd"

#ifndef PROXY
static const char * xenHypervisorGetType(virConnectPtr conn);
static unsigned long xenHypervisorGetMaxMemory(virDomainPtr domain);
#endif

#ifndef PROXY
virDriver xenHypervisorDriver = {
    -1,
    "Xen",
    (DOM0_INTERFACE_VERSION >> 24) * 1000000 +
    ((DOM0_INTERFACE_VERSION >> 16) & 0xFF) * 1000 +
    (DOM0_INTERFACE_VERSION & 0xFFFF),
    xenHypervisorOpen, /* open */
    xenHypervisorClose, /* close */
    xenHypervisorGetType, /* type */
    xenHypervisorGetVersion, /* version */
    xenHypervisorGetMaxVcpus, /* getMaxVcpus */
    NULL, /* nodeGetInfo */
    xenHypervisorGetCapabilities, /* getCapabilities */
    xenHypervisorListDomains, /* listDomains */
    xenHypervisorNumOfDomains, /* numOfDomains */
    NULL, /* domainCreateLinux */
    NULL, /* domainLookupByID */
    NULL, /* domainLookupByUUID */
    NULL, /* domainLookupByName */
    xenHypervisorPauseDomain, /* domainSuspend */
    xenHypervisorResumeDomain, /* domainResume */
    NULL, /* domainShutdown */
    NULL, /* domainReboot */
    xenHypervisorDestroyDomain, /* domainDestroy */
    NULL, /* domainGetOSType */
    xenHypervisorGetMaxMemory, /* domainGetMaxMemory */
    xenHypervisorSetMaxMemory, /* domainSetMaxMemory */
    NULL, /* domainSetMemory */
    xenHypervisorGetDomainInfo, /* domainGetInfo */
    NULL, /* domainSave */
    NULL, /* domainRestore */
    NULL, /* domainCoreDump */
    xenHypervisorSetVcpus, /* domainSetVcpus */
    xenHypervisorPinVcpu, /* domainPinVcpu */
    xenHypervisorGetVcpus, /* domainGetVcpus */
    xenHypervisorGetVcpuMax, /* domainGetMaxVcpus */
    NULL, /* domainDumpXML */
    NULL, /* listDefinedDomains */
    NULL, /* numOfDefinedDomains */
    NULL, /* domainCreate */
    NULL, /* domainDefineXML */
    NULL, /* domainUndefine */
    NULL, /* domainAttachDevice */
    NULL, /* domainDetachDevice */
    NULL, /* domainGetAutostart */
    NULL, /* domainSetAutostart */
};
#endif /* !PROXY */
/**
 * virXenError:
 * @error: the error number
 * @info: extra information string
 * @value: extra information number
 *
 * Handle an error at the Xen hypervisor interface
 */
static void
virXenError(virErrorNumber error, const char *info, int value)
{
    const char *errmsg;

    if ((error == VIR_ERR_OK) || (in_init != 0))
        return;

    errmsg = __virErrorMsg(error, info);
    __virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
                    errmsg, info, NULL, value, 0, errmsg, info);
}

/**
 * virXenPerror:
 * @conn: the connection (if available)
 * @msg: name of system call or file (as in perror(3))
 *
 * Raise error from a failed system call, using errno as the source.
 */
static void
virXenPerror (virConnectPtr conn, const char *msg)
{
    char *msg_s;

    msg_s = malloc (strlen (msg) + 10);
    if (msg_s) {
        strcpy (msg_s, msg);
        strcat (msg_s, ": %s");
    }

    __virRaiseError (conn, NULL, NULL,
                     VIR_FROM_XEN, VIR_ERR_SYSTEM_ERROR, VIR_ERR_ERROR,
                     msg, NULL, NULL, errno, 0,
                     msg_s ? msg_s : msg, strerror (errno));
}

/**
 * xenHypervisorDoV0Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation through the old interface;
 * this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
{
    int ret;
    v0_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_version << 8;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}
/**
 * xenHypervisorDoV1Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v1 operation; this leads to a hypervisor call through
 * ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV1Op(int handle, xen_op_v1* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * xenHypervisorDoV2Sys:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v2 system operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = sys_interface_version;
    hc.op = __HYPERVISOR_sysctl;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * xenHypervisorDoV2Dom:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor domain operation structure
 *
 * Do a hypervisor v2 domain operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = dom_interface_version;
    hc.op = __HYPERVISOR_domctl;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
 * Do a low level hypercall to list information about existing domains
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
virXen_getdomaininfolist(int handle, int first_domain, int maxids,
                         xen_getdomaininfolist *dominfos)
{
    int ret = -1;

    if (mlock(XEN_GETDOMAININFOLIST_DATA(dominfos),
              XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    XEN_GETDOMAININFO_SIZE * maxids);
        return (-1);
    }
    if (hypervisor_version > 1) {
        xen_op_v2_sys op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v2;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV2Sys(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    }
    if (munlock(XEN_GETDOMAININFOLIST_DATA(dominfos),
                XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    XEN_GETDOMAININFO_SIZE * maxids);
        ret = -1;
    }
    return(ret);
}

static int
virXen_getdomaininfo(int handle, int first_domain,
                     xen_getdomaininfo *dominfo) {
    xen_getdomaininfolist dominfos;

    if (hypervisor_version < 2) {
        dominfos.v0 = &(dominfo->v0);
    } else {
        dominfos.v2 = &(dominfo->v2);
    }

    return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
}


#ifndef PROXY
/**
 * virXen_pausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to pause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_pausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_PAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
811 812 813
        op.cmd = XEN_V1_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_unpausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to unpause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_unpausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
843 844 845
        op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
850 851 852
        op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_destroydomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to destroy the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_destroydomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
882 883 884
        op.cmd = XEN_V2_OP_DESTROYDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
889 890 891
        op.cmd = XEN_V1_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
896 897 898
        op.cmd = XEN_V0_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setmaxmem:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @memory: the amount of memory in kilobytes
 *
 * Do a low level hypercall to change the max memory amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxmem(int handle, int id, unsigned long memory)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
922 923 924 925
        op.cmd = XEN_V2_OP_SETMAXMEM;
        op.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
930 931 932 933
        op.cmd = XEN_V1_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}
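
/*
 * Illustrative usage (assumed values): cap domain 3 at 512 MB; the
 * @memory argument is in kilobytes, per the doc comment above:
 *
 *     virXen_setmaxmem(handle, 3, 512 * 1024);
 */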

/**
 * virXen_setmaxvcpus:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpus: the numbers of vcpus
 *
 * Do a low level hypercall to change the max vcpus amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXVCPU;
        op.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
973 974 975 976
        op.cmd = XEN_V1_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setvcpumap:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to change the pinning for vcpu
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setvcpumap(int handle, int id, unsigned int vcpu,
                  unsigned char * cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        if (mlock(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " locking", maplen);
            return (-1);
        }
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETVCPUMAP;
        op.domain = (domid_t) id;
        op.u.setvcpumap.vcpu = vcpu;
        op.u.setvcpumap.cpumap.bitmap = cpumap;
        op.u.setvcpumap.cpumap.nr_cpus = maplen * 8;
        ret = xenHypervisorDoV2Dom(handle, &op);
        if (munlock(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " release", maplen);
            ret = -1;
        }
    } else {
        cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
        uint64_t *pm = &xen_cpumap;
        int j;
        if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
            return (-1);
        memset(pm, 0, sizeof(cpumap_t));
        for (j = 0; j < maplen; j++)
        *(pm + (j / 8)) |= ((uint64_t) cpumap[j]) << (8 * (j & 7));
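        /* e.g. (illustrative): maplen = 2 with cpumap = { 0x0f, 0x01 }
         * packs into the 64-bit value 0x010f, i.e. CPUs 0-3 and 8; the
         * cast to uint64_t keeps the shift well defined past bit 31 */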

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV1Op(handle, &op);
        } else if (hypervisor_version == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV0Op(handle, &op);
        }
    }
    return(ret);
}
#endif /* !PROXY */

/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: where to store the retrieved vcpu information
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to retrieve the vcpu information and pinning
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);
        if (ret < 0)
            return(-1);
        ipt->number = vcpu;
        if (op.u.getvcpuinfo.online) {
            if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
            if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
        }
        else ipt->state = VIR_VCPU_OFFLINE;
        ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
        ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        if ((cpumap != NULL) && (maplen > 0)) {
            if (mlock(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " locking", maplen);
                return (-1);
            }
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap.bitmap = cpumap;
            op.u.setvcpumap.cpumap.nr_cpus = maplen * 8;
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (munlock(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " release", maplen);
                ret = -1;
            }
        }
    } else {
        int mapl = maplen;
        int cpu;

        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return(-1);
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hypervisor_version == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return(-1);
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
    }
    return(ret);
}
/**
 * xenHypervisorInit:
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used, i.e. pre or post changeset 10277
 */
int
xenHypervisorInit(void)
{
    int fd, ret, cmd, errcode;
    hypercall_t hc;
    v0_hypercall_t v0_hc;
    xen_getdomaininfo info;
    virVcpuInfoPtr ipt;

    if (initialized) {
        if (hypervisor_version == -1)
            return (-1);
        return(0);
    }
    initialized = 1;
    in_init = 1;

    /* Compile regular expressions used by xenHypervisorGetCapabilities.
     * Note that errors here are really internal errors since these
     * regexps should never fail to compile.
     */
    errcode = regcomp (&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_hvm_rec, error, sizeof error);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&flags_pae_rec, flags_pae_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_pae_rec, error, sizeof error);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&xen_cap_rec, xen_cap_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &xen_cap_rec, error, sizeof error);
        regfree (&xen_cap_rec);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }

    /* Xen hypervisor version detection begins. */
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        hypervisor_version = -1;
        in_init = 0;
        return(-1);
    }
    fd = ret;

    /*
     * The size of the hypervisor call block changed in July 2006;
     * this detects whether we are using the new or old hypercall_t structure
     */
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;
    hc.arg[1] = 0;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
#ifdef DEBUG
        fprintf(stderr, "Using new hypervisor call: %X\n", ret);
#endif
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        goto detect_v2;
    }

    /*
     * check if the old hypercalls are actually working
     */
    v0_hc.op = __HYPERVISOR_xen_version;
    v0_hc.arg[0] = (unsigned long) XENVER_version;
    v0_hc.arg[1] = 0;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
    if ((ret != -1) && (ret != 0)) {
#ifdef DEBUG
        fprintf(stderr, "Using old hypervisor call: %X\n", ret);
#endif
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        hypervisor_version = 0;
        goto done;
    }

    /*
     * we failed to make any hypercall
     */

    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    return(-1);

 detect_v2:
    /*
     * The hypercalls were refactored into 3 different sections in August 2006.
     * Try to detect if we are running a version post 3.0.2 with the new ones
     * or the old ones
     */
    hypervisor_version = 2;

    ipt = malloc(sizeof(virVcpuInfo));
    if (ipt == NULL){
#ifdef DEBUG
        fprintf(stderr, "Memory allocation failed at xenHypervisorInit()\n");
#endif
        close(fd);
        in_init = 0;
        return(-1);
    }
    /* Currently consider RHEL 5.0, Fedora 7 and xen-unstable */
    sys_interface_version = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* RHEL 5.0 */
        dom_interface_version = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver2 dom ver3\n");
#endif
            goto done;
        }
        /* Fedora 7 */
        dom_interface_version = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver2 dom ver4\n");
#endif
            goto done;
        }
    }

    sys_interface_version = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* xen-unstable */
        dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver3 dom ver5\n");
#endif
            goto done;
        }
    }
    hypervisor_version = 1;
    sys_interface_version = -1;
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
#ifdef DEBUG
        fprintf(stderr, "Using hypervisor call v1\n");
#endif
        goto done;
    }

    /*
     * we failed to make the getdomaininfolist hypercall
     */

    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    return(-1);

 done:
    close(fd);
    in_init = 0;
    return(0);
}

/**
 * xenHypervisorOpen:
 * @conn: pointer to the connection block
 * @name: URL for the target, NULL for local
 * @flags: combination of virDrvOpenFlag(s)
 *
 * Connects to the Xen hypervisor.
 *
 * Returns 0 or -1 in case of error.
 */
int
xenHypervisorOpen(virConnectPtr conn,
                  const char *name ATTRIBUTE_UNUSED, int flags)
{
    int ret;
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (initialized == 0)
        if (xenHypervisorInit() == -1)
            return -1;

    priv->handle = -1;

    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        if (!(flags & VIR_DRV_OPEN_QUIET))
            virXenError(VIR_ERR_NO_XEN, XEN_HYPERVISOR_SOCKET, 0);
        return (-1);
    }

    priv->handle = ret;

    return(0);
}

/**
 * xenHypervisorClose:
 * @conn: pointer to the connection block
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
xenHypervisorClose(virConnectPtr conn)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return (-1);

    priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (priv->handle < 0)
        return -1;

    ret = close(priv->handle);
    if (ret < 0)
        return (-1);

    return (0);
}


#ifndef PROXY
/**
 * xenHypervisorGetType:
 * @conn: pointer to the Xen Hypervisor block
 *
 * Get the name of the Hypervisor driver.
 *
 * Returns the static string "Xen", or NULL if @conn is not a valid
 *    connection pointer
 */
static const char *
xenHypervisorGetType(virConnectPtr conn)
{
    if (!VIR_IS_CONNECT(conn)) {
        virXenError(VIR_ERR_INVALID_CONN, __FUNCTION__, 0);
        return (NULL);
    }
    return("Xen");
}
#endif

/**
 * xenHypervisorGetVersion:
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
 *
 * Call the hypervisor to extract its own internal API version
 *
 * Returns 0 in case of success, -1 in case of error
 */
int
xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || hvVer == NULL)
        return (-1);
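    /* e.g. (illustrative): a Xen 3.0.x hypervisor reports
     * hv_version = 0x00030000, which yields hvVer = 3000000 below */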
    *hvVer = (hv_version >> 16) * 1000000 + (hv_version & 0xFFFF) * 1000;
    return(0);
}

/**
 * xenHypervisorMakeCapabilitiesXML:
 * @conn: pointer to the connection block
 * @hostmachine: the host machine architecture, as reported by uname
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Return the capabilities of this hypervisor.
 */
char *
xenHypervisorMakeCapabilitiesXML(virConnectPtr conn ATTRIBUTE_UNUSED,
                                 const char *hostmachine,
                                 FILE *cpuinfo, FILE *capabilities)
{
    char line[1024], *str, *token;
    regmatch_t subs[4];
    char *saveptr = NULL;
    int i, r;

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
1491
    struct guest_arch {
1492 1493 1494 1495
        const char *model;
        int bits;
        int hvm;
        int pae;
        int nonpae;
        int ia64_be;
    } guest_archs[32];
    int nr_guest_archs = 0;

    virBufferPtr xml;
    char *xml_str;

1504 1505
    memset(guest_archs, 0, sizeof(guest_archs));

1506 1507 1508 1509
    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
1510 1511 1512 1513 1514 1515 1516 1517 1518 1519
    if (cpuinfo) {
        while (fgets (line, sizeof line, cpuinfo)) {
            if (regexec (&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
                && subs[0].rm_so != -1) {
                strncpy (hvm_type,
                         &line[subs[1].rm_so], subs[1].rm_eo-subs[1].rm_so+1);
                hvm_type[subs[1].rm_eo-subs[1].rm_so] = '\0';
            } else if (regexec (&flags_pae_rec, line, 0, NULL, 0) == 0)
                host_pae = 1;
        }
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */
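
    /* For example (illustrative, not read from a real host): on an x86_64
     * machine with VT-x the capabilities file might read:
     *
     *     xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_64
     */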

    /* Expecting one line in this file - ignore any more. */
    if (capabilities != NULL &&
        fgets (line, sizeof line, capabilities)) {
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof guest_archs / sizeof guest_archs[0]
                 && (token = strtok_r (str, " ", &saveptr)) != NULL;
             str = NULL) {

            if (regexec (&xen_cap_rec, token, sizeof subs / sizeof subs[0],
                         subs, 0) == 0) {
                int hvm = strncmp (&token[subs[1].rm_so], "hvm", 3) == 0;
                const char *model;
                int bits, pae = 0, nonpae = 0, ia64_be = 0;
                if (strncmp (&token[subs[2].rm_so], "x86_32", 6) == 0) {
                    model = "i686";
                    bits = 32;
                    if (strncmp (&token[subs[3].rm_so], "p", 1) == 0)
                        pae = 1;
                    else
                        nonpae = 1;
                }
                else if (strncmp (&token[subs[2].rm_so], "x86_64", 6) == 0) {
                    model = "x86_64";
                    bits = 64;
                }
                else if (strncmp (&token[subs[2].rm_so], "ia64", 4) == 0) {
                    model = "ia64";
                    bits = 64;
                    if (strncmp (&token[subs[3].rm_so], "be", 2) == 0)
                        ia64_be = 1;
                }
                else if (strncmp (&token[subs[2].rm_so], "powerpc64", 9) == 0) {
                    model = "ppc64";
                    bits = 64;
                } else {
                    /* XXX surely no other Xen archs exist  */
                    continue;
                }
1586

1587 1588 1589 1590 1591 1592 1593
                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (!strcmp(guest_archs[i].model, model) &&
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }
                /* Too many arch flavours - highly unlikely ! */
                if (i >= sizeof(guest_archs)/sizeof(guest_archs[0]))
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

                guest_archs[i].model = model;
                guest_archs[i].bits = bits;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports
                   separately capabilities so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
            }
        }
    }

    /* Construct the final XML. */
    xml = virBufferNew (1024);
    if (!xml) return NULL;
    r = virBufferVSprintf (xml,
                           "\
<capabilities>\n\
  <host>\n\
    <cpu>\n\
      <arch>%s</arch>\n\
      <features>\n",
                           hostmachine);
    if (r == -1) goto vir_buffer_failed;

    if (strcmp (hvm_type, "") != 0) {
        r = virBufferVSprintf (xml,
                               "\
        <%s/>\n",
                               hvm_type);
        if (r == -1) goto vir_buffer_failed;
    }
    if (host_pae) {
        r = virBufferAdd (xml, "\
        <pae/>\n", -1);
        if (r == -1) goto vir_buffer_failed;
    }
    r = virBufferAdd (xml,
                      "\
      </features>\n\
    </cpu>\n\
  </host>\n", -1);
    if (r == -1) goto vir_buffer_failed;

    for (i = 0; i < nr_guest_archs; ++i) {
        r = virBufferVSprintf (xml,
                               "\
\n\
  <guest>\n\
    <os_type>%s</os_type>\n\
    <arch name=\"%s\">\n\
      <wordsize>%d</wordsize>\n\
      <domain type=\"xen\"></domain>\n",
                               guest_archs[i].hvm ? "hvm" : "xen",
                               guest_archs[i].model,
1663
                               guest_archs[i].bits);
1664 1665
        if (r == -1) goto vir_buffer_failed;
        if (guest_archs[i].hvm) {
1666
            r = virBufferVSprintf (xml,
1667
                              "\
1668
      <emulator>/usr/lib%s/xen/bin/qemu-dm</emulator>\n\
1669 1670
      <machine>pc</machine>\n\
      <machine>isapc</machine>\n\
1671 1672
      <loader>/usr/lib/xen/boot/hvmloader</loader>\n",
                                   guest_archs[i].bits == 64 ? "64" : "");
1673 1674 1675 1676 1677
            if (r == -1) goto vir_buffer_failed;
        }
        r = virBufferAdd (xml,
                          "\
    </arch>\n\
    <features>\n", -1);
        if (r == -1) goto vir_buffer_failed;
        if (guest_archs[i].pae) {
            r = virBufferAdd (xml,
                              "\
      <pae/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        if (guest_archs[i].nonpae) {
            r = virBufferAdd (xml,
                              "\
      <nonpae/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        if (guest_archs[i].ia64_be) {
            r = virBufferAdd (xml,
                              "\
      <ia64_be/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        r = virBufferAdd (xml,
                          "\
    </features>\n\
  </guest>\n", -1);
        if (r == -1) goto vir_buffer_failed;
    }
    r = virBufferAdd (xml,
                      "\
</capabilities>\n", -1);
    if (r == -1) goto vir_buffer_failed;
    xml_str = strdup (xml->content);
    if (!xml_str) {
        virXenError(VIR_ERR_NO_MEMORY, "strdup", 0);
        goto vir_buffer_failed;
    }
    virBufferFree (xml);

    return xml_str;

 vir_buffer_failed:
    virBufferFree (xml);
    return NULL;
}
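
/*
 * The XML construction above repeats one pattern throughout: append a
 * fragment, test for -1, and leave through a single cleanup label.  A
 * minimal sketch of that pattern follows (disabled; it assumes the
 * buffer handle type is virBufferPtr and uses a made-up tag name):
 */
#if 0
static char *
exampleBuildXML (void)
{
    virBufferPtr buf = virBufferNew (256);      /* initial allocation hint */
    char *copy;

    if (!buf) return NULL;
    if (virBufferAdd (buf, "<example/>\n", -1) == -1)  /* -1: NUL-terminated */
        goto failed;
    copy = strdup (buf->content);       /* caller frees the returned string */
    virBufferFree (buf);
    return copy;

 failed:
    virBufferFree (buf);
    return NULL;
}
#endif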

/**
 * xenHypervisorGetCapabilities:
 * @conn: pointer to the connection block
 *
 * Return the capabilities of this hypervisor.
 */
char *
xenHypervisorGetCapabilities (virConnectPtr conn)
{
    char *xml;
    FILE *cpuinfo, *capabilities;
    struct utsname utsname;

    /* Really, this never fails - look at the man-page. */
    uname (&utsname);

    cpuinfo = fopen ("/proc/cpuinfo", "r");
    if (cpuinfo == NULL) {
        if (errno != ENOENT) {
            virXenPerror (conn, "/proc/cpuinfo");
            return NULL;
        }
    }

    capabilities = fopen ("/sys/hypervisor/properties/capabilities", "r");
    if (capabilities == NULL) {
        if (errno != ENOENT) {
            if (cpuinfo)
                fclose(cpuinfo);
            virXenPerror (conn, "/sys/hypervisor/properties/capabilities");
            return NULL;
        }
    }

    xml = xenHypervisorMakeCapabilitiesXML(conn, utsname.machine, cpuinfo, capabilities);

    if (cpuinfo)
        fclose(cpuinfo);
    if (capabilities)
        fclose(capabilities);

    return xml;
}
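
/*
 * Typical use of this entry point (an illustrative sketch; it assumes
 * conn was opened through the unified Xen driver so that privateData
 * carries a valid hypervisor handle):
 */
#if 0
    char *caps = xenHypervisorGetCapabilities (conn);
    if (caps != NULL) {
        fputs (caps, stdout);   /* caps is a malloc'ed XML document */
        free (caps);
    }
#endif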

/**
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorNumOfDomains(virConnectPtr conn)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids;
    static int last_maxids = 2;
    int maxids = last_maxids;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (-1);

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virXenError(VIR_ERR_NO_MEMORY, _("allocating %d domain info"),
                    maxids);
        return(-1);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (ret < 0)
        return (-1);

    nbids = ret;
    if (nbids == maxids) {
        last_maxids *= 2;
        maxids *= 2;
        goto retry;
    }
    if ((nbids < 0) || (nbids > maxids))
        return(-1);
    return(nbids);
}

/**
 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their IDs in @ids
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids, i;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 ||
        (ids == NULL) || (maxids < 1))
        return (-1);

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virXenError(VIR_ERR_NO_MEMORY, "allocating %d domain info",
                    maxids);
        return(-1);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
    memset(ids, 0, maxids * sizeof(int));

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (ret < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return (-1);
    }

    nbids = ret;
    if ((nbids < 0) || (nbids > maxids)) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return(-1);
    }

    for (i = 0; i < nbids; i++) {
        ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
    }

    XEN_GETDOMAININFOLIST_FREE(dominfos);
    return (nbids);
}
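
/*
 * The two calls above are normally used together (a sketch; error
 * checking trimmed and conn assumed open).  Note that a domain may
 * appear or vanish between the two hypercalls, so the second return
 * value is the authoritative count:
 */
#if 0
    int n = xenHypervisorNumOfDomains (conn);
    int *ids = malloc (n * sizeof (int));
    n = xenHypervisorListDomains (conn, ids, n);
    /* ids[0..n-1] now hold the IDs of the active domains */
    free (ids);
#endif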

/**
 * xenHypervisorGetMaxVcpus:
 * @conn: pointer to the connection block
 * @type: hypervisor type (ignored)
 *
 * Returns the maximum number of virtual CPUs supported by Xen,
 * or -1 in case of error.
 */
int
xenHypervisorGetMaxVcpus(virConnectPtr conn,
                         const char *type ATTRIBUTE_UNUSED)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (-1);

    return MAX_VIRT_CPUS;
}

/**
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: domain id
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
unsigned long
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;

    if (conn == NULL)
        return 0;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return 0;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (0);

    return((unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages);
}
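
/*
 * Worked example of the conversion above: with 4096-byte pages,
 * kb_per_pages is 4096 / 1024 = 4, so a domain whose max_pages is
 * 131072 reports 131072 * 4 = 524288 KB, i.e. 512 MB.
 */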

#ifndef PROXY
/**
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object or NULL
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain. If domain is NULL, then this gets the amount of memory reserved
 * to Domain0, i.e. the domain where the application runs.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
static unsigned long
xenHypervisorGetMaxMemory(virDomainPtr domain)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return 0;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (0);

    return(xenHypervisorGetDomMaxMemory(domain->conn, domain->id));
}
#endif

/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)
        return (-1);

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (-1);

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_state = domain_flags & 0xFF;
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            /* The domain is shutdown.  Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NONE;
    }

    /*
     * The hypervisor reports CPU time in nanoseconds, which is exactly
     * what virDomainInfo expects, so it is stored unchanged; memory
     * sizes are converted from page counts to kilobytes.
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    if (info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
    return (0);
}
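
/*
 * Worked example of the flag decoding above: a crashed guest carries
 * domain_flags = (SHUTDOWN_crash << DOMFLAGS_SHUTDOWNSHIFT) | DOMFLAGS_SHUTDOWN,
 * so (domain_flags & 0xFF) selects the DOMFLAGS_SHUTDOWN branch and
 * (domain_flags >> DOMFLAGS_SHUTDOWNSHIFT) recovers SHUTDOWN_crash,
 * yielding VIR_DOMAIN_CRASHED.
 */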

/**
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || info == NULL ||
        (domain->id < 0))
        return (-1);

    return(xenHypervisorGetDomInfo(domain->conn, domain->id, info));
}

#ifndef PROXY
/**
 * xenHypervisorPauseDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to pause the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorPauseDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_pausedomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorResumeDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to resume the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorResumeDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_unpausedomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorDestroyDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to destroy the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorDestroyDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_destroydomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorSetMaxMemory:
 * @domain: pointer to the domain block
 * @memory: the max memory size in kilobytes.
 *
 * Do a hypervisor call to change the maximum amount of memory used
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_setmaxmem(priv->handle, domain->id, memory);
    if (ret < 0)
        return (-1);
    return (0);
}
#endif /* PROXY */

#ifndef PROXY
/**
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0 || nvcpus < 1)
        return (-1);

    ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
 *
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (cpumap == NULL) || (maplen < 1))
        return (-1);

    ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
                            cpumap, maplen);
    if (ret < 0)
        return (-1);
    return (0);
}
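
/*
 * Sketch of building a cpumap for the call above: one bit per physical
 * CPU, byte 0 bit 0 being CPU 0.  This (disabled) fragment would pin
 * virtual CPU 0 of a running domain dom to physical CPUs 0-3:
 */
#if 0
    unsigned char map = 0x0f;   /* binary 00001111 -> CPUs 0, 1, 2, 3 */
    if (xenHypervisorPinVcpu (dom, 0, &map, 1) < 0)
        return -1;
#endif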
#endif

/**
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 *      If cpumaps is NULL, then no cpumap information is returned by the API.
 *      It's assumed there are <maxinfo> cpumaps in the cpumaps array.
 *      The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *      (ie: calloc(maxinfo, maplen)).
 *      One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *      underlying virtualization system (Xen...).
 *
 * Extract information about virtual CPUs of domain, store it in info array
 * and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info structures filled in case of success, -1 in case of failure.
 */
#ifndef PROXY
int
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
{
    xen_getdomaininfo dominfo;
    int ret;
    xenUnifiedPrivatePtr priv;
    virVcpuInfoPtr ipt;
    int nbinfo, i;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (info == NULL) || (maxinfo < 1) ||
        (sizeof(cpumap_t) & 7))
        return (-1);
    if ((cpumaps != NULL) && (maplen < 1))
        return -1;

    /* first get the number of virtual CPUs in this domain */
    XEN_GETDOMAININFO_CLEAR(dominfo);
    ret = virXen_getdomaininfo(priv->handle, domain->id,
                               &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
        return (-1);
    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                                      maplen);
            if (ret < 0)
                return(-1);
        } else {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt, NULL, 0);
            if (ret < 0)
                return(-1);
        }
    }
    return nbinfo;
}
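
/*
 * Sketch of the allocation convention the doc comment above describes
 * (maxinfo and maplen are the caller's choice, dom a running domain):
 */
#if 0
    virVcpuInfoPtr info = calloc (maxinfo, sizeof (virVcpuInfo));
    unsigned char *cpumaps = calloc (maxinfo, maplen);
    int n = xenHypervisorGetVcpus (dom, info, maxinfo, cpumaps, maplen);
    /* VIR_GET_CPUMAP(cpumaps, maplen, i) then addresses the map of vcpu i */
    free (cpumaps);
    free (info);
#endif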
#endif

/**
 * xenHypervisorGetVcpuMax:
 * @domain: pointer to the domain block
 *
 * Returns the maximum number of virtual CPUs supported for
 * the guest VM, or -1 in case of error. If the guest is inactive,
 * this is the maximum number of CPUs defined by Xen. If the guest
 * is running, this reflects the maximum number of virtual CPUs the
 * guest was booted with.
 */
int
xenHypervisorGetVcpuMax(virDomainPtr domain)
{
    xen_getdomaininfo dominfo;
    int ret;
    int maxcpu;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0)
        return (-1);

    /* inactive domain */
    if (domain->id < 0) {
        maxcpu = MAX_VIRT_CPUS;
    } else {
        XEN_GETDOMAININFO_CLEAR(dominfo);
        ret = virXen_getdomaininfo(priv->handle, domain->id,
                                   &dominfo);

        if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
            return (-1);
        maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
    }

    return maxcpu;
}

#endif /* WITH_XEN */
/*
 * vim: set tabstop=4:
 * vim: set shiftwidth=4:
 * vim: set expandtab:
 */
/*
 * Local variables:
 *  indent-tabs-mode: nil
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */