/*
 * xen_internal.c: direct access to Xen hypervisor level
 *
 * Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * See COPYING.LIB for the License of this software
 *
 * Daniel Veillard <veillard@redhat.com>
 */

#include <stdio.h>
#include <string.h>
/* required for malloc() and free() */
#include <stdlib.h>

/* required for uint8_t, uint32_t, etc ... */
#include <stdint.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <limits.h>

/* required for dom0_getdomaininfo_t */
#include <xen/dom0_ops.h>
#include <xen/version.h>
#include <xen/xen.h>
#include <xen/linux/privcmd.h>

/* #ifndef __LINUX_PUBLIC_PRIVCMD_H__ */
typedef struct old_hypercall_struct {
    unsigned long op;
    unsigned long arg[5];
} old_hypercall_t;
#define XEN_OLD_IOCTL_HYPERCALL_CMD \
        _IOC(_IOC_NONE, 'P', 0, sizeof(old_hypercall_t))

typedef struct privcmd_hypercall hypercall_t;
#define XEN_IOCTL_HYPERCALL_CMD IOCTL_PRIVCMD_HYPERCALL

static int xen_ioctl_hypercall_cmd = 0;
static int old_hypervisor = 0;
static int initialized = 0;
static int hv_version = 0;

#include "internal.h"
#include "driver.h"
#include "xen_internal.h"

#define XEN_HYPERVISOR_SOCKET "/proc/xen/privcmd"

#ifndef PROXY
static const char * xenHypervisorGetType(virConnectPtr conn);
static unsigned long xenHypervisorGetMaxMemory(virDomainPtr domain);
#endif
static int xenHypervisorInit(void);

#ifndef PROXY
static virDriver xenHypervisorDriver = {
    VIR_DRV_XEN_HYPERVISOR,
    "Xen",
    (DOM0_INTERFACE_VERSION >> 24) * 1000000 +
    ((DOM0_INTERFACE_VERSION >> 16) & 0xFF) * 1000 +
    (DOM0_INTERFACE_VERSION & 0xFFFF),
    xenHypervisorInit, /* init */
    xenHypervisorOpen, /* open */
    xenHypervisorClose, /* close */
    xenHypervisorGetType, /* type */
    xenHypervisorGetVersion, /* version */
    NULL, /* nodeGetInfo */
    xenHypervisorListDomains, /* listDomains */
    xenHypervisorNumOfDomains, /* numOfDomains */
    NULL, /* domainCreateLinux */
    NULL, /* domainLookupByID */
    NULL, /* domainLookupByUUID */
    NULL, /* domainLookupByName */
    xenHypervisorPauseDomain, /* domainSuspend */
    xenHypervisorResumeDomain, /* domainResume */
    NULL, /* domainShutdown */
    NULL, /* domainReboot */
    xenHypervisorDestroyDomain, /* domainDestroy */
    NULL, /* domainFree */
    NULL, /* domainGetName */
    NULL, /* domainGetID */
    NULL, /* domainGetUUID */
    NULL, /* domainGetOSType */
    xenHypervisorGetMaxMemory, /* domainGetMaxMemory */
    xenHypervisorSetMaxMemory, /* domainSetMaxMemory */
    NULL, /* domainSetMemory */
    xenHypervisorGetDomainInfo, /* domainGetInfo */
    NULL, /* domainSave */
    NULL, /* domainRestore */
    xenHypervisorSetVcpus, /* domainSetVcpus */
    xenHypervisorPinVcpu, /* domainPinVcpu */
    xenHypervisorGetVcpus, /* domainGetVcpus */
    NULL, /* domainDumpXML */
};
#endif /* !PROXY */

/**
 * virXenError:
 * @error: the error number
 * @info: extra information string
 * @value: extra information number
 *
 * Handle an error at the Xen hypervisor interface
 */
static void
virXenError(virErrorNumber error, const char *info, int value)
{
    const char *errmsg;

    if (error == VIR_ERR_OK)
        return;

    errmsg = __virErrorMsg(error, info);
    __virRaiseError(NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
                    errmsg, info, NULL, value, 0, errmsg, info, value);
}

/**
 * xenHypervisorInit:
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used, i.e. pre or post changeset 10277.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int xenHypervisorInit(void)
{
    int fd, ret, cmd;
    hypercall_t hc;
    old_hypercall_t old_hc;

    if (initialized) {
        if (old_hypervisor == -1)
            return(-1);
        return(0);
    }
    initialized = 1;

    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        old_hypervisor = -1;
        return (-1);
    }
    fd = ret;

    /* try the new (post changeset 10277) hypercall interface first */
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;
    hc.arg[1] = 0;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
        /* fprintf(stderr, "Using new hypervisor call: %X\n", ret); */
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        old_hypervisor = 0;
        goto done;
    }

    /* fall back to the old hypercall interface */
    old_hc.op = __HYPERVISOR_xen_version;
    old_hc.arg[0] = (unsigned long) XENVER_version;
    old_hc.arg[1] = 0;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(old_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &old_hc);
    if ((ret != -1) && (ret != 0)) {
        /* fprintf(stderr, "Using old hypervisor call: %X\n", ret); */
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        old_hypervisor = 1;
        goto done;
    }

    old_hypervisor = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    return(-1);

done:
    close(fd);
    return(0);
}

#ifndef PROXY
/**
 * xenHypervisorRegister:
 *
 * Registers the xenHypervisor driver
 */
void xenHypervisorRegister(void)
{
    if (initialized == 0)
        xenHypervisorInit();

    virRegisterDriver(&xenHypervisorDriver);
}
#endif /* !PROXY */

/**
 * xenHypervisorOpen:
 * @conn: pointer to the connection block
 * @name: URL for the target, NULL for local
 * @flags: combination of virDrvOpenFlag(s)
 *
 * Connects to the Xen hypervisor.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorOpen(virConnectPtr conn, const char *name, int flags)
{
    int ret;

    if (initialized == 0)
        xenHypervisorInit();

    if ((name != NULL) && (strcasecmp(name, "xen")))
        return(-1);

    conn->handle = -1;

    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        if (!(flags & VIR_DRV_OPEN_QUIET))
            virXenError(VIR_ERR_NO_XEN, XEN_HYPERVISOR_SOCKET, 0);
        return (-1);
    }
    conn->handle = ret;

    return(0);
}

/**
 * xenHypervisorClose:
 * @conn: pointer to the connection block
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
xenHypervisorClose(virConnectPtr conn)
{
    int ret;

    if ((conn == NULL) || (conn->handle < 0))
        return (-1);

    ret = close(conn->handle);
    if (ret < 0)
        return (-1);
    return (0);
}
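
/*
 * Illustrative usage sketch (not part of the driver itself): open a
 * connection to the hypervisor and close it again.  It assumes a
 * virConnect structure whose full definition is visible, as it is in
 * this file through internal.h; in normal libvirt use these entry
 * points are reached through the driver table registered by
 * xenHypervisorRegister() rather than called directly.
 *
 *     virConnect conn;
 *
 *     memset(&conn, 0, sizeof(conn));
 *     if (xenHypervisorOpen(&conn, NULL, VIR_DRV_OPEN_QUIET) == 0) {
 *         ... use conn.handle through the helpers below ...
 *         xenHypervisorClose(&conn);
 *     }
 */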

/**
 * xenHypervisorDoOldOp:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation through the old interface;
 * this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoOldOp(int handle, dom0_op_t * op)
{
    int ret;
    old_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_version << 8;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(dom0_op_t));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(dom0_op_t));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}
/**
 * xenHypervisorDoOp:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation; this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoOp(int handle, dom0_op_t * op)
{
    int ret;
    hypercall_t hc;

    if (old_hypervisor)
        return(xenHypervisorDoOldOp(handle, op));

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(dom0_op_t));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(dom0_op_t));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}
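
/*
 * Illustrative sketch (this mirrors what the public wrappers below do):
 * a caller fills a dom0_op_t, selects the command and its union member,
 * and hands it to xenHypervisorDoOp() together with the privcmd file
 * handle kept in conn->handle; here conn and id stand in for a caller's
 * connection and domain id.
 *
 *     dom0_op_t op;
 *
 *     op.cmd = DOM0_PAUSEDOMAIN;
 *     op.u.pausedomain.domain = (domid_t) id;
 *     if (xenHypervisorDoOp(conn->handle, &op) < 0)
 *         return (-1);
 */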

#ifndef PROXY
/**
 * xenHypervisorGetType:
 * @conn: pointer to the Xen Hypervisor block
 *
 * Get the name of the Hypervisor driver used.
 *
 * Returns the name of the driver or NULL in case of error.
 */
static const char *
xenHypervisorGetType(virConnectPtr conn)
{
    if (!VIR_IS_CONNECT(conn)) {
        virXenError(VIR_ERR_INVALID_CONN, __FUNCTION__, 0);
        return (NULL);
    }
    return("Xen");
}
#endif

/**
 * xenHypervisorGetVersion:
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
 *
 * Call the hypervisor to extract its own internal API version
 *
 * Returns 0 in case of success, -1 in case of error
 */
int
xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
{
    if ((conn == NULL) || (conn->handle < 0) || (hvVer == NULL))
        return (-1);
    *hvVer = (hv_version >> 16) * 1000000 + (hv_version & 0xFFFF) * 1000;
    return(0);
}
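
/*
 * Illustrative sketch: the value stored in @hvVer packs the hypervisor
 * version as major * 1,000,000 + minor * 1,000 (the release number is
 * not reported by this driver), so a caller can unpack it again like
 * this, assuming @conn is an opened connection:
 *
 *     unsigned long hvVer, major, minor;
 *
 *     if (xenHypervisorGetVersion(conn, &hvVer) == 0) {
 *         major = hvVer / 1000000;
 *         minor = (hvVer % 1000000) / 1000;
 *     }
 */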

/**
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorNumOfDomains(virConnectPtr conn)
{
    dom0_op_t op;
    dom0_getdomaininfo_t *dominfos;
    int ret, nbids;
    static int last_maxids = 2;
    int maxids = last_maxids;

    if ((conn == NULL) || (conn->handle < 0))
        return (-1);

retry:
    dominfos = malloc(maxids * sizeof(dom0_getdomaininfo_t));
    if (dominfos == NULL) {
        virXenError(VIR_ERR_NO_MEMORY, "failed to allocate %d domain info",
                    maxids);
        return(-1);
    }

    memset(dominfos, 0, sizeof(dom0_getdomaininfo_t) * maxids);

    if (mlock(dominfos, sizeof(dom0_getdomaininfo_t) * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    sizeof(dom0_getdomaininfo_t) * maxids);
        free(dominfos);
        return (-1);
    }

    op.cmd = DOM0_GETDOMAININFOLIST;
    op.u.getdomaininfolist.first_domain = (domid_t) 0;
    op.u.getdomaininfolist.max_domains = maxids;
    op.u.getdomaininfolist.buffer = dominfos;
    op.u.getdomaininfolist.num_domains = maxids;

    ret = xenHypervisorDoOp(conn->handle, &op);

    if (munlock(dominfos, sizeof(dom0_getdomaininfo_t) * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    sizeof(dom0_getdomaininfo_t) * maxids);
        ret = -1;
    }

    free(dominfos);

    if (ret < 0)
        return (-1);

    nbids = op.u.getdomaininfolist.num_domains;
    if (nbids == maxids) {
        last_maxids *= 2;
        maxids *= 2;
        goto retry;
    }
    if ((nbids < 0) || (nbids > maxids))
        return(-1);
    return(nbids);
}

/**
 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their IDs in @ids
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
{
    dom0_op_t op;
    dom0_getdomaininfo_t *dominfos;
    int ret, nbids, i;

    if ((conn == NULL) || (conn->handle < 0) ||
        (ids == NULL) || (maxids < 1))
        return (-1);

    dominfos = malloc(maxids * sizeof(dom0_getdomaininfo_t));
    if (dominfos == NULL) {
        virXenError(VIR_ERR_NO_MEMORY, "failed to allocate %d domain info",
                    maxids);
        return(-1);
    }

    memset(dominfos, 0, sizeof(dom0_getdomaininfo_t) * maxids);
    memset(ids, 0, maxids * sizeof(int));

    if (mlock(dominfos, sizeof(dom0_getdomaininfo_t) * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    sizeof(dom0_getdomaininfo_t) * maxids);
        free(dominfos);
        return (-1);
    }

    op.cmd = DOM0_GETDOMAININFOLIST;
    op.u.getdomaininfolist.first_domain = (domid_t) 0;
    op.u.getdomaininfolist.max_domains = maxids;
    op.u.getdomaininfolist.buffer = dominfos;
    op.u.getdomaininfolist.num_domains = maxids;

    ret = xenHypervisorDoOp(conn->handle, &op);

    if (munlock(dominfos, sizeof(dom0_getdomaininfo_t) * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    sizeof(dom0_getdomaininfo_t) * maxids);
        ret = -1;
    }

    if (ret < 0) {
        free(dominfos);
        return (-1);
    }

    nbids = op.u.getdomaininfolist.num_domains;
    if ((nbids < 0) || (nbids > maxids)) {
        free(dominfos);
        return(-1);
    }

    for (i = 0; i < nbids; i++) {
        ids[i] = dominfos[i].domain;
    }

    free(dominfos);
    return (nbids);
}
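
/*
 * Illustrative usage sketch: count the active domains and collect their
 * IDs.  @conn is assumed to be a connection opened with
 * xenHypervisorOpen(); error handling is kept to a bare minimum.
 *
 *     int n, i, *ids;
 *
 *     n = xenHypervisorNumOfDomains(conn);
 *     if (n > 0) {
 *         ids = malloc(n * sizeof(int));
 *         if (ids != NULL) {
 *             n = xenHypervisorListDomains(conn, ids, n);
 *             for (i = 0; i < n; i++)
 *                 printf("domain %d\n", ids[i]);
 *             free(ids);
 *         }
 *     }
 */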

/**
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: domain id
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
unsigned long
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
{
    dom0_op_t op;
    dom0_getdomaininfo_t dominfo;
    int ret;

    if ((conn == NULL) || (conn->handle < 0))
        return (0);

    memset(&dominfo, 0, sizeof(dom0_getdomaininfo_t));

    if (mlock(&dominfo, sizeof(dom0_getdomaininfo_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    sizeof(dom0_getdomaininfo_t));
        return (0);
    }

    op.cmd = DOM0_GETDOMAININFOLIST;
    op.u.getdomaininfolist.first_domain = (domid_t) id;
    op.u.getdomaininfolist.max_domains = 1;
    op.u.getdomaininfolist.buffer = &dominfo;
    op.u.getdomaininfolist.num_domains = 1;
    dominfo.domain = id;

    ret = xenHypervisorDoOp(conn->handle, &op);

    if (munlock(&dominfo, sizeof(dom0_getdomaininfo_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    sizeof(dom0_getdomaininfo_t));
        ret = -1;
    }

    if (ret < 0)
        return (0);

    return((unsigned long) dominfo.max_pages * 4);
}

#ifndef PROXY
/**
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object or NULL
 * 
 * Retrieve the maximum amount of physical memory allocated to a
 * domain. If domain is NULL, then this gets the amount of memory reserved
 * to Domain0 i.e. the domain where the application runs.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
static unsigned long
xenHypervisorGetMaxMemory(virDomainPtr domain)
{
    if ((domain == NULL) || (domain->conn == NULL) ||
        (domain->conn->handle < 0))
        return (0);

    return(xenHypervisorGetDomMaxMemory(domain->conn, domain->handle));
}
#endif

/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    dom0_op_t op;
    dom0_getdomaininfo_t dominfo;
    int ret;

    if ((conn == NULL) || (conn->handle < 0) || (info == NULL))
        return (-1);

    memset(info, 0, sizeof(virDomainInfo));
    memset(&dominfo, 0, sizeof(dom0_getdomaininfo_t));

    if (mlock(&dominfo, sizeof(dom0_getdomaininfo_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    sizeof(dom0_getdomaininfo_t));
        return (-1);
    }

    op.cmd = DOM0_GETDOMAININFOLIST;
    op.u.getdomaininfolist.first_domain = (domid_t) id;
    op.u.getdomaininfolist.max_domains = 1;
    op.u.getdomaininfolist.buffer = &dominfo;
    op.u.getdomaininfolist.num_domains = 1;
    dominfo.domain = id;

    ret = xenHypervisorDoOp(conn->handle, &op);

    if (munlock(&dominfo, sizeof(dom0_getdomaininfo_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    sizeof(dom0_getdomaininfo_t));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    switch (dominfo.flags & 0xFF) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            info->state = VIR_DOMAIN_SHUTOFF;
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NONE;
    }

    /*
     * the hypervisor reports the CPU time in nanoseconds and the memory
     * sizes as page counts; the CPU time is passed through unchanged and
     * the page counts are converted to kilobytes (4 KiB pages)
     */
    info->cpuTime = dominfo.cpu_time;
    info->memory = dominfo.tot_pages * 4;
    info->maxMem = dominfo.max_pages * 4;
    info->nrVirtCpu = dominfo.nr_online_vcpus;
    return (0);
}

/**
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
{
    if ((domain == NULL) || (domain->conn == NULL) ||
        (domain->conn->handle < 0) || (info == NULL) ||
        (domain->handle < 0))
        return (-1);
    return(xenHypervisorGetDomInfo(domain->conn, domain->handle, info));

}
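
/*
 * Illustrative usage sketch: fetch the state and memory figures of one
 * domain.  @domain is assumed to be a valid virDomainPtr bound to an
 * open hypervisor connection.
 *
 *     virDomainInfo dinfo;
 *
 *     if (xenHypervisorGetDomainInfo(domain, &dinfo) == 0)
 *         printf("state %d, %lu/%lu KB, %hu vcpus\n",
 *                dinfo.state, dinfo.memory, dinfo.maxMem, dinfo.nrVirtCpu);
 */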

#ifndef PROXY
/**
 * xenHypervisorPauseDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to pause the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorPauseDomain(virDomainPtr domain)
{
    dom0_op_t op;
    int ret;

    if ((domain == NULL) || (domain->conn == NULL) ||
        (domain->conn->handle < 0))
        return (-1);

    op.cmd = DOM0_PAUSEDOMAIN;
    op.u.pausedomain.domain = (domid_t) domain->handle;

    ret = xenHypervisorDoOp(domain->conn->handle, &op);

    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorResumeDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to resume the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorResumeDomain(virDomainPtr domain)
{
    dom0_op_t op;
    int ret;

    if ((domain == NULL) || (domain->conn == NULL) ||
        (domain->conn->handle < 0))
        return (-1);

    op.cmd = DOM0_UNPAUSEDOMAIN;
    op.u.unpausedomain.domain = (domid_t) domain->handle;

    ret = xenHypervisorDoOp(domain->conn->handle, &op);

    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorDestroyDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to destroy the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorDestroyDomain(virDomainPtr domain)
{
    dom0_op_t op;
    int ret;

    if ((domain == NULL) || (domain->conn == NULL) ||
        (domain->conn->handle < 0))
        return (-1);

    op.cmd = DOM0_DESTROYDOMAIN;
    op.u.destroydomain.domain = (domid_t) domain->handle;

    ret = xenHypervisorDoOp(domain->conn->handle, &op);

    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorSetMaxMemory:
 * @domain: pointer to the domain block
 * @memory: the max memory size in kilobytes.
 *
 * Do a hypervisor call to change the maximum amount of memory used
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
{
    dom0_op_t op;
    int ret;

    if ((domain == NULL) || (domain->conn == NULL) ||
        (domain->conn->handle < 0))
        return (-1);

    op.cmd = DOM0_SETDOMAINMAXMEM;
    op.u.setdomainmaxmem.domain = (domid_t) domain->handle;
    op.u.setdomainmaxmem.max_memkb = memory;

    ret = xenHypervisorDoOp(domain->conn->handle, &op);

    if (ret < 0)
        return (-1);
    return (0);
}
#endif /* !PROXY */

/**
 * xenHypervisorCheckID:
 * @conn: connection data
 * @id: the domain ID
 *
 * Do a hypervisor call to verify the domain ID is valid
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorCheckID(virConnectPtr conn, int id)
{
    dom0_op_t op;
    dom0_getdomaininfo_t dominfo;
    int ret;

    if ((conn == NULL) || (conn->handle < 0) || (id < 0))
        return (-1);

    memset(&dominfo, 0, sizeof(dom0_getdomaininfo_t));

    if (mlock(&dominfo, sizeof(dom0_getdomaininfo_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    sizeof(dom0_getdomaininfo_t));
        return (-1);
    }

    op.cmd = DOM0_GETDOMAININFOLIST;
    op.u.getdomaininfolist.first_domain = (domid_t) id;
    op.u.getdomaininfolist.max_domains = 1;
    op.u.getdomaininfolist.buffer = &dominfo;
    op.u.getdomaininfolist.num_domains = 1;
    dominfo.domain = id;

    ret = xenHypervisorDoOp(conn->handle, &op);

    if (munlock(&dominfo, sizeof(dom0_getdomaininfo_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    sizeof(dom0_getdomaininfo_t));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

#ifndef PROXY
/**
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
{
    dom0_op_t op;

    if ((domain == NULL) || (domain->conn == NULL) || (domain->conn->handle < 0)
     || (nvcpus < 1))
        return (-1);
    op.cmd = DOM0_MAX_VCPUS;
    op.u.max_vcpus.domain = (domid_t) domain->handle;
    op.u.max_vcpus.max = nvcpus;
    if (xenHypervisorDoOp(domain->conn->handle, &op) < 0)
        return (-1);
    return 0;
}

/**
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
 * 
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
{
    dom0_op_t op;
    uint64_t *pm = (uint64_t *)&op.u.setvcpuaffinity.cpumap; 
    int j;

    if ((domain == NULL) || (domain->conn == NULL) || (domain->conn->handle < 0)
     || (cpumap == NULL) || (maplen < 1) || (maplen > (int)sizeof(cpumap_t))
     || (sizeof(cpumap_t) & 7))
        return (-1);
    op.cmd = DOM0_SETVCPUAFFINITY;
    op.u.setvcpuaffinity.domain = (domid_t) domain->handle;
    op.u.setvcpuaffinity.vcpu = vcpu;
    memset(pm, 0, sizeof(cpumap_t));
    for (j = 0; j < maplen; j++)
        *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));
    if (xenHypervisorDoOp(domain->conn->handle, &op) < 0)
        return (-1);
    return 0;
}
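
/*
 * Illustrative sketch of building the cpumap argument for
 * xenHypervisorPinVcpu(): each byte covers 8 physical CPUs, lowest bit
 * first, so restricting virtual CPU 0 of @domain to physical CPUs 1 and
 * 3 looks like this (a one-byte map is enough for CPUs 0-7):
 *
 *     unsigned char cpumap[1] = { (1 << 1) | (1 << 3) };
 *
 *     if (xenHypervisorPinVcpu(domain, 0, cpumap, sizeof(cpumap)) < 0)
 *         return (-1);
 */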
#endif

/**
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 *	If cpumaps is NULL, then no cpumap information is returned by the API.
 *	It's assumed there are <maxinfo> cpumaps in the cpumaps array.
 *	The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *	(ie: calloc(maxinfo, maplen)).
 *	One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *	underlying virtualization system (Xen...).
 *
 * Extract information about the virtual CPUs of the domain, store it in the
 * info array and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
 */
int
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
{
    dom0_op_t op;
    uint64_t *pm = (uint64_t *)&op.u.getvcpuinfo.cpumap;
    virVcpuInfoPtr ipt;
    int nbinfo, mapl, i;
    unsigned char *cpumap;
    int vcpu, cpu;

    if ((domain == NULL) || (domain->conn == NULL) || (domain->conn->handle < 0)
     || (info == NULL) || (maxinfo < 1)
     || (sizeof(cpumap_t) & 7))
        return (-1);
    if (cpumaps != NULL && maplen < 1)
        return -1;

    /* first get the number of virtual CPUs in this domain */
    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t) domain->handle;
    if (xenHypervisorDoOp(domain->conn->handle, &op) < 0)
        return (-1);
    nbinfo = (int)op.u.getdomaininfo.max_vcpu_id + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    op.cmd = DOM0_GETVCPUINFO;
    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        vcpu = op.u.getvcpuinfo.vcpu = i;
        if (xenHypervisorDoOp(domain->conn->handle, &op) < 0)
            return (-1);
        ipt->number = i;
        if (op.u.getvcpuinfo.online) {
            if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
            if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
        }
        else ipt->state = VIR_VCPU_OFFLINE;
        ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
        ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        if (cpumaps != NULL && vcpu >= 0 && vcpu < maxinfo) {
            cpumap = (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
            mapl = (maplen > (int)sizeof(cpumap_t)) ? (int)sizeof(cpumap_t) : maplen;
            for (cpu = 0; cpu < (mapl * CHAR_BIT); cpu++) {
                if (*pm & ((uint64_t)1 << cpu))
                    VIR_USE_CPU(cpumap, cpu);
            }
        }
    }
    return nbinfo;
}
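
/*
 * Illustrative usage sketch for xenHypervisorGetVcpus(): query up to 4
 * virtual CPUs together with their pinning maps, using one byte of
 * cpumap per vcpu (enough for 8 physical CPUs).  @domain is assumed to
 * be a valid virDomainPtr; the sizes are kept small for brevity.
 *
 *     virVcpuInfo vinfo[4];
 *     unsigned char maps[4];
 *     int n, i;
 *
 *     n = xenHypervisorGetVcpus(domain, vinfo, 4, maps, 1);
 *     for (i = 0; i < n; i++)
 *         printf("vcpu %u: state %d on cpu %d\n",
 *                vinfo[i].number, vinfo[i].state, vinfo[i].cpu);
 */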