/*
 * xen_internal.c: direct access to Xen hypervisor level
 *
 * Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * See COPYING.LIB for the License of this software
 *
 * Daniel Veillard <veillard@redhat.com>
 */

#ifdef WITH_XEN

#include <stdio.h>
#include <string.h>
/* required for uint8_t, uint32_t, etc ... */
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <limits.h>
#include <regex.h>
#include <errno.h>
#include <sys/utsname.h>

/* required for dom0_getdomaininfo_t */
#include <xen/dom0_ops.h>
#include <xen/version.h>
#include <xen/xen.h>
#include <xen/linux/privcmd.h>

/* required for shutdown flags */
#include <xen/sched.h>

#include "xml.h"

/* #define DEBUG */
/*
 * so far there are two versions of the structures usable for doing
 * hypervisor calls.
 */
/* the old one */
typedef struct v0_hypercall_struct {
    unsigned long op;
    unsigned long arg[5];
} v0_hypercall_t;
#define XEN_V0_IOCTL_HYPERCALL_CMD \
        _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))

/* the new one */
typedef struct v1_hypercall_struct
{
    uint64_t op;
    uint64_t arg[5];
} v1_hypercall_t;
#define XEN_V1_IOCTL_HYPERCALL_CMD                  \
    _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
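
/*
 * Note: the two ioctl request numbers above differ only in the structure
 * size encoded by _IOC(); xenHypervisorInit() below issues a probe
 * hypercall with each layout to discover which one the running kernel's
 * privcmd driver expects.
 */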

typedef v1_hypercall_t hypercall_t;

#ifndef __HYPERVISOR_sysctl
#define __HYPERVISOR_sysctl 35
#endif
#ifndef __HYPERVISOR_domctl
#define __HYPERVISOR_domctl 36
#endif
static int xen_ioctl_hypercall_cmd = 0;
static int initialized = 0;
static int in_init = 0;
static int hv_version = 0;
static int hypervisor_version = 2;
static int sys_interface_version = -1;
static int dom_interface_version = -1;
static int kb_per_pages = 0;

/* Regular expressions used by xenHypervisorGetCapabilities, and
 * compiled once by xenHypervisorInit.  Note that these are POSIX.2
 * extended regular expressions (regex(7)).
 */
static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
static regex_t flags_hvm_rec;
static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
static regex_t flags_pae_rec;
static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
static regex_t xen_cap_rec;

/*
 * The content of the structures for a getdomaininfolist system hypercall
 */
#ifndef DOMFLAGS_DYING
#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
#define DOMFLAGS_HVM       (1<<1) /* Domain is HVM                           */
#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
#define DOMFLAGS_CPUSHIFT       8
#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
#define DOMFLAGS_SHUTDOWNSHIFT 16
#endif
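
/*
 * For example, the guest-supplied shutdown reason (one of the SHUTDOWN_*
 * codes below) is extracted from the flags word as:
 *
 *     (flags >> DOMFLAGS_SHUTDOWNSHIFT) & DOMFLAGS_SHUTDOWNMASK
 */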

/*
 * These flags explain why a system is in the state of "shutdown".  Normally,
 * they are defined in xen/sched.h.
 */
#ifndef SHUTDOWN_poweroff
#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
#endif

#define XEN_V0_OP_GETDOMAININFOLIST	38
#define XEN_V1_OP_GETDOMAININFOLIST	38
#define XEN_V2_OP_GETDOMAININFOLIST	6

struct xen_v0_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    unsigned long shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;

struct xen_v2_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    uint64_t shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;


/* As of hypervisor call v2, domctl v5, we are now 8-byte aligned
   even on 32-bit archs when dealing with uint64_t */
#define ALIGN_64 __attribute__((aligned(8)))

struct xen_v2d5_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;

union xen_getdomaininfo {
    struct xen_v0_getdomaininfo v0;
    struct xen_v2_getdomaininfo v2;
    struct xen_v2d5_getdomaininfo v2d5;
};
typedef union xen_getdomaininfo xen_getdomaininfo;

union xen_getdomaininfolist {
    struct xen_v0_getdomaininfo *v0;
    struct xen_v2_getdomaininfo *v2;
179
    struct xen_v2d5_getdomaininfo *v2d5;
180 181 182 183 184 185
};
typedef union xen_getdomaininfolist xen_getdomaininfolist;

#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size)                      \
    (hypervisor_version < 2 ?                                           \
     ((domlist.v0 = malloc(sizeof(xen_v0_getdomaininfo)*(size))) != NULL) : \
     (dom_interface_version < 5 ?                                       \
      ((domlist.v2 = malloc(sizeof(xen_v2_getdomaininfo)*(size))) != NULL) : \
      ((domlist.v2d5 = malloc(sizeof(xen_v2d5_getdomaininfo)*(size))) != NULL)))

#define XEN_GETDOMAININFOLIST_FREE(domlist)        \
    (hypervisor_version < 2 ?                      \
     free(domlist.v0) :                            \
     (dom_interface_version < 5 ?                  \
      free(domlist.v2) :                           \
      free(domlist.v2d5)))

#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size)                     \
    (hypervisor_version < 2 ?                                          \
     memset(domlist.v0, 0, sizeof(xen_v0_getdomaininfo) * size) :      \
     (dom_interface_version < 5 ?                                      \
      memset(domlist.v2, 0, sizeof(xen_v2_getdomaininfo) * size) :     \
      memset(domlist.v2d5, 0, sizeof(xen_v2d5_getdomaininfo) * size)))

#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n)    \
    (hypervisor_version < 2 ?                       \
     domlist.v0[n].domain :                         \
     (dom_interface_version < 5 ?                   \
      domlist.v2[n].domain :                        \
      domlist.v2d5[n].domain))

#define XEN_GETDOMAININFOLIST_DATA(domlist)        \
    (hypervisor_version < 2 ?                      \
     (void*)(domlist->v0) :                        \
     (dom_interface_version < 5 ?                  \
      (void*)(domlist->v2) :                       \
      (void*)(domlist->v2d5)))

#define XEN_GETDOMAININFO_SIZE                     \
    (hypervisor_version < 2 ?                      \
     sizeof(xen_v0_getdomaininfo) :                \
     (dom_interface_version < 5 ?                  \
      sizeof(xen_v2_getdomaininfo) :               \
      sizeof(xen_v2d5_getdomaininfo)))

#define XEN_GETDOMAININFO_CLEAR(dominfo)                           \
    (hypervisor_version < 2 ?                                      \
     memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) :      \
     (dom_interface_version < 5 ?                                  \
      memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)) :     \
      memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo))))

#define XEN_GETDOMAININFO_DOMAIN(dominfo)       \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.domain :                        \
     (dom_interface_version < 5 ?               \
      dominfo.v2.domain :                       \
      dominfo.v2d5.domain))

#define XEN_GETDOMAININFO_CPUTIME(dominfo)      \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.cpu_time :                      \
     (dom_interface_version < 5 ?               \
      dominfo.v2.cpu_time :                     \
      dominfo.v2d5.cpu_time))

#define XEN_GETDOMAININFO_CPUCOUNT(dominfo)     \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.nr_online_vcpus :               \
     (dom_interface_version < 5 ?               \
      dominfo.v2.nr_online_vcpus :              \
      dominfo.v2d5.nr_online_vcpus))
#define XEN_GETDOMAININFO_MAXCPUID(dominfo)  \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.max_vcpu_id :                   \
     (dom_interface_version < 5 ?               \
      dominfo.v2.max_vcpu_id :                  \
      dominfo.v2d5.max_vcpu_id))
#define XEN_GETDOMAININFO_FLAGS(dominfo)        \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.flags :                         \
     (dom_interface_version < 5 ?               \
      dominfo.v2.flags :                        \
      dominfo.v2d5.flags))

#define XEN_GETDOMAININFO_TOT_PAGES(dominfo)    \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.tot_pages :                     \
     (dom_interface_version < 5 ?               \
      dominfo.v2.tot_pages :                    \
      dominfo.v2d5.tot_pages))

#define XEN_GETDOMAININFO_MAX_PAGES(dominfo)    \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.max_pages :                     \
     (dom_interface_version < 5 ?               \
      dominfo.v2.max_pages :                    \
      dominfo.v2d5.max_pages))
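
/*
 * Typical use of the version-independent macros above (an illustrative
 * sketch; this is essentially the pattern used with the
 * virXen_getdomaininfolist() helper defined later in this file):
 *
 *     xen_getdomaininfolist domlist;
 *     int i, nbids;
 *
 *     if (XEN_GETDOMAININFOLIST_ALLOC(domlist, maxids)) {
 *         XEN_GETDOMAININFOLIST_CLEAR(domlist, maxids);
 *         nbids = virXen_getdomaininfolist(handle, 0, maxids, &domlist);
 *         for (i = 0; i < nbids; i++)
 *             ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(domlist, i);
 *         XEN_GETDOMAININFOLIST_FREE(domlist);
 *     }
 */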



struct xen_v0_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v0_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;


struct xen_v2_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v2_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;

/* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
struct xen_v2s3_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    union {
        struct xen_v2d5_getdomaininfo *v;
        uint64_t pad ALIGN_64;
    } buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;


struct xen_v0_domainop {
    domid_t   domain;
};
typedef struct xen_v0_domainop xen_v0_domainop;

/*
 * The information for a destroydomain system hypercall
 */
#define XEN_V0_OP_DESTROYDOMAIN	9
#define XEN_V1_OP_DESTROYDOMAIN	9
#define XEN_V2_OP_DESTROYDOMAIN	2

/*
 * The information for a pausedomain system hypercall
 */
#define XEN_V0_OP_PAUSEDOMAIN	10
#define XEN_V1_OP_PAUSEDOMAIN	10
#define XEN_V2_OP_PAUSEDOMAIN	3

/*
 * The information for an unpausedomain system hypercall
 */
#define XEN_V0_OP_UNPAUSEDOMAIN	11
#define XEN_V1_OP_UNPAUSEDOMAIN	11
#define XEN_V2_OP_UNPAUSEDOMAIN	4

/*
 * The information for a setmaxmem system hypercall
 */
#define XEN_V0_OP_SETMAXMEM	28
#define XEN_V1_OP_SETMAXMEM	28
#define XEN_V2_OP_SETMAXMEM	11

struct xen_v0_setmaxmem {
    domid_t	domain;
    uint64_t	maxmem;
};
typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;

struct xen_v2_setmaxmem {
    uint64_t	maxmem;
};
typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;

struct xen_v2d5_setmaxmem {
    uint64_t	maxmem ALIGN_64;
};
typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;

/*
 * The information for a setmaxvcpu system hypercall
 */
#define XEN_V0_OP_SETMAXVCPU	41
#define XEN_V1_OP_SETMAXVCPU	41
#define XEN_V2_OP_SETMAXVCPU	15

struct xen_v0_setmaxvcpu {
    domid_t	domain;
    uint32_t	maxvcpu;
};
typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;

struct xen_v2_setmaxvcpu {
    uint32_t	maxvcpu;
};
typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;

/*
 * The information for a setvcpumap system hypercall
 * Note that between v1 and v2 the limit of 64 physical CPUs was lifted,
 * hence the difference in structures
 */
#define XEN_V0_OP_SETVCPUMAP	20
#define XEN_V1_OP_SETVCPUMAP	20
#define XEN_V2_OP_SETVCPUMAP	9

struct xen_v0_setvcpumap {
    domid_t	domain;
    uint32_t	vcpu;
    cpumap_t    cpumap;
};
typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;

struct xen_v2_cpumap {
    uint8_t    *bitmap;
    uint32_t    nr_cpus;
};
struct xen_v2_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2_cpumap cpumap;
};
typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;

/* HV version 2, Dom version 5 requires 64-bit alignment */
struct xen_v2d5_cpumap {
    union {
        uint8_t    *v;
        uint64_t   pad ALIGN_64;
    } bitmap;
    uint32_t    nr_cpus;
};
struct xen_v2d5_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2d5_cpumap cpumap;
};
typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;

/*
 * The information for a vcpuinfo system hypercall
 */
#define XEN_V0_OP_GETVCPUINFO   43
#define XEN_V1_OP_GETVCPUINFO	43
#define XEN_V2_OP_GETVCPUINFO   14

struct xen_v0_vcpuinfo {
    domid_t	domain;		/* owner's domain */
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as online */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanoseconds of CPU used */
    uint32_t	cpu;		/* current mapping */
    cpumap_t	cpumap;		/* deprecated in V2 */
};
typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;

struct xen_v2_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as online */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanoseconds of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;

struct xen_v2d5_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as online */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time ALIGN_64; /* nanoseconds of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;

/*
 * from V2 on, the pinning of a vcpu is read with a separate call
 */
#define XEN_V2_OP_GETVCPUMAP	25
typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;
/*
 * The hypercall operation structures also changed in
 * changeset 86d26e6ec89b
 */
/* the old structure */
struct xen_op_v0 {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v0_getdomaininfolistop getdomaininfolist;
        xen_v0_domainop          domain;
        xen_v0_setmaxmem         setmaxmem;
        xen_v0_setmaxvcpu        setmaxvcpu;
        xen_v0_setvcpumap        setvcpumap;
        xen_v0_vcpuinfo          getvcpuinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v0 xen_op_v0;
typedef struct xen_op_v0 xen_op_v1;

/* the new structure for system operations */
struct xen_op_v2_sys {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v2_getdomaininfolistop   getdomaininfolist;
        xen_v2s3_getdomaininfolistop getdomaininfolists3;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_sys xen_op_v2_sys;

/* the new structure for domain operations */
struct xen_op_v2_dom {
    uint32_t cmd;
    uint32_t interface_version;
    domid_t  domain;
    union {
        xen_v2_setmaxmem         setmaxmem;
        xen_v2d5_setmaxmem       setmaxmemd5;
        xen_v2_setmaxvcpu        setmaxvcpu;
        xen_v2_setvcpumap        setvcpumap;
        xen_v2d5_setvcpumap      setvcpumapd5;
        xen_v2_vcpuinfo          getvcpuinfo;
        xen_v2d5_vcpuinfo        getvcpuinfod5;
        xen_v2_getvcpumap        getvcpumap;
        xen_v2d5_getvcpumap      getvcpumapd5;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_dom xen_op_v2_dom;

#include "internal.h"
#include "driver.h"
#include "xen_unified.h"
#include "xen_internal.h"

#define XEN_HYPERVISOR_SOCKET "/proc/xen/privcmd"
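/* device node exposed by the Xen privcmd kernel driver; hypercalls are
   issued by passing a hypercall_t through ioctl() on a fd opened here */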

#ifndef PROXY
static const char * xenHypervisorGetType(virConnectPtr conn);
static unsigned long xenHypervisorGetMaxMemory(virDomainPtr domain);
#endif

#ifndef PROXY
virDriver xenHypervisorDriver = {
    -1,
    "Xen",
    (DOM0_INTERFACE_VERSION >> 24) * 1000000 +
    ((DOM0_INTERFACE_VERSION >> 16) & 0xFF) * 1000 +
    (DOM0_INTERFACE_VERSION & 0xFFFF),
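    /* the expression above encodes the dom0 interface version as
       major * 1,000,000 + minor * 1,000 + release */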
    xenHypervisorOpen, /* open */
    xenHypervisorClose, /* close */
    xenHypervisorGetType, /* type */
    xenHypervisorGetVersion, /* version */
    xenHypervisorGetMaxVcpus, /* getMaxVcpus */
    NULL, /* nodeGetInfo */
    xenHypervisorGetCapabilities, /* getCapabilities */
    xenHypervisorListDomains, /* listDomains */
    xenHypervisorNumOfDomains, /* numOfDomains */
    NULL, /* domainCreateLinux */
    NULL, /* domainLookupByID */
    NULL, /* domainLookupByUUID */
    NULL, /* domainLookupByName */
    xenHypervisorPauseDomain, /* domainSuspend */
    xenHypervisorResumeDomain, /* domainResume */
    NULL, /* domainShutdown */
    NULL, /* domainReboot */
    xenHypervisorDestroyDomain, /* domainDestroy */
    NULL, /* domainGetOSType */
    xenHypervisorGetMaxMemory, /* domainGetMaxMemory */
    xenHypervisorSetMaxMemory, /* domainSetMaxMemory */
    NULL, /* domainSetMemory */
    xenHypervisorGetDomainInfo, /* domainGetInfo */
    NULL, /* domainSave */
    NULL, /* domainRestore */
    NULL, /* domainCoreDump */
    xenHypervisorSetVcpus, /* domainSetVcpus */
    xenHypervisorPinVcpu, /* domainPinVcpu */
    xenHypervisorGetVcpus, /* domainGetVcpus */
    xenHypervisorGetVcpuMax, /* domainGetMaxVcpus */
    NULL, /* domainDumpXML */
    NULL, /* listDefinedDomains */
    NULL, /* numOfDefinedDomains */
    NULL, /* domainCreate */
    NULL, /* domainDefineXML */
    NULL, /* domainUndefine */
    NULL, /* domainAttachDevice */
    NULL, /* domainDetachDevice */
    NULL, /* domainGetAutostart */
    NULL, /* domainSetAutostart */
};
#endif /* !PROXY */

/**
 * virXenError:
 * @error: the error number
 * @info: extra information string
 * @value: extra information number
 *
 * Handle an error at the Xen hypervisor interface
 */
static void
virXenError(virErrorNumber error, const char *info, int value)
{
    const char *errmsg;
    if ((error == VIR_ERR_OK) || (in_init != 0))
        return;

    errmsg = __virErrorMsg(error, info);
    __virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
                    errmsg, info, NULL, value, 0, errmsg, info);
}

/**
 * virXenPerror:
 * @conn: the connection (if available)
 * @msg: name of system call or file (as in perror(3))
 *
 * Raise error from a failed system call, using errno as the source.
 */
static void
virXenPerror (virConnectPtr conn, const char *msg)
{
    char *msg_s;

    msg_s = malloc (strlen (msg) + 10);
    if (msg_s) {
        strcpy (msg_s, msg);
        strcat (msg_s, ": %s");
    }

    __virRaiseError (conn, NULL, NULL,
                     VIR_FROM_XEN, VIR_ERR_SYSTEM_ERROR, VIR_ERR_ERROR,
                     msg, NULL, NULL, errno, 0,
                     msg_s ? msg_s : msg, strerror (errno));
}

/**
 * xenHypervisorDoV0Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation through the old interface;
 * this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
{
    int ret;
    v0_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_version << 8;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}
/**
 * xenHypervisorDoV1Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v1 operation; this leads to a hypervisor call through
 * ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV1Op(int handle, xen_op_v1* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * xenHypervisorDoV2Sys:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v2 system operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = sys_interface_version;
    hc.op = __HYPERVISOR_sysctl;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * xenHypervisorDoV2Dom:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor domain operation structure
 *
 * Do a hypervisor v2 domain operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = dom_interface_version;
    hc.op = __HYPERVISOR_domctl;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
 * Do a low level hypercall to list information about existing domains
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
virXen_getdomaininfolist(int handle, int first_domain, int maxids,
                         xen_getdomaininfolist *dominfos)
{
    int ret = -1;

    if (mlock(XEN_GETDOMAININFOLIST_DATA(dominfos),
              XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    XEN_GETDOMAININFO_SIZE * maxids);
        return (-1);
    }
    if (hypervisor_version > 1) {
        xen_op_v2_sys op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;

        if (sys_interface_version < 3) {
            op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolist.max_domains = maxids;
            op.u.getdomaininfolist.buffer = dominfos->v2;
            op.u.getdomaininfolist.num_domains = maxids;
        } else {
            op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolists3.max_domains = maxids;
            op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
            op.u.getdomaininfolists3.num_domains = maxids;
        }
        ret = xenHypervisorDoV2Sys(handle, &op);

        if (ret == 0) {
            if (sys_interface_version < 3)
                ret = op.u.getdomaininfolist.num_domains;
            else
                ret = op.u.getdomaininfolists3.num_domains;
        }
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    }
    if (munlock(XEN_GETDOMAININFOLIST_DATA(dominfos),
                XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    XEN_GETDOMAININFO_SIZE * maxids);
        ret = -1;
    }
    return(ret);
}

static int
virXen_getdomaininfo(int handle, int first_domain,
                     xen_getdomaininfo *dominfo) {
    xen_getdomaininfolist dominfos;

    if (hypervisor_version < 2) {
        dominfos.v0 = &(dominfo->v0);
    } else {
        dominfos.v2 = &(dominfo->v2);
    }

    return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
}


#ifndef PROXY
/**
 * virXen_pausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to pause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_pausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_PAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_unpausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to unpause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_unpausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_destroydomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to destroy the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_destroydomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_DESTROYDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setmaxmem:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @memory: the amount of memory in kilobytes
 *
 * Do a low level hypercall to change the max memory amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxmem(int handle, int id, unsigned long memory)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXMEM;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5)
            op.u.setmaxmem.maxmem = memory;
        else
            op.u.setmaxmemd5.maxmem = memory;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setmaxvcpus:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpus: the numbers of vcpus
 *
 * Do a low level hypercall to change the max vcpus amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXVCPU;
        op.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setvcpumap:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to change the pinning for vcpu
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setvcpumap(int handle, int id, unsigned int vcpu,
                  unsigned char * cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        if (mlock(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " locking", maplen);
            return (-1);
        }
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETVCPUMAP;
        op.domain = (domid_t) id;
1130 1131 1132 1133 1134 1135 1136 1137 1138
        if (dom_interface_version < 5) {
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap.bitmap = cpumap;
            op.u.setvcpumap.cpumap.nr_cpus = maplen * 8;
        } else {
            op.u.setvcpumapd5.vcpu = vcpu;
            op.u.setvcpumapd5.cpumap.bitmap.v = cpumap;
            op.u.setvcpumapd5.cpumap.nr_cpus = maplen * 8;
        }
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (munlock(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " release", maplen);
            ret = -1;
        }
    } else {
        cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
        uint64_t *pm = &xen_cpumap;
        int j;
        if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
            return (-1);
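        /* pack the caller's byte-wise map into the fixed 64-bit cpumap_t,
           least significant byte first */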
        memset(pm, 0, sizeof(cpumap_t));
        for (j = 0; j < maplen; j++)
            *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV1Op(handle, &op);
        } else if (hypervisor_version == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV0Op(handle, &op);
        }
    }
    return(ret);
}
#endif /* !PROXY*/

/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: where to store the retrieved vcpu information
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to get information on a vcpu and its pinning
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5)
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (ret < 0)
            return(-1);
        ipt->number = vcpu;
        if (dom_interface_version < 5) {
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
        }
        if ((cpumap != NULL) && (maplen > 0)) {
            if (mlock(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " locking", maplen);
                return (-1);
            }
            memset(cpumap, 0, maplen);
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
1246 1247 1248 1249 1250 1251 1252 1253 1254
            if (dom_interface_version < 5) {
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (munlock(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " release", maplen);
                ret = -1;
            }
        }
    } else {
        int mapl = maplen;
        int cpu;

        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return(-1);
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hypervisor_version == 0) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return(-1);
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
    }
    return(ret);
}
/**
 * xenHypervisorInit:
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used, i.e. pre or post changeset 10277.
 */
int
xenHypervisorInit(void)
{
    int fd, ret, cmd, errcode;
    hypercall_t hc;
    v0_hypercall_t v0_hc;
    xen_getdomaininfo info;
    virVcpuInfoPtr ipt;

    if (initialized) {
        if (hypervisor_version == -1)
            return (-1);
        return(0);
    }
    initialized = 1;
    in_init = 1;

    /* Compile regular expressions used by xenHypervisorGetCapabilities.
     * Note that errors here are really internal errors since these
     * regexps should never fail to compile.
     */
    errcode = regcomp (&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_hvm_rec, error, sizeof error);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&flags_pae_rec, flags_pae_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_pae_rec, error, sizeof error);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&xen_cap_rec, xen_cap_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &xen_cap_rec, error, sizeof error);
        regfree (&xen_cap_rec);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }

    /* Xen hypervisor version detection begins. */
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        hypervisor_version = -1;
        return(-1);
    }
    fd = ret;

    /*
     * The size of the hypervisor call block changed in July 2006;
     * this detects whether we are using the new or old hypercall_t structure.
     */
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;
    hc.arg[1] = 0;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
#ifdef DEBUG
        fprintf(stderr, "Using new hypervisor call: %X\n", ret);
#endif
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        goto detect_v2;
    }
    /*
     * check if the old hypercalls are actually working
     */
    v0_hc.op = __HYPERVISOR_xen_version;
    v0_hc.arg[0] = (unsigned long) XENVER_version;
    v0_hc.arg[1] = 0;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
    if ((ret != -1) && (ret != 0)) {
#ifdef DEBUG
        fprintf(stderr, "Using old hypervisor call: %X\n", ret);
#endif
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        hypervisor_version = 0;
        goto done;
    }

    /*
     * we failed to make any hypercall
     */

    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    return(-1);

 detect_v2:
    /*
     * The hypercalls were refactored into 3 different sections in August 2006.
     * Try to detect if we are running a version post 3.0.2 with the new ones
     * or the old ones
     */
    hypervisor_version = 2;

    ipt = malloc(sizeof(virVcpuInfo));
    if (ipt == NULL){
#ifdef DEBUG
        fprintf(stderr, "Memory allocation failed at xenHypervisorInit()\n");
#endif
        return(-1);
    }
    /* Currently consider RHEL 5.0, Fedora 7, and xen-unstable */
    sys_interface_version = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* RHEL 5.0 */
        dom_interface_version = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver2 dom ver3\n");
#endif
            goto done;
        }
        /* Fedora 7 */
        dom_interface_version = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver2 dom ver4\n");
#endif
            goto done;
        }
    }

    sys_interface_version = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* xen-unstable */
        dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver3 dom ver5\n");
#endif
            goto done;
        }
    }
    hypervisor_version = 1;
    sys_interface_version = -1;
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
#ifdef DEBUG
        fprintf(stderr, "Using hypervisor call v1\n");
#endif
        goto done;
    }

    /*
     * we failed to make the getdomaininfolist hypercall
     */

    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    return(-1);

 done:
    close(fd);
    in_init = 0;
    return(0);
}

/**
 * xenHypervisorOpen:
 * @conn: pointer to the connection block
 * @name: URL for the target, NULL for local
 * @flags: combination of virDrvOpenFlag(s)
 *
 * Connects to the Xen hypervisor.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorOpen(virConnectPtr conn ATTRIBUTE_UNUSED,
                  const char *name ATTRIBUTE_UNUSED, int flags)
{
    int ret;
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (initialized == 0)
        if (xenHypervisorInit() == -1)
            return -1;

    priv->handle = -1;
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        if (!(flags & VIR_DRV_OPEN_QUIET))
            virXenError(VIR_ERR_NO_XEN, XEN_HYPERVISOR_SOCKET, 0);
        return (-1);
    }

    priv->handle = ret;
    return(0);
}

/**
 * xenHypervisorClose:
 * @conn: pointer to the connection block
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
xenHypervisorClose(virConnectPtr conn)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return (-1);
    priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (priv->handle < 0)
        return -1;

    ret = close(priv->handle);
    if (ret < 0)
        return (-1);
    return (0);
}


#ifndef PROXY
/**
 * xenHypervisorGetType:
 * @conn: pointer to the Xen Hypervisor block
 *
 * Get the name of the hypervisor driver used.
 *
 * Returns the static string "Xen", or NULL in case of error.
 */
static const char *
xenHypervisorGetType(virConnectPtr conn)
{
    if (!VIR_IS_CONNECT(conn)) {
        virXenError(VIR_ERR_INVALID_CONN, __FUNCTION__, 0);
        return (NULL);
    }
    return("Xen");
}
#endif

/**
 * xenHypervisorGetVersion:
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
 *
 * Call the hypervisor to extract its own internal API version
 *
 * Returns 0 in case of success, -1 in case of error
 */
int
xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || hvVer == NULL)
        return (-1);
    *hvVer = (hv_version >> 16) * 1000000 + (hv_version & 0xFFFF) * 1000;
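    /* e.g. a hypervisor reporting hv_version 0x00030001 (Xen 3.1)
       yields *hvVer == 3001000 */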
    return(0);
}

/**
 * xenHypervisorMakeCapabilitiesXML:
 * @conn: pointer to the connection block
 * @hostmachine: the host machine architecture (as given by uname)
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Return the capabilities of this hypervisor.
 */
char *
xenHypervisorMakeCapabilitiesXML(virConnectPtr conn ATTRIBUTE_UNUSED,
                                 const char *hostmachine,
                                 FILE *cpuinfo, FILE *capabilities)
{
    char line[1024], *str, *token;
    regmatch_t subs[4];
    char *saveptr = NULL;
    int i, r;

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
    struct guest_arch {
        const char *model;
        int bits;
        int hvm;
        int pae;
        int nonpae;
        int ia64_be;
    } guest_archs[32];
    int nr_guest_archs = 0;

    virBufferPtr xml;
    char *xml_str;

1651 1652
    memset(guest_archs, 0, sizeof(guest_archs));

    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
    if (cpuinfo) {
        while (fgets (line, sizeof line, cpuinfo)) {
            if (regexec (&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
                && subs[0].rm_so != -1) {
                strncpy (hvm_type,
                         &line[subs[1].rm_so], subs[1].rm_eo-subs[1].rm_so+1);
                hvm_type[subs[1].rm_eo-subs[1].rm_so] = '\0';
            } else if (regexec (&flags_pae_rec, line, 0, NULL, 0) == 0)
                host_pae = 1;
        }
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */
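
    /* For example (illustrative values only), a 64-bit x86 host with
     * HVM support typically reports a line such as:
     *
     *    xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
     */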

    /* Expecting one line in this file - ignore any more.
     * The file handle may be NULL (e.g. the sysfs file does not exist),
     * in which case no guest sections are emitted. */
    if (capabilities && fgets (line, sizeof line, capabilities)) {
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof guest_archs / sizeof guest_archs[0]
                 && (token = strtok_r (str, " ", &saveptr)) != NULL;
             str = NULL) {

            if (regexec (&xen_cap_rec, token, sizeof subs / sizeof subs[0],
                         subs, 0) == 0) {
                int hvm = strncmp (&token[subs[1].rm_so], "hvm", 3) == 0;
                const char *model;
                int bits, pae = 0, nonpae = 0, ia64_be = 0;
                if (strncmp (&token[subs[2].rm_so], "x86_32", 6) == 0) {
                    model = "i686";
                    bits = 32;
                    if (strncmp (&token[subs[3].rm_so], "p", 1) == 0)
                        pae = 1;
                    else
                        nonpae = 1;
                }
                else if (strncmp (&token[subs[2].rm_so], "x86_64", 6) == 0) {
                    model = "x86_64";
                    bits = 64;
                }
                else if (strncmp (&token[subs[2].rm_so], "ia64", 4) == 0) {
                    model = "ia64";
                    bits = 64;
                    if (strncmp (&token[subs[3].rm_so], "be", 2) == 0)
                        ia64_be = 1;
                }
                else if (strncmp (&token[subs[2].rm_so], "powerpc64", 9) == 0) {
                    model = "ppc64";
                    bits = 64;
                } else {
                    /* XXX surely no other Xen archs exist  */
                    continue;
                }

                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (!strcmp(guest_archs[i].model, model) &&
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }

                /* Too many arch flavours - highly unlikely ! */
                if (i >= sizeof(guest_archs)/sizeof(guest_archs[0]))
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

                guest_archs[i].model = model;
                guest_archs[i].bits = bits;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports the
                   capabilities separately, so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
            }
        }
    }

    /* Construct the final XML. */
    xml = virBufferNew (1024);
    if (!xml) return NULL;
    r = virBufferVSprintf (xml,
                           "\
<capabilities>\n\
  <host>\n\
    <cpu>\n\
      <arch>%s</arch>\n\
      <features>\n",
                           hostmachine);
    if (r == -1) goto vir_buffer_failed;

    if (strcmp (hvm_type, "") != 0) {
        r = virBufferVSprintf (xml,
                               "\
        <%s/>\n",
                               hvm_type);
        if (r == -1) goto vir_buffer_failed;
    }
    if (host_pae) {
        r = virBufferAdd (xml, "\
        <pae/>\n", -1);
        if (r == -1) goto vir_buffer_failed;
    }
    r = virBufferAdd (xml,
                      "\
      </features>\n\
    </cpu>\n\
  </host>\n", -1);
    if (r == -1) goto vir_buffer_failed;

    for (i = 0; i < nr_guest_archs; ++i) {
        r = virBufferVSprintf (xml,
                               "\
\n\
  <guest>\n\
    <os_type>%s</os_type>\n\
    <arch name=\"%s\">\n\
      <wordsize>%d</wordsize>\n\
      <domain type=\"xen\"></domain>\n",
                               guest_archs[i].hvm ? "hvm" : "xen",
                               guest_archs[i].model,
                               guest_archs[i].bits);
        if (r == -1) goto vir_buffer_failed;
        if (guest_archs[i].hvm) {
            r = virBufferVSprintf (xml,
                              "\
      <emulator>/usr/lib%s/xen/bin/qemu-dm</emulator>\n\
      <machine>pc</machine>\n\
      <machine>isapc</machine>\n\
      <loader>/usr/lib/xen/boot/hvmloader</loader>\n",
                                   guest_archs[i].bits == 64 ? "64" : "");
            if (r == -1) goto vir_buffer_failed;
        }
        r = virBufferAdd (xml,
                          "\
    </arch>\n\
    <features>\n", -1);
        if (r == -1) goto vir_buffer_failed;
        if (guest_archs[i].pae) {
            r = virBufferAdd (xml,
                              "\
      <pae/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        if (guest_archs[i].nonpae) {
            r = virBufferAdd (xml,
                              "\
      <nonpae/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        if (guest_archs[i].ia64_be) {
            r = virBufferAdd (xml,
                              "\
      <ia64_be/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        r = virBufferAdd (xml,
                          "\
    </features>\n\
  </guest>\n", -1);
        if (r == -1) goto vir_buffer_failed;
    }
    r = virBufferAdd (xml,
                      "\
</capabilities>\n", -1);
    if (r == -1) goto vir_buffer_failed;
    xml_str = strdup (xml->content);
    if (!xml_str) {
        virXenError(VIR_ERR_NO_MEMORY, "strdup", 0);
        goto vir_buffer_failed;
    }
    virBufferFree (xml);

    return xml_str;

 vir_buffer_failed:
    virBufferFree (xml);
    return NULL;
}

/**
 * xenHypervisorGetCapabilities:
 * @conn: pointer to the connection block
 *
 * Return the capabilities of this hypervisor.
 */
char *
xenHypervisorGetCapabilities (virConnectPtr conn)
{
    char *xml;
    FILE *cpuinfo, *capabilities;
    struct utsname utsname;

    /* Really, this never fails - look at the man-page. */
    uname (&utsname);

    cpuinfo = fopen ("/proc/cpuinfo", "r");
    if (cpuinfo == NULL) {
        if (errno != ENOENT) {
            virXenPerror (conn, "/proc/cpuinfo");
            return NULL;
        }
    }

    capabilities = fopen ("/sys/hypervisor/properties/capabilities", "r");
    if (capabilities == NULL) {
        if (errno != ENOENT) {
            if (cpuinfo)
                fclose(cpuinfo);
            virXenPerror (conn, "/sys/hypervisor/properties/capabilities");
            return NULL;
        }
    }

    xml = xenHypervisorMakeCapabilitiesXML(conn, utsname.machine, cpuinfo, capabilities);

    if (cpuinfo)
        fclose(cpuinfo);
    if (capabilities)
        fclose(capabilities);

    return xml;
}
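
/*
 * Illustrative only (not part of the driver): the returned document is
 * heap-allocated (strdup'd above), so the caller owns it:
 *
 *     char *caps = xenHypervisorGetCapabilities(conn);
 *     if (caps != NULL) {
 *         fputs(caps, stdout);
 *         free(caps);
 *     }
 */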

/**
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorNumOfDomains(virConnectPtr conn)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids;
    static int last_maxids = 2;
    int maxids = last_maxids;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (-1);

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virXenError(VIR_ERR_NO_MEMORY, _("allocating %d domain info"),
                    maxids);
        return(-1);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (ret < 0)
        return (-1);

    nbids = ret;
    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nbids == maxids) {
        if (maxids < 65000) {
            last_maxids *= 2;
            maxids *= 2;
            goto retry;
        }
        nbids = -1;
    }
    if ((nbids < 0) || (nbids > maxids))
        return(-1);
    return(nbids);
}

/**
 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their IDs in @ids
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids, i;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 ||
        (ids == NULL) || (maxids < 1))
        return (-1);

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virXenError(VIR_ERR_NO_MEMORY, "allocating %d domain info",
                    maxids);
        return(-1);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
    memset(ids, 0, maxids * sizeof(int));

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (ret < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return (-1);
    }

    nbids = ret;
    if ((nbids < 0) || (nbids > maxids)) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return(-1);
    }

    for (i = 0;i < nbids;i++) {
        ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
    }

    XEN_GETDOMAININFOLIST_FREE(dominfos);
    return (nbids);
}
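
/*
 * Illustrative only (not part of the driver): enumerating active domains
 * by pairing the two calls above; the extra slack guards against a
 * domain starting between the two hypercalls:
 *
 *     int n = xenHypervisorNumOfDomains(conn);
 *     if (n > 0) {
 *         int *ids = malloc((n + 5) * sizeof(int));
 *         if (ids != NULL) {
 *             n = xenHypervisorListDomains(conn, ids, n + 5);
 *             for (int i = 0; i < n; i++)
 *                 printf("domain id %d\n", ids[i]);
 *             free(ids);
 *         }
 *     }
 */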

/**
 * xenHypervisorGetMaxVcpus:
 * @conn: pointer to the connection block
 * @type: ignored
 *
 * Returns the maximum number of virtual CPUs supported by Xen,
 * or -1 in case of error.
 */
int
xenHypervisorGetMaxVcpus(virConnectPtr conn,
                         const char *type ATTRIBUTE_UNUSED)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (-1);

    return MAX_VIRT_CPUS;
}

/**
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: domain id
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
unsigned long
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;

    if (conn == NULL)
        return 0;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return 0;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (0);

    return((unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages);
}
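
/*
 * Illustrative only (not part of the driver): on a host with 4 KiB
 * pages, a domain whose max_pages value is 65536 reports
 * 65536 * 4 = 262144 kilobytes (256 MiB):
 *
 *     unsigned long kb = xenHypervisorGetDomMaxMemory(conn, 0);
 *     if (kb != 0)
 *         printf("dom0 max memory: %lu MiB\n", kb / 1024);
 */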

#ifndef PROXY
/**
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
static unsigned long
xenHypervisorGetMaxMemory(virDomainPtr domain)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return 0;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (0);

    return(xenHypervisorGetDomMaxMemory(domain->conn, domain->id));
}
#endif

/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)
        return (-1);

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (-1);

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flags */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            /* The domain is shutdown.  Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NONE;
    }

    /*
     * the API brings back the cpu time in nanoseconds, which is
     * stored as-is; the memory sizes are converted from page
     * counts to kilobytes
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    if (info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
    return (0);
}
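
/*
 * Illustrative only (not part of the driver): querying Domain0 (id 0)
 * and printing a few of the returned fields:
 *
 *     virDomainInfo dinfo;
 *     if (xenHypervisorGetDomInfo(conn, 0, &dinfo) == 0)
 *         printf("state %d, %lu KiB, %hu vcpus\n",
 *                dinfo.state, dinfo.memory, dinfo.nrVirtCpu);
 */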

/**
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || info == NULL ||
        (domain->id < 0))
        return (-1);

    return(xenHypervisorGetDomInfo(domain->conn, domain->id, info));
}

#ifndef PROXY
/**
 * xenHypervisorPauseDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to pause the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorPauseDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_pausedomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorResumeDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to resume the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorResumeDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_unpausedomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}
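
/*
 * Illustrative only (not part of the driver): pausing an active domain
 * object and resuming it once some work is done:
 *
 *     if (xenHypervisorPauseDomain(dom) == 0) {
 *         // ... inspect or snapshot the suspended guest ...
 *         xenHypervisorResumeDomain(dom);
 *     }
 */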

/**
 * xenHypervisorDestroyDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to destroy the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorDestroyDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_destroydomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorSetMaxMemory:
 * @domain: pointer to the domain block
 * @memory: the max memory size in kilobytes.
 *
 * Do a hypervisor call to change the maximum amount of memory used
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_setmaxmem(priv->handle, domain->id, memory);
    if (ret < 0)
        return (-1);
    return (0);
}
#endif /* PROXY */
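
/*
 * Illustrative only (not part of the driver): raising a guest's memory
 * ceiling to 512 MiB; note the @memory argument is in kilobytes:
 *
 *     if (xenHypervisorSetMaxMemory(dom, 512 * 1024) < 0)
 *         fprintf(stderr, "failed to set max memory\n");
 */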

#ifndef PROXY
/**
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0 || nvcpus < 1)
        return (-1);

    ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
    if (ret < 0)
        return (-1);
    return (0);
}
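
/*
 * Illustrative only (not part of the driver): giving a guest two
 * virtual CPUs; the count must stay within xenHypervisorGetVcpuMax():
 *
 *     if (xenHypervisorSetVcpus(dom, 2) < 0)
 *         fprintf(stderr, "failed to change vcpu count\n");
 */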

/**
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
 *
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (cpumap == NULL) || (maplen < 1))
        return (-1);

    ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
                            cpumap, maplen);
    if (ret < 0)
        return (-1);
    return (0);
}
#endif
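
/*
 * Illustrative only (not part of the driver): pinning virtual CPU 0 to
 * physical CPU 3; bit N of the map selects physical CPU N, so one map
 * byte covers hosts with up to 8 physical CPUs:
 *
 *     unsigned char cpumap[1] = { 1 << 3 };
 *     xenHypervisorPinVcpu(dom, 0, cpumap, sizeof(cpumap));
 */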

/**
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 *	If cpumaps is NULL, then no cpumap information is returned by the API.
 *	It's assumed there are <maxinfo> cpumaps in the cpumaps array.
 *	The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *	(ie: calloc(maxinfo, maplen)).
 *	One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *	underlying virtualization system (Xen...).
 *
 * Extract information about virtual CPUs of domain, store it in info array
 * and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
 */
#ifndef PROXY
int
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
{
    xen_getdomaininfo dominfo;
    int ret;
    xenUnifiedPrivatePtr priv;
    virVcpuInfoPtr ipt;
    int nbinfo, i;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (info == NULL) || (maxinfo < 1) ||
        (sizeof(cpumap_t) & 7))
        return (-1);
    if ((cpumaps != NULL) && (maplen < 1))
        return -1;

    /* first get the number of virtual CPUs in this domain */
    XEN_GETDOMAININFO_CLEAR(dominfo);
    ret = virXen_getdomaininfo(priv->handle, domain->id,
                               &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
        return (-1);
    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                                      maplen);
            if (ret < 0)
                return(-1);
        } else {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt, NULL, 0);
            if (ret < 0)
                return(-1);
        }
    }
    return nbinfo;
}
#endif
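
/*
 * Illustrative only (not part of the driver): collecting per-vcpu info
 * and pinning maps for up to 4 vcpus, with one map byte per vcpu:
 *
 *     virVcpuInfo vinfo[4];
 *     unsigned char maps[4] = { 0 };
 *     int n = xenHypervisorGetVcpus(dom, vinfo, 4, maps, 1);
 *     for (int i = 0; i < n; i++)
 *         printf("vcpu %u on pcpu %d\n", vinfo[i].number, vinfo[i].cpu);
 */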

/**
 * xenHypervisorGetVcpuMax:
 * @domain: pointer to domain object
 *
 *  Returns the maximum number of virtual CPUs supported for
 *  the guest VM. If the guest is inactive, this is the maximum
 *  number of CPUs defined by Xen. If the guest is running this
 *  reflects the maximum number of virtual CPUs the guest was
 *  booted with.
 */
int
xenHypervisorGetVcpuMax(virDomainPtr domain)
{
    xen_getdomaininfo dominfo;
    int ret;
    int maxcpu;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0)
        return (-1);

    /* inactive domain */
    if (domain->id < 0) {
        maxcpu = MAX_VIRT_CPUS;
    } else {
        XEN_GETDOMAININFO_CLEAR(dominfo);
        ret = virXen_getdomaininfo(priv->handle, domain->id,
                                   &dominfo);

        if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
            return (-1);
        maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
    }

    return maxcpu;
}

#endif /* WITH_XEN */

/*
 * vim: set tabstop=4:
 * vim: set shiftwidth=4:
 * vim: set expandtab:
 */
/*
 * Local variables:
 *  indent-tabs-mode: nil
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */