/*
 * xen_internal.c: direct access to Xen hypervisor level
 *
 * Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * See COPYING.LIB for the License of this software
 *
 * Daniel Veillard <veillard@redhat.com>
 */

#ifdef WITH_XEN

#include "config.h"

#include <stdio.h>
#include <string.h>
/* required for uint8_t, uint32_t, etc ... */
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <limits.h>
#include <regex.h>
#include <errno.h>
#include <sys/utsname.h>

/* required for dom0_getdomaininfo_t */
#include <xen/dom0_ops.h>
#include <xen/version.h>
#include <xen/xen.h>
#ifdef HAVE_XEN_LINUX_PRIVCMD_H
#include <xen/linux/privcmd.h>
#else
#ifdef HAVE_XEN_SYS_PRIVCMD_H
#include <xen/sys/privcmd.h>
#endif
#endif

/* required for shutdown flags */
#include <xen/sched.h>

#include "buf.h"

/* #define DEBUG */
/*
 * So far there are two versions of the structures usable for doing
 * hypervisor calls.
 */
/* the old one */
typedef struct v0_hypercall_struct {
    unsigned long op;
    unsigned long arg[5];
} v0_hypercall_t;
#define XEN_V0_IOCTL_HYPERCALL_CMD \
        _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))

/* the new one */
typedef struct v1_hypercall_struct
{
    uint64_t op;
    uint64_t arg[5];
} v1_hypercall_t;
#define XEN_V1_IOCTL_HYPERCALL_CMD                  \
    _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))

typedef v1_hypercall_t hypercall_t;
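
/*
 * Note (illustrative): both ioctl commands encode the argument size
 * via _IOC(), so on 32-bit builds, where the v0 "unsigned long"
 * fields are half the width of the v1 uint64_t ones, the two command
 * numbers differ and the privcmd driver can tell the layouts apart.
 * The command accepted by the running kernel is recorded in
 * xen_ioctl_hypercall_cmd during initialization.
 */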

#ifndef __HYPERVISOR_sysctl
#define __HYPERVISOR_sysctl 35
#endif
#ifndef __HYPERVISOR_domctl
#define __HYPERVISOR_domctl 36
#endif

static int xen_ioctl_hypercall_cmd = 0;
static int initialized = 0;
static int in_init = 0;
static int hv_version = 0;
static int hypervisor_version = 2;
static int sys_interface_version = -1;
static int dom_interface_version = -1;
static int kb_per_pages = 0;

/* Regular expressions used by xenHypervisorGetCapabilities, and
 * compiled once by xenHypervisorInit.  Note that these are POSIX.2
 * extended regular expressions (regex(7)).
 */
static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
static regex_t flags_hvm_rec;
static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
static regex_t flags_pae_rec;
static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
static regex_t xen_cap_rec;
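
/*
 * For example (illustrative): flags_hvm_re matches a /proc/cpuinfo
 * line such as
 *     flags           : fpu vme ... svm ...
 * while xen_cap_re matches capability strings of the form
 *     xen-3.0-x86_64   or   hvm-3.0-x86_32p
 */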

/*
 * The content of the structures for a getdomaininfolist system hypercall
 */
#ifndef DOMFLAGS_DYING
#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
#define DOMFLAGS_HVM       (1<<1) /* Domain is HVM                           */
#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
#define DOMFLAGS_CPUSHIFT       8
#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
#define DOMFLAGS_SHUTDOWNSHIFT 16
#endif

/*
 * These flags explain why a system is in the state of "shutdown".  Normally,
 * they are defined in xen/sched.h
 */
#ifndef SHUTDOWN_poweroff
#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
#endif
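
/*
 * Decoding sketch (illustrative only): the guest-supplied shutdown
 * reason is extracted from the domain flags with the masks above and
 * compared against these codes, e.g.
 *
 *     int reason = (flags >> DOMFLAGS_SHUTDOWNSHIFT) & DOMFLAGS_SHUTDOWNMASK;
 *     int crashed = (flags & DOMFLAGS_SHUTDOWN) && (reason == SHUTDOWN_crash);
 */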

#define XEN_V0_OP_GETDOMAININFOLIST	38
#define XEN_V1_OP_GETDOMAININFOLIST	38
#define XEN_V2_OP_GETDOMAININFOLIST	6

struct xen_v0_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see above */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    unsigned long shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;

struct xen_v2_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see above */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    uint64_t shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;


/* As of Hypervisor Call v2,  DomCtl v5 we are now 8-byte aligned
   even on 32-bit archs when dealing with uint64_t */
#define ALIGN_64 __attribute__((aligned(8)))

struct xen_v2d5_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see above */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;

union xen_getdomaininfo {
    struct xen_v0_getdomaininfo v0;
    struct xen_v2_getdomaininfo v2;
    struct xen_v2d5_getdomaininfo v2d5;
};
typedef union xen_getdomaininfo xen_getdomaininfo;

union xen_getdomaininfolist {
    struct xen_v0_getdomaininfo *v0;
    struct xen_v2_getdomaininfo *v2;
    struct xen_v2d5_getdomaininfo *v2d5;
};
typedef union xen_getdomaininfolist xen_getdomaininfolist;


struct xen_v2_getschedulerid {
    uint32_t sched_id; /* Get Scheduler ID from Xen */
};
typedef struct xen_v2_getschedulerid xen_v2_getschedulerid;


union xen_getschedulerid {
    struct xen_v2_getschedulerid *v2;
};
typedef union xen_getschedulerid xen_getschedulerid;


#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size)                      \
    (hypervisor_version < 2 ?                                           \
     ((domlist.v0 = malloc(sizeof(xen_v0_getdomaininfo)*(size))) != NULL) : \
     (dom_interface_version < 5 ?                                       \
      ((domlist.v2 = malloc(sizeof(xen_v2_getdomaininfo)*(size))) != NULL) : \
      ((domlist.v2d5 = malloc(sizeof(xen_v2d5_getdomaininfo)*(size))) != NULL)))

#define XEN_GETDOMAININFOLIST_FREE(domlist)        \
    (hypervisor_version < 2 ?                      \
     free(domlist.v0) :                            \
     (dom_interface_version < 5 ?                  \
      free(domlist.v2) :                           \
      free(domlist.v2d5)))

#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size)                     \
    (hypervisor_version < 2 ?                                          \
     memset(domlist.v0, 0, sizeof(xen_v0_getdomaininfo) * size) :      \
     (dom_interface_version < 5 ?                                      \
      memset(domlist.v2, 0, sizeof(xen_v2_getdomaininfo) * size) :     \
      memset(domlist.v2d5, 0, sizeof(xen_v2d5_getdomaininfo) * size)))

#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n)    \
    (hypervisor_version < 2 ?                       \
     domlist.v0[n].domain :                         \
     (dom_interface_version < 5 ?                   \
      domlist.v2[n].domain :                        \
      domlist.v2d5[n].domain))

#define XEN_GETDOMAININFOLIST_DATA(domlist)        \
    (hypervisor_version < 2 ?                      \
     (void*)(domlist->v0) :                        \
     (dom_interface_version < 5 ?                  \
      (void*)(domlist->v2) :                       \
      (void*)(domlist->v2d5)))

#define XEN_GETDOMAININFO_SIZE                     \
    (hypervisor_version < 2 ?                      \
     sizeof(xen_v0_getdomaininfo) :                \
     (dom_interface_version < 5 ?                  \
      sizeof(xen_v2_getdomaininfo) :               \
      sizeof(xen_v2d5_getdomaininfo)))

#define XEN_GETDOMAININFO_CLEAR(dominfo)                           \
    (hypervisor_version < 2 ?                                      \
     memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) :      \
     (dom_interface_version < 5 ?                                  \
      memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)) :     \
      memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo))))

#define XEN_GETDOMAININFO_DOMAIN(dominfo)       \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.domain :                        \
     (dom_interface_version < 5 ?               \
      dominfo.v2.domain :                       \
      dominfo.v2d5.domain))

#define XEN_GETDOMAININFO_CPUTIME(dominfo)      \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.cpu_time :                      \
     (dom_interface_version < 5 ?               \
      dominfo.v2.cpu_time :                     \
      dominfo.v2d5.cpu_time))

#define XEN_GETDOMAININFO_CPUCOUNT(dominfo)     \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.nr_online_vcpus :               \
     (dom_interface_version < 5 ?               \
      dominfo.v2.nr_online_vcpus :              \
      dominfo.v2d5.nr_online_vcpus))

#define XEN_GETDOMAININFO_MAXCPUID(dominfo)  \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.max_vcpu_id :                   \
     (dom_interface_version < 5 ?               \
      dominfo.v2.max_vcpu_id :                  \
      dominfo.v2d5.max_vcpu_id))

#define XEN_GETDOMAININFO_FLAGS(dominfo)        \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.flags :                         \
     (dom_interface_version < 5 ?               \
      dominfo.v2.flags :                        \
      dominfo.v2d5.flags))

#define XEN_GETDOMAININFO_TOT_PAGES(dominfo)    \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.tot_pages :                     \
     (dom_interface_version < 5 ?               \
      dominfo.v2.tot_pages :                    \
      dominfo.v2d5.tot_pages))

#define XEN_GETDOMAININFO_MAX_PAGES(dominfo)    \
    (hypervisor_version < 2 ?                   \
     dominfo.v0.max_pages :                     \
     (dom_interface_version < 5 ?               \
      dominfo.v2.max_pages :                    \
      dominfo.v2d5.max_pages))
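
/*
 * Usage sketch for the dispatch macros above (illustrative only;
 * "handle" and "maxids" are placeholders):
 *
 *     xen_getdomaininfolist dominfos;
 *     int i, nbids, id;
 *
 *     if (!XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))
 *         return -1;
 *     XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
 *     nbids = virXen_getdomaininfolist(handle, 0, maxids, &dominfos);
 *     for (i = 0; i < nbids; i++)
 *         id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
 *     XEN_GETDOMAININFOLIST_FREE(dominfos);
 */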



struct xen_v0_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v0_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;


struct xen_v2_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v2_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;

/* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
struct xen_v2s3_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
#ifdef __BIG_ENDIAN__
    struct {
        int __pad[(sizeof (long long) - sizeof (struct xen_v2d5_getdomaininfo *)) / sizeof (int)];
        struct xen_v2d5_getdomaininfo *v;
    } buffer;
#else
    union {
        struct xen_v2d5_getdomaininfo *v;
        uint64_t pad ALIGN_64;
    } buffer;
#endif
    uint32_t  num_domains;
};
typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;



struct xen_v0_domainop {
    domid_t   domain;
};
typedef struct xen_v0_domainop xen_v0_domainop;

/*
 * The information for a destroydomain system hypercall
 */
#define XEN_V0_OP_DESTROYDOMAIN	9
#define XEN_V1_OP_DESTROYDOMAIN	9
#define XEN_V2_OP_DESTROYDOMAIN	2

/*
 * The information for a pausedomain system hypercall
 */
#define XEN_V0_OP_PAUSEDOMAIN	10
#define XEN_V1_OP_PAUSEDOMAIN	10
#define XEN_V2_OP_PAUSEDOMAIN	3

/*
 * The information for an unpausedomain system hypercall
 */
#define XEN_V0_OP_UNPAUSEDOMAIN	11
#define XEN_V1_OP_UNPAUSEDOMAIN	11
#define XEN_V2_OP_UNPAUSEDOMAIN	4

/*
 * The information for a setmaxmem system hypercall
 */
#define XEN_V0_OP_SETMAXMEM	28
#define XEN_V1_OP_SETMAXMEM	28
#define XEN_V2_OP_SETMAXMEM	11

struct xen_v0_setmaxmem {
    domid_t	domain;
    uint64_t	maxmem;
};
typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;

struct xen_v2_setmaxmem {
    uint64_t	maxmem;
};
typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;

struct xen_v2d5_setmaxmem {
    uint64_t	maxmem ALIGN_64;
};
typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;

/*
 * The information for a setmaxvcpu system hypercall
 */
#define XEN_V0_OP_SETMAXVCPU	41
#define XEN_V1_OP_SETMAXVCPU	41
#define XEN_V2_OP_SETMAXVCPU	15

struct xen_v0_setmaxvcpu {
    domid_t	domain;
    uint32_t	maxvcpu;
};
typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;

struct xen_v2_setmaxvcpu {
    uint32_t	maxvcpu;
};
typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;

/*
 * The information for a setvcpumap system hypercall
 * Note that between versions 1 and 2 the 64 physical CPU limitation
 * was lifted, hence the difference in structures
 */
#define XEN_V0_OP_SETVCPUMAP	20
#define XEN_V1_OP_SETVCPUMAP	20
#define XEN_V2_OP_SETVCPUMAP	9

struct xen_v0_setvcpumap {
    domid_t	domain;
    uint32_t	vcpu;
    cpumap_t    cpumap;
};
typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;

struct xen_v2_cpumap {
    uint8_t    *bitmap;
    uint32_t    nr_cpus;
};
struct xen_v2_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2_cpumap cpumap;
};
typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;

/* HV version 2, Dom version 5 requires 64-bit alignment */
struct xen_v2d5_cpumap {
#ifdef __BIG_ENDIAN__
    struct {
        int __pad[(sizeof (long long) - sizeof (uint8_t *)) / sizeof (int)];
        uint8_t *v;
    } bitmap;
#else
    union {
        uint8_t    *v;
        uint64_t   pad ALIGN_64;
    } bitmap;
#endif
    uint32_t    nr_cpus;
};
struct xen_v2d5_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2d5_cpumap cpumap;
};
typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;

/*
 * The information for a vcpuinfo system hypercall
 */
#define XEN_V0_OP_GETVCPUINFO   43
#define XEN_V1_OP_GETVCPUINFO	43
#define XEN_V2_OP_GETVCPUINFO   14

struct xen_v0_vcpuinfo {
    domid_t	domain;		/* owner's domain */
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
    cpumap_t	cpumap;		/* deprecated in V2 */
};
typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;

struct xen_v2_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;

struct xen_v2d5_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time ALIGN_64; /* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;

/*
 * From V2 the pinning of a vcpu is read with a separate call
 */
#define XEN_V2_OP_GETVCPUMAP	25
typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;

/*
 * From V2 we get the scheduler information
 */
#define XEN_V2_OP_GETSCHEDULERID	4

/*
 * From V2 we get the scheduler parameters
 */
#define XEN_V2_OP_SCHEDULER		16
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF       4
#define XEN_SCHEDULER_CREDIT     5
/* get/set scheduler parameters */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1

struct xen_v2_setschedinfo {
    uint32_t sched_id;
    uint32_t cmd;
    union {
        struct xen_domctl_sched_sedf {
            uint64_t period ALIGN_64;
            uint64_t slice  ALIGN_64;
            uint64_t latency ALIGN_64;
            uint32_t extratime;
            uint32_t weight;
        } sedf;
        struct xen_domctl_sched_credit {
            uint16_t weight;
            uint16_t cap;
        } credit;
    } u;
};
typedef struct xen_v2_setschedinfo xen_v2_setschedinfo;
typedef struct xen_v2_setschedinfo xen_v2_getschedinfo;


/*
 * The hypercall operation structures also changed at
 * changeset 86d26e6ec89b
 */
/* the old structure */
struct xen_op_v0 {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v0_getdomaininfolistop getdomaininfolist;
        xen_v0_domainop          domain;
        xen_v0_setmaxmem         setmaxmem;
        xen_v0_setmaxvcpu        setmaxvcpu;
        xen_v0_setvcpumap        setvcpumap;
        xen_v0_vcpuinfo          getvcpuinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v0 xen_op_v0;
typedef struct xen_op_v0 xen_op_v1;

/* the new structure for systems operations */
struct xen_op_v2_sys {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v2_getdomaininfolistop   getdomaininfolist;
        xen_v2s3_getdomaininfolistop getdomaininfolists3;
        xen_v2_getschedulerid        getschedulerid;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_sys xen_op_v2_sys;

/* the new structure for domains operation */
struct xen_op_v2_dom {
    uint32_t cmd;
    uint32_t interface_version;
    domid_t  domain;
    union {
        xen_v2_setmaxmem         setmaxmem;
        xen_v2d5_setmaxmem       setmaxmemd5;
        xen_v2_setmaxvcpu        setmaxvcpu;
        xen_v2_setvcpumap        setvcpumap;
        xen_v2d5_setvcpumap      setvcpumapd5;
        xen_v2_vcpuinfo          getvcpuinfo;
        xen_v2d5_vcpuinfo        getvcpuinfod5;
        xen_v2_getvcpumap        getvcpumap;
        xen_v2d5_getvcpumap      getvcpumapd5;
        xen_v2_setschedinfo      setschedinfo;
        xen_v2_getschedinfo      getschedinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_dom xen_op_v2_dom;
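
/*
 * Note (assumption based on the public Xen headers): each op union
 * above carries a 128-byte padding member so the structure size stays
 * fixed no matter which per-version payload is in use.
 */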

#include "internal.h"
#include "driver.h"
#include "xen_unified.h"
#include "xen_internal.h"

#define XEN_HYPERVISOR_SOCKET "/proc/xen/privcmd"

#ifndef PROXY
static const char * xenHypervisorGetType(virConnectPtr conn);
static unsigned long xenHypervisorGetMaxMemory(virDomainPtr domain);
#endif

#ifndef PROXY
struct xenUnifiedDriver xenHypervisorDriver = {
    xenHypervisorOpen, /* open */
    xenHypervisorClose, /* close */
    xenHypervisorGetType, /* type */
    xenHypervisorGetVersion, /* version */
    NULL, /* hostname */
    NULL, /* URI */
    NULL, /* nodeGetInfo */
    xenHypervisorGetCapabilities, /* getCapabilities */
    xenHypervisorListDomains, /* listDomains */
    xenHypervisorNumOfDomains, /* numOfDomains */
    NULL, /* domainCreateLinux */
    xenHypervisorPauseDomain, /* domainSuspend */
    xenHypervisorResumeDomain, /* domainResume */
    NULL, /* domainShutdown */
    NULL, /* domainReboot */
    xenHypervisorDestroyDomain, /* domainDestroy */
    NULL, /* domainGetOSType */
    xenHypervisorGetMaxMemory, /* domainGetMaxMemory */
    xenHypervisorSetMaxMemory, /* domainSetMaxMemory */
    NULL, /* domainSetMemory */
    xenHypervisorGetDomainInfo, /* domainGetInfo */
    NULL, /* domainSave */
    NULL, /* domainRestore */
    NULL, /* domainCoreDump */
    xenHypervisorSetVcpus, /* domainSetVcpus */
    xenHypervisorPinVcpu, /* domainPinVcpu */
    xenHypervisorGetVcpus, /* domainGetVcpus */
    xenHypervisorGetVcpuMax, /* domainGetMaxVcpus */
    NULL, /* domainDumpXML */
    NULL, /* listDefinedDomains */
    NULL, /* numOfDefinedDomains */
    NULL, /* domainCreate */
    NULL, /* domainDefineXML */
    NULL, /* domainUndefine */
    NULL, /* domainAttachDevice */
    NULL, /* domainDetachDevice */
    NULL, /* domainGetAutostart */
    NULL, /* domainSetAutostart */
    xenHypervisorGetSchedulerType, /* domainGetSchedulerType */
    xenHypervisorGetSchedulerParameters, /* domainGetSchedulerParameters */
    xenHypervisorSetSchedulerParameters, /* domainSetSchedulerParameters */
};
#endif /* !PROXY */

/**
 * virXenError:
 * @error: the error number
 * @info: extra information string
 * @value: extra information number
 *
 * Handle an error at the Xen hypervisor interface
 */
static void
virXenError(virErrorNumber error, const char *info, int value)
{
    const char *errmsg;

    if ((error == VIR_ERR_OK) || (in_init != 0))
        return;

    errmsg = __virErrorMsg(error, info);
    __virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
                    errmsg, info, NULL, value, 0, errmsg, info, value);
}

#ifndef PROXY

/**
 * virXenErrorFunc:
 * @error: the error number
 * @func: the function failing
 * @info: extra information string
 * @value: extra information number
 *
 * Handle an error at the Xen hypervisor interface
 */
static void
virXenErrorFunc(virErrorNumber error, const char *func, const char *info,
                int value)
{
    char fullinfo[1000];
    const char *errmsg;

    if ((error == VIR_ERR_OK) || (in_init != 0))
        return;


    errmsg = __virErrorMsg(error, info);
    if (func != NULL) {
        snprintf(fullinfo, 999, "%s: %s", func, info);
	fullinfo[999] = 0;
	__virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
			errmsg, fullinfo, NULL, value, 0, errmsg, fullinfo,
			value);
    } else {
	__virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
			errmsg, info, NULL, value, 0, errmsg, info,
			value);
    }
}

#endif /* PROXY */

/**
 * virXenPerror:
 * @conn: the connection (if available)
 * @msg: name of system call or file (as in perror(3))
 *
 * Raise error from a failed system call, using errno as the source.
 */
static void
virXenPerror (virConnectPtr conn, const char *msg)
{
    char *msg_s;

    msg_s = malloc (strlen (msg) + 10);
    if (msg_s) {
        strcpy (msg_s, msg);
        strcat (msg_s, ": %s");
    }

    __virRaiseError (conn, NULL, NULL,
                     VIR_FROM_XEN, VIR_ERR_SYSTEM_ERROR, VIR_ERR_ERROR,
                     msg, NULL, NULL, errno, 0,
                     msg_s ? msg_s : msg, strerror (errno));
}

/**
 * xenHypervisorDoV0Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation through the old interface;
 * this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
{
    int ret;
    v0_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_version << 8;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

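    /* Pin the op structure in memory: the hypervisor reads and writes
     * it directly during the ioctl, so it must stay resident. */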
    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}
/**
 * xenHypervisorDoV1Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v1 operation; this leads to a hypervisor call through
 * ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV1Op(int handle, xen_op_v1* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * xenHypervisorDoV2Sys:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v2 system operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = sys_interface_version;
    hc.op = __HYPERVISOR_sysctl;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " sys ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * xenHypervisorDoV2Dom:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor domain operation structure
 *
 * Do a hypervisor v2 domain operation; this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = dom_interface_version;
    hc.op = __HYPERVISOR_domctl;
    hc.arg[0] = (unsigned long) op;

    if (mlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking", sizeof(*op));
        return (-1);
    }

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virXenError(VIR_ERR_XEN_CALL, " ioctl ", xen_ioctl_hypercall_cmd);
    }

    if (munlock(op, sizeof(dom0_op_t)) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " releasing", sizeof(*op));
        ret = -1;
    }

    if (ret < 0)
        return (-1);

    return (0);
}

/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
 * Do a low level hypercall to list existing domains' information
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
virXen_getdomaininfolist(int handle, int first_domain, int maxids,
                         xen_getdomaininfolist *dominfos)
{
    int ret = -1;

    if (mlock(XEN_GETDOMAININFOLIST_DATA(dominfos),
              XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " locking",
                    XEN_GETDOMAININFO_SIZE * maxids);
        return (-1);
    }
    if (hypervisor_version > 1) {
        xen_op_v2_sys op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;

        if (sys_interface_version < 3) {
            op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolist.max_domains = maxids;
            op.u.getdomaininfolist.buffer = dominfos->v2;
            op.u.getdomaininfolist.num_domains = maxids;
        } else {
            op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolists3.max_domains = maxids;
            op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
            op.u.getdomaininfolists3.num_domains = maxids;
        }
        ret = xenHypervisorDoV2Sys(handle, &op);

        if (ret == 0) {
            if (sys_interface_version < 3)
                ret = op.u.getdomaininfolist.num_domains;
            else
                ret = op.u.getdomaininfolists3.num_domains;
        }
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    }
    if (munlock(XEN_GETDOMAININFOLIST_DATA(dominfos),
                XEN_GETDOMAININFO_SIZE * maxids) < 0) {
        virXenError(VIR_ERR_XEN_CALL, " release",
                    XEN_GETDOMAININFO_SIZE * maxids);
        ret = -1;
    }
    return(ret);
}

static int
virXen_getdomaininfo(int handle, int first_domain,
                     xen_getdomaininfo *dominfo) {
    xen_getdomaininfolist dominfos;

    if (hypervisor_version < 2) {
        dominfos.v0 = &(dominfo->v0);
    } else {
        dominfos.v2 = &(dominfo->v2);
    }

    return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
}


#ifndef PROXY
/**
 * xenHypervisorGetSchedulerType:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams: give back the number of scheduler parameters
 *
 * Do a low level hypercall to get scheduler type
 *
 * Returns scheduler name or NULL in case of failure
 */
char *
xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
{
    char *schedulertype = NULL;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL)) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
			"domain or conn is NULL", 0);
        return NULL;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
			"priv->handle or domain->id invalid", 0);
        return NULL;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen 3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
		        "unsupported in dom interface < 5", 0);
        return NULL;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op;
        int ret;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op);
        if (ret < 0)
	    return(NULL);

        switch (op.u.getschedulerid.sched_id){
	    case XEN_SCHEDULER_SEDF:
		schedulertype = strdup("sedf");
		if (nparams)
		    *nparams = 6;
		break;
	    case XEN_SCHEDULER_CREDIT:
		schedulertype = strdup("credit");
		if (nparams)
		    *nparams = 2;
		break;
	    default:
		break;
        }
    }

    return schedulertype;
}
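
/*
 * Caller sketch (illustrative only; "dom" is a placeholder): the
 * nparams value returned here sizes the array later handed to
 * xenHypervisorGetSchedulerParameters():
 *
 *     int nparams = 0;
 *     char *name = xenHypervisorGetSchedulerType(dom, &nparams);
 *     if (name != NULL) {
 *         ... allocate nparams virSchedParameter entries ...
 *         free(name);
 *     }
 */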

static const char *str_weight = "weight";
static const char *str_cap = "cap";

/**
 * xenHypervisorGetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: pointer to scheduler parameters.
 *     This memory area should be allocated before calling.
 * @nparams: this parameter must match the number of scheduler
 *     parameters returned by xenHypervisorGetSchedulerType().
 *
 * Do a low level hypercall to get scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorGetSchedulerParameters(virDomainPtr domain,
				 virSchedParameterPtr params, int *nparams)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL)) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
			"domain or conn is NULL", 0);
        return -1;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
			"priv->handle or domain->id invalid", 0);
        return -1;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen 3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
			"unsupported in dom interface < 5", 0);
        return -1;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0)
	    return -1;

        switch (op_sys.u.getschedulerid.sched_id){
	    case XEN_SCHEDULER_SEDF:
		/* TODO: Implement for Xen/SEDF */
		TODO
		return(-1);
	    case XEN_SCHEDULER_CREDIT:
		if (*nparams < 2)
		    return(-1);
		memset(&op_dom, 0, sizeof(op_dom));
		op_dom.cmd = XEN_V2_OP_SCHEDULER;
		op_dom.domain = (domid_t) domain->id;
		op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
		op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
		ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
		if (ret < 0)
		    return(-1);

		strncpy (params[0].field, str_weight, VIR_DOMAIN_SCHED_FIELD_LENGTH);
		params[0].field[VIR_DOMAIN_SCHED_FIELD_LENGTH-1] = '\0';
		params[0].type = VIR_DOMAIN_SCHED_FIELD_UINT;
		params[0].value.ui = op_dom.u.getschedinfo.u.credit.weight;

		strncpy (params[1].field, str_cap, VIR_DOMAIN_SCHED_FIELD_LENGTH);
		params[1].field[VIR_DOMAIN_SCHED_FIELD_LENGTH-1] = '\0';
		params[1].type = VIR_DOMAIN_SCHED_FIELD_UINT;
		params[1].value.ui = op_dom.u.getschedinfo.u.credit.cap;

		*nparams = 2;
		break;
	    default:
		virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
			"Unknown scheduler", op_sys.u.getschedulerid.sched_id);
		return -1;
        }
    }

    return 0;
}

/**
 * xenHypervisorSetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams: give the number of scheduler setting parameters
 *
 * Do a low level hypercall to set scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorSetSchedulerParameters(virDomainPtr domain,
				 virSchedParameterPtr params, int nparams)
{
    int i;
    unsigned int val;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL)) {
        virXenErrorFunc (VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
	                 "domain or conn is NULL", 0);
        return -1;
    }

    if ((nparams == 0) || (params == NULL)) {
	virXenErrorFunc (VIR_ERR_INVALID_ARG, __FUNCTION__,
			 "Noparameters given", 0);
	return(-1);
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0) {
        virXenErrorFunc (VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
	                 "priv->handle or domain->id invalid", 0);
        return -1;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen 3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
			"unsupported in dom interface < 5", 0);
        return -1;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret == -1) return -1;

        switch (op_sys.u.getschedulerid.sched_id){
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            TODO
	    return(-1);
        case XEN_SCHEDULER_CREDIT: {
            int weight_set = 0;
            int cap_set = 0;

            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_putinfo;

            /*
             * Credit scheduler parameters: the following
             * values do not change the parameters
             */
            op_dom.u.getschedinfo.u.credit.weight = 0;
            op_dom.u.getschedinfo.u.credit.cap    = (uint16_t)~0U;

            for (i = 0; i < nparams; i++) {
                if (STREQ (params[i].field, str_weight) &&
                    params[i].type == VIR_DOMAIN_SCHED_FIELD_UINT) {
		    val = params[i].value.ui;
		    if ((val < 1) || (val > USHRT_MAX)) {
		        virXenErrorFunc (VIR_ERR_INVALID_ARG, __FUNCTION__,
       _("Credit scheduler weight parameter (%d) is out of range (1-65535)"),
                                         val);
			return(-1);
		    }
                    op_dom.u.getschedinfo.u.credit.weight = val;
		    weight_set = 1;
		} else if (STREQ (params[i].field, str_cap) &&
                    params[i].type == VIR_DOMAIN_SCHED_FIELD_UINT) {
		    val = params[i].value.ui;
		    if (val > USHRT_MAX) {
		        virXenErrorFunc (VIR_ERR_INVALID_ARG, __FUNCTION__,
       _("Credit scheduler cap parameter (%d) is out of range (0-65535)"),
                                         val);
			return(-1);
		    }
                    op_dom.u.getschedinfo.u.credit.cap = val;
		    cap_set = 1;
	        } else {
		    virXenErrorFunc (VIR_ERR_INVALID_ARG, __FUNCTION__,
	     "Credit scheduler accepts 'cap' and 'weight' integer parameters",
				     0);
		    return(-1);
		}
            }

            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
	        return -1;
            break;
	}
        default:
            virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "Unknown scheduler", op_sys.u.getschedulerid.sched_id);
            return -1;
        }
    }
     
    return 0;
}
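
/*
 * Caller sketch (illustrative only; "dom" is a placeholder): set the
 * credit scheduler weight of a domain; the field name must match
 * str_weight/str_cap above:
 *
 *     virSchedParameter p;
 *     strncpy(p.field, "weight", VIR_DOMAIN_SCHED_FIELD_LENGTH);
 *     p.type = VIR_DOMAIN_SCHED_FIELD_UINT;
 *     p.value.ui = 512;
 *     if (xenHypervisorSetSchedulerParameters(dom, &p, 1) < 0)
 *         return -1;
 */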

/**
 * virXen_pausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to pause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_pausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_PAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_unpausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to unpause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_unpausedomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_destroydomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to destroy the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_destroydomain(int handle, int id)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_DESTROYDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setmaxmem:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @memory: the amount of memory in kilobytes
 *
 * Do a low level hypercall to change the max memory amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxmem(int handle, int id, unsigned long memory)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXMEM;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5)
            op.u.setmaxmem.maxmem = memory;
        else
            op.u.setmaxmemd5.maxmem = memory;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setmaxvcpus:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpus: the numbers of vcpus
 *
 * Do a low level hypercall to change the max vcpus amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXVCPU;
        op.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hypervisor_version == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hypervisor_version == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return(ret);
}

/**
 * virXen_setvcpumap:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to change the pinning for vcpu
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setvcpumap(int handle, int id, unsigned int vcpu,
                  unsigned char * cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        if (mlock(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " locking", maplen);
            return (-1);
        }
        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETVCPUMAP;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5) {
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap.bitmap = cpumap;
            op.u.setvcpumap.cpumap.nr_cpus = maplen * 8;
        } else {
            op.u.setvcpumapd5.vcpu = vcpu;
            op.u.setvcpumapd5.cpumap.bitmap.v = cpumap;
            op.u.setvcpumapd5.cpumap.nr_cpus = maplen * 8;
        }
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (munlock(cpumap, maplen) < 0) {
            virXenError(VIR_ERR_XEN_CALL, " release", maplen);
            ret = -1;
        }
    } else {
        cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
        uint64_t *pm = &xen_cpumap;
        int j;

        if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
            return (-1);

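        /* Pack the byte-addressed bitmap into the fixed 64-bit
         * cpumap_t, eight map bytes per 64-bit word. */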
        memset(pm, 0, sizeof(cpumap_t));
        for (j = 0; j < maplen; j++)
            *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV1Op(handle, &op);
        } else if (hypervisor_version == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV0Op(handle, &op);
        }
    }
    return(ret);
}
#endif /* !PROXY*/

/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: the virVcpuInfo structure to fill
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to get the vcpu information and its pinning
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        if (dom_interface_version < 5)
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (ret < 0)
            return(-1);
        ipt->number = vcpu;
        if (dom_interface_version < 5) {
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
        }
        if ((cpumap != NULL) && (maplen > 0)) {
            if (mlock(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " locking", maplen);
                return (-1);
            }
            memset(cpumap, 0, maplen);
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
            if (dom_interface_version < 5) {
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (munlock(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " release", maplen);
                ret = -1;
            }
        }
1659
    } else {
1660 1661 1662 1663 1664
        int mapl = maplen;
        int cpu;

        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);
1665 1666

        if (hypervisor_version == 1) {
1667 1668 1669 1670 1671 1672 1673 1674 1675
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return(-1);
1676
            ipt->number = vcpu;
1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hypervisor_version == 0) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return(-1);
1700
            ipt->number = vcpu;
1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
1715 1716 1717
    }
    return(ret);
}
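
/*
 * A minimal usage sketch for the helper above (hypothetical, kept out
 * of the build on purpose): query the state and pinning of vcpu 0 of
 * domain 0 through an already-open privcmd handle.
 */
#if 0
static void
exampleGetVcpu0(int handle)
{
    virVcpuInfo vinfo;
    unsigned char map[8]; /* one bit per physical CPU, up to 64 CPUs */

    if (virXen_getvcpusinfo(handle, 0, 0, &vinfo, map, sizeof(map)) == 0)
        fprintf(stderr, "vcpu0 state=%d on pcpu=%d\n",
                vinfo.state, vinfo.cpu);
}
#endif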

/**
 * xenHypervisorInit:
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used, i.e. pre or post changeset 10277.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorInit(void)
{
    int fd, ret, cmd, errcode;
    hypercall_t hc;
    v0_hypercall_t v0_hc;
    xen_getdomaininfo info;
    virVcpuInfoPtr ipt = NULL;

    if (initialized) {
        if (hypervisor_version == -1)
            return (-1);
        return(0);
    }
    initialized = 1;
    in_init = 1;

    /* Compile regular expressions used by xenHypervisorGetCapabilities.
     * Note that errors here are really internal errors since these
     * regexps should never fail to compile.
     */
    errcode = regcomp (&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_hvm_rec, error, sizeof error);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&flags_pae_rec, flags_pae_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &flags_pae_rec, error, sizeof error);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }
    errcode = regcomp (&xen_cap_rec, xen_cap_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror (errcode, &xen_cap_rec, error, sizeof error);
        regfree (&xen_cap_rec);
        regfree (&flags_pae_rec);
        regfree (&flags_hvm_rec);
        virXenError (VIR_ERR_INTERNAL_ERROR, error, 0);
        in_init = 0;
        return -1;
    }

    /* Xen hypervisor version detection begins. */
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        hypervisor_version = -1;
        return(-1);
    }
    fd = ret;

    /*
     * The size of the hypervisor call block changed in July 2006.
     * This detects whether we are using the new or the old hypercall_t
     * structure.
     */
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;
    hc.arg[1] = 0;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
#ifdef DEBUG
        fprintf(stderr, "Using new hypervisor call: %X\n", ret);
#endif
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        goto detect_v2;
    }

    /*
     * check if the old hypercall interface actually works
     */
    v0_hc.op = __HYPERVISOR_xen_version;
    v0_hc.arg[0] = (unsigned long) XENVER_version;
    v0_hc.arg[1] = 0;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
    if ((ret != -1) && (ret != 0)) {
#ifdef DEBUG
        fprintf(stderr, "Using old hypervisor call: %X\n", ret);
#endif
        hv_version = ret;
        xen_ioctl_hypercall_cmd = cmd;
        hypervisor_version = 0;
        goto done;
    }

    /*
     * we failed to make any hypercall
     */

    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    return(-1);

 detect_v2:
    /*
     * The hypercalls were refactored into 3 different sections in August 2006.
     * Try to detect if we are running a version post 3.0.2 with the new ones
     * or the old ones.
     */
    hypervisor_version = 2;

    ipt = malloc(sizeof(virVcpuInfo));
    if (ipt == NULL){
#ifdef DEBUG
        fprintf(stderr, "Memory allocation failed at xenHypervisorInit()\n");
#endif
        close(fd);
        in_init = 0;
        return(-1);
    }
    /* Currently consider RHEL 5.0, Fedora 7 and xen-unstable */
    sys_interface_version = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* RHEL 5.0 */
        dom_interface_version = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver2 dom ver3\n");
#endif
            goto done;
        }
        /* Fedora 7 */
        dom_interface_version = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver2 dom ver4\n");
#endif
            goto done;
        }
    }

    sys_interface_version = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* xen-unstable */
        dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
#ifdef DEBUG
            fprintf(stderr, "Using hypervisor call v2, sys ver3 dom ver5\n");
#endif
            goto done;
        }
    }

    hypervisor_version = 1;
    sys_interface_version = -1;
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
#ifdef DEBUG
        fprintf(stderr, "Using hypervisor call v1\n");
#endif
        goto done;
    }

    /*
     * we failed to make the getdomaininfolist hypercall
     */

    hypervisor_version = -1;
    virXenError(VIR_ERR_XEN_CALL, " ioctl ", IOCTL_PRIVCMD_HYPERCALL);
    close(fd);
    in_init = 0;
    if (ipt)
        free(ipt);
    return(-1);

 done:
    close(fd);
    in_init = 0;
    if (ipt)
        free(ipt);
    return(0);
}

/**
 * xenHypervisorOpen:
 * @conn: pointer to the connection block
 * @name: URL for the target, NULL for local
 * @flags: combination of virDrvOpenFlag(s)
 *
 * Connects to the Xen hypervisor.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorOpen(virConnectPtr conn,
                  const char *name ATTRIBUTE_UNUSED,
                  int flags ATTRIBUTE_UNUSED)
{
    int ret;
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (initialized == 0)
        if (xenHypervisorInit() == -1)
            return -1;

    priv->handle = -1;

    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        virXenError(VIR_ERR_NO_XEN, XEN_HYPERVISOR_SOCKET, 0);
        return (-1);
    }

    priv->handle = ret;

    return(0);
}

/**
 * xenHypervisorClose:
 * @conn: pointer to the connection block
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
xenHypervisorClose(virConnectPtr conn)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return (-1);

    priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (priv->handle < 0)
        return -1;

    ret = close(priv->handle);
    if (ret < 0)
        return (-1);

    return (0);
}
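
/*
 * A usage sketch (hypothetical; assumes conn->privateData already
 * points to an allocated xenUnifiedPrivate structure): open the
 * hypervisor handle, query the version, then close it again.
 */
#if 0
static void
exampleOpenClose(virConnectPtr conn)
{
    if (xenHypervisorOpen(conn, NULL, 0) == 0) {
        unsigned long hvVer;

        if (xenHypervisorGetVersion(conn, &hvVer) == 0)
            fprintf(stderr, "hypervisor version: %lu\n", hvVer);
        xenHypervisorClose(conn);
    }
}
#endif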


#ifndef PROXY
/**
 * xenHypervisorGetType:
 * @conn: pointer to the Xen Hypervisor block
 *
 * Get the name of the hypervisor driver used.
 *
 * Returns the name of the driver ("Xen"), or NULL in case of error.
 */
static const char *
xenHypervisorGetType(virConnectPtr conn)
{
    if (!VIR_IS_CONNECT(conn)) {
        virXenError(VIR_ERR_INVALID_CONN, __FUNCTION__, 0);
        return (NULL);
    }
    return("Xen");
}
#endif

/**
 * xenHypervisorGetVersion:
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
 *
 * Call the hypervisor to extract its own internal API version
 *
 * Returns 0 in case of success, -1 in case of error
 */
int
xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || hvVer == NULL)
        return (-1);
    *hvVer = (hv_version >> 16) * 1000000 + (hv_version & 0xFFFF) * 1000;
    return(0);
}
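
/*
 * A worked example of the encoding above (values are illustrative):
 * XENVER_version packs the major version in the upper 16 bits and the
 * minor version in the lower 16 bits, so for Xen 3.1 hv_version is
 * 0x00030001 and *hvVer becomes 3 * 1000000 + 1 * 1000 = 3001000.
 */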

/**
 * xenHypervisorMakeCapabilitiesXML:
 * @conn: pointer to the connection block
 * @hostmachine: the machine type of the host, e.g. from uname(2)
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Build the capabilities XML of this hypervisor.
 *
 * Returns the XML document as a string, or NULL in case of error.
 */
char *
xenHypervisorMakeCapabilitiesXML(virConnectPtr conn ATTRIBUTE_UNUSED,
                                 const char *hostmachine,
                                 FILE *cpuinfo, FILE *capabilities)
{
    char line[1024], *str, *token;
    regmatch_t subs[4];
    char *saveptr = NULL;
    int i, r;

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
    struct guest_arch {
        const char *model;
        int bits;
        int hvm;
        int pae;
        int nonpae;
        int ia64_be;
    } guest_archs[32];
    int nr_guest_archs = 0;

    virBufferPtr xml;
    char *xml_str;

    memset(guest_archs, 0, sizeof(guest_archs));

    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
    if (cpuinfo) {
        while (fgets (line, sizeof line, cpuinfo)) {
            if (regexec (&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
                && subs[0].rm_so != -1) {
                strncpy (hvm_type,
                         &line[subs[1].rm_so], subs[1].rm_eo-subs[1].rm_so+1);
                hvm_type[subs[1].rm_eo-subs[1].rm_so] = '\0';
            } else if (regexec (&flags_pae_rec, line, 0, NULL, 0) == 0)
                host_pae = 1;
        }
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */
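    /*
     * As an illustration (a made-up sample, not captured from a real
     * host), an x86_64 machine with HVM support could report:
     *
     *   xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
     */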

    /* Expecting one line in this file - ignore any more. */
    if (capabilities && fgets (line, sizeof line, capabilities)) {
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof guest_archs / sizeof guest_archs[0]
                 && (token = strtok_r (str, " ", &saveptr)) != NULL;
             str = NULL) {

            if (regexec (&xen_cap_rec, token, sizeof subs / sizeof subs[0],
                         subs, 0) == 0) {
                int hvm = strncmp (&token[subs[1].rm_so], "hvm", 3) == 0;
                const char *model;
                int bits, pae = 0, nonpae = 0, ia64_be = 0;
                if (strncmp (&token[subs[2].rm_so], "x86_32", 6) == 0) {
                    model = "i686";
                    bits = 32;
                    if (strncmp (&token[subs[3].rm_so], "p", 1) == 0)
                        pae = 1;
                    else
                        nonpae = 1;
                }
                else if (strncmp (&token[subs[2].rm_so], "x86_64", 6) == 0) {
                    model = "x86_64";
                    bits = 64;
                }
                else if (strncmp (&token[subs[2].rm_so], "ia64", 4) == 0) {
                    model = "ia64";
                    bits = 64;
                    if (strncmp (&token[subs[3].rm_so], "be", 2) == 0)
                        ia64_be = 1;
                }
                else if (strncmp (&token[subs[2].rm_so], "powerpc64", 9) == 0) {
                    model = "ppc64";
                    bits = 64;
                } else {
                    /* XXX surely no other Xen archs exist */
                    continue;
                }

                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (!strcmp(guest_archs[i].model, model) &&
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }

                /* Too many arch flavours - highly unlikely ! */
                if (i >= sizeof(guest_archs)/sizeof(guest_archs[0]))
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

                guest_archs[i].model = model;
                guest_archs[i].bits = bits;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports the
                   capabilities separately, so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
            }
        }
    }

    /* Construct the final XML. */
    xml = virBufferNew (1024);
    if (!xml) {
        virXenError(VIR_ERR_NO_MEMORY, __FUNCTION__, 0);
        return NULL;
    }
    r = virBufferVSprintf (xml,
                           "\
<capabilities>\n\
  <host>\n\
    <cpu>\n\
      <arch>%s</arch>\n\
      <features>\n",
                           hostmachine);
    if (r == -1) goto vir_buffer_failed;

    if (strcmp (hvm_type, "") != 0) {
        r = virBufferVSprintf (xml,
                               "\
        <%s/>\n",
                               hvm_type);
        if (r == -1) goto vir_buffer_failed;
    }
    if (host_pae) {
        r = virBufferAdd (xml, "\
        <pae/>\n", -1);
        if (r == -1) goto vir_buffer_failed;
    }
    r = virBufferAdd (xml,
                      "\
      </features>\n\
    </cpu>\n\
  </host>\n", -1);
    if (r == -1) goto vir_buffer_failed;

    for (i = 0; i < nr_guest_archs; ++i) {
        r = virBufferVSprintf (xml,
                               "\
\n\
  <guest>\n\
    <os_type>%s</os_type>\n\
    <arch name=\"%s\">\n\
      <wordsize>%d</wordsize>\n\
      <domain type=\"xen\"></domain>\n",
                               guest_archs[i].hvm ? "hvm" : "xen",
                               guest_archs[i].model,
                               guest_archs[i].bits);
        if (r == -1) goto vir_buffer_failed;
        if (guest_archs[i].hvm) {
            r = virBufferVSprintf (xml,
                              "\
      <emulator>/usr/lib%s/xen/bin/qemu-dm</emulator>\n\
      <machine>pc</machine>\n\
      <machine>isapc</machine>\n\
      <loader>/usr/lib/xen/boot/hvmloader</loader>\n",
                                   guest_archs[i].bits == 64 ? "64" : "");
            if (r == -1) goto vir_buffer_failed;
        }
        r = virBufferAdd (xml,
                          "\
    </arch>\n\
    <features>\n", -1);
        if (r == -1) goto vir_buffer_failed;
        if (guest_archs[i].pae) {
            r = virBufferAdd (xml,
                              "\
      <pae/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        if (guest_archs[i].nonpae) {
            r = virBufferAdd (xml,
                              "\
      <nonpae/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        if (guest_archs[i].ia64_be) {
            r = virBufferAdd (xml,
                              "\
      <ia64_be/>\n", -1);
            if (r == -1) goto vir_buffer_failed;
        }
        r = virBufferAdd (xml,
                          "\
    </features>\n\
  </guest>\n", -1);
        if (r == -1) goto vir_buffer_failed;
    }
    r = virBufferAdd (xml,
                      "\
</capabilities>\n", -1);
    if (r == -1) goto vir_buffer_failed;
    xml_str = strdup (xml->content);
    if (!xml_str) goto vir_buffer_failed;
    virBufferFree (xml);

    return xml_str;

 vir_buffer_failed:
    virXenError(VIR_ERR_NO_MEMORY, __FUNCTION__, 0);
    virBufferFree (xml);
    return NULL;
}

/**
 * xenHypervisorGetCapabilities:
 * @conn: pointer to the connection block
 *
 * Return the capabilities of this hypervisor.
 */
char *
xenHypervisorGetCapabilities (virConnectPtr conn)
{
    char *xml;
    FILE *cpuinfo, *capabilities;
    struct utsname utsname;

    /* Really, this never fails - look at the man-page. */
    uname (&utsname);

    cpuinfo = fopen ("/proc/cpuinfo", "r");
    if (cpuinfo == NULL) {
        if (errno != ENOENT) {
            virXenPerror (conn, "/proc/cpuinfo");
            return NULL;
        }
    }

    capabilities = fopen ("/sys/hypervisor/properties/capabilities", "r");
    if (capabilities == NULL) {
        if (errno != ENOENT) {
            if (cpuinfo)
                fclose(cpuinfo);
            virXenPerror (conn, "/sys/hypervisor/properties/capabilities");
            return NULL;
        }
    }

    xml = xenHypervisorMakeCapabilitiesXML(conn, utsname.machine, cpuinfo, capabilities);

    if (cpuinfo)
        fclose(cpuinfo);
    if (capabilities)
        fclose(capabilities);

    return xml;
}
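
/*
 * Usage sketch (hypothetical): dump the capabilities XML of a
 * hypervisor connection to stdout.
 */
#if 0
static void
exampleDumpCapabilities(virConnectPtr conn)
{
    char *xml = xenHypervisorGetCapabilities(conn);

    if (xml != NULL) {
        fputs(xml, stdout);
        free(xml);
    }
}
#endif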

/**
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorNumOfDomains(virConnectPtr conn)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids;
    static int last_maxids = 2;
    int maxids = last_maxids;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (-1);

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virXenError(VIR_ERR_NO_MEMORY, _("allocating %d domain info"),
                    maxids);
        return(-1);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (ret < 0)
        return (-1);

    nbids = ret;
    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nbids == maxids) {
        if (maxids < 65000) {
            last_maxids *= 2;
            maxids *= 2;
            goto retry;
        }
        nbids = -1;
    }
    if ((nbids < 0) || (nbids > maxids))
        return(-1);
    return(nbids);
}

/**
 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their IDs in @ids
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids, i;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 ||
        (ids == NULL) || (maxids < 1))
        return (-1);

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virXenError(VIR_ERR_NO_MEMORY, "allocating %d domain info",
                    maxids);
        return(-1);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
    memset(ids, 0, maxids * sizeof(int));

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (ret < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return (-1);
    }

    nbids = ret;
    if ((nbids < 0) || (nbids > maxids)) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return(-1);
    }

    for (i = 0; i < nbids; i++) {
        ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
    }

    XEN_GETDOMAININFOLIST_FREE(dominfos);
    return (nbids);
}
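
/*
 * Usage sketch (hypothetical): enumerate the active domains by first
 * asking how many there are, then fetching their IDs.
 */
#if 0
static void
exampleListDomains(virConnectPtr conn)
{
    int i, n;
    int *ids;

    n = xenHypervisorNumOfDomains(conn);
    if (n <= 0)
        return;
    ids = malloc(n * sizeof(int));
    if (ids == NULL)
        return;
    n = xenHypervisorListDomains(conn, ids, n);
    for (i = 0; i < n; i++)
        fprintf(stderr, "active domain id %d\n", ids[i]);
    free(ids);
}
#endif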

/**
 * xenHypervisorGetMaxVcpus:
 * @conn: pointer to the connection block
 * @type: ignored
 *
 * Returns the maximum number of virtual CPUs supported by Xen, or -1
 * in case of error.
 */
int
xenHypervisorGetMaxVcpus(virConnectPtr conn,
                         const char *type ATTRIBUTE_UNUSED)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (-1);

    return MAX_VIRT_CPUS;
}

/**
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: domain id
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
unsigned long
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;

    if (conn == NULL)
        return 0;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return 0;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (0);

    return((unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages);
}
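
/*
 * A worked example of the conversion above: with the usual 4096-byte
 * pages kb_per_pages is 4096 / 1024 = 4, so a domain whose max_pages
 * is 65536 reports 65536 * 4 = 262144 kilobytes (256 MB).
 */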

#ifndef PROXY
/**
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object or NULL
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain. If domain is NULL, then this gets the amount of memory reserved
 * to Domain0, i.e. the domain where the application runs.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
static unsigned long
xenHypervisorGetMaxMemory(virDomainPtr domain)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return 0;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (0);

    return(xenHypervisorGetDomMaxMemory(domain->conn, domain->id));
}
#endif

/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)
        return (-1);

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (-1);

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flags */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            /* The domain is shutdown.  Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NONE;
    }

    /*
     * The API reports the CPU time in nanoseconds and the memory in
     * page counts; convert the page counts to kilobytes.
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    if (info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
    return (0);
}
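
/*
 * A worked example of the flag decoding above (assuming the usual Xen
 * constants SHUTDOWN_crash == 3 and DOMFLAGS_SHUTDOWNSHIFT == 16):
 * domain_flags == 0x30004 masks down to DOMFLAGS_SHUTDOWN (1<<2) in
 * the low byte, and 0x30004 >> 16 == 3 == SHUTDOWN_crash, so the
 * domain is reported as VIR_DOMAIN_CRASHED.
 */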

/**
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || info == NULL ||
        (domain->id < 0))
        return (-1);

    return(xenHypervisorGetDomInfo(domain->conn, domain->id, info));
}

#ifndef PROXY
/**
 * xenHypervisorPauseDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to pause the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorPauseDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_pausedomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorResumeDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to resume the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorResumeDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if ((domain == NULL) || (domain->conn == NULL))
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_unpausedomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorDestroyDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to destroy the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorDestroyDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_destroydomain(priv->handle, domain->id);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorSetMaxMemory:
 * @domain: pointer to the domain block
 * @memory: the max memory size in kilobytes.
 *
 * Do a hypervisor call to change the maximum amount of memory used
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return (-1);

    ret = virXen_setmaxmem(priv->handle, domain->id, memory);
    if (ret < 0)
        return (-1);
    return (0);
}
#endif /* !PROXY */

#ifndef PROXY
/**
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0 || nvcpus < 1)
        return (-1);

    ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
    if (ret < 0)
        return (-1);
    return (0);
}

/**
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
 *
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (cpumap == NULL) || (maplen < 1))
        return (-1);

    ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
                            cpumap, maplen);
    if (ret < 0)
        return (-1);
    return (0);
}
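
/*
 * Usage sketch (hypothetical): pin vcpu 0 of a domain to physical
 * CPUs 0 and 2. Each byte of the map covers 8 CPUs with the lowest
 * bit standing for CPU 0, so a single byte of 0x05 selects CPUs 0
 * and 2.
 */
#if 0
static void
examplePinVcpu(virDomainPtr dom)
{
    unsigned char map = 0x05;

    xenHypervisorPinVcpu(dom, 0, &map, 1);
}
#endif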
#endif

/**
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 *      If cpumaps is NULL, then no cpumap information is returned by the API.
 *      It's assumed there are <maxinfo> cpumaps in the cpumaps array.
 *      The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *      (ie: calloc(maxinfo, maplen)).
 *      One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *      underlying virtualization system (Xen...).
 *
 * Extract information about virtual CPUs of domain, store it in info array
 * and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
 */
#ifndef PROXY
int
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
{
    xen_getdomaininfo dominfo;
    int ret;
    xenUnifiedPrivatePtr priv;
    virVcpuInfoPtr ipt;
    int nbinfo, i;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (info == NULL) || (maxinfo < 1) ||
        (sizeof(cpumap_t) & 7))
        return (-1);
    if ((cpumaps != NULL) && (maplen < 1))
        return -1;

    /* first get the number of virtual CPUs in this domain */
    XEN_GETDOMAININFO_CLEAR(dominfo);
    ret = virXen_getdomaininfo(priv->handle, domain->id,
                               &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
        return (-1);
    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                                      maplen);
            if (ret < 0)
                return(-1);
        } else {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt, NULL, 0);
            if (ret < 0)
                return(-1);
        }
    }
    return nbinfo;
}
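
/*
 * Usage sketch (hypothetical): fetch the info and the pinning maps of
 * up to 4 vcpus, allocating cpumaps as maxinfo * maplen bytes exactly
 * as the doc comment above requires.
 */
#if 0
static void
exampleGetVcpus(virDomainPtr dom)
{
    virVcpuInfo info[4];
    unsigned char *cpumaps = calloc(4, 8);
    int n;

    if (cpumaps == NULL)
        return;
    n = xenHypervisorGetVcpus(dom, info, 4, cpumaps, 8);
    if (n > 0)
        fprintf(stderr, "%d vcpus reported\n", n);
    free(cpumaps);
}
#endif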
#endif /* !PROXY */

/**
 * xenHypervisorGetVcpuMax:
 * @domain: pointer to domain object
 *
 * Returns the maximum number of virtual CPUs supported for the guest
 * VM. If the guest is inactive, this is the maximum number of CPUs
 * defined by Xen. If the guest is running, this reflects the maximum
 * number of virtual CPUs the guest was booted with. On failure, -1 is
 * returned.
 */
int
xenHypervisorGetVcpuMax(virDomainPtr domain)
{
    xen_getdomaininfo dominfo;
    int ret;
    int maxcpu;
    xenUnifiedPrivatePtr priv;

    if (domain == NULL || domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0)
        return (-1);

    /* inactive domain */
    if (domain->id < 0) {
        maxcpu = MAX_VIRT_CPUS;
    } else {
        XEN_GETDOMAININFO_CLEAR(dominfo);
        ret = virXen_getdomaininfo(priv->handle, domain->id,
                                   &dominfo);

        if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
            return (-1);
        maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
    }

    return maxcpu;
}

#endif /* WITH_XEN */
/*
 * vim: set tabstop=4:
 * vim: set shiftwidth=4:
 * vim: set expandtab:
 */
/*
 * Local variables:
 *  indent-tabs-mode: nil
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */