xen_hypervisor.c 107.6 KB
Newer Older
1 2 3
/*
 * xen_internal.c: direct access to Xen hypervisor level
 *
4
 * Copyright (C) 2005-2013 Red Hat, Inc.
5
 *
O
Osier Yang 已提交
6 7 8 9 10 11 12 13 14 15 16
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library.  If not, see
O
Osier Yang 已提交
18
 * <http://www.gnu.org/licenses/>.
19 20 21 22
 *
 * Daniel Veillard <veillard@redhat.com>
 */

23
#include <config.h>
24

25 26
#include <stdio.h>
#include <string.h>
27
/* required for uint8_t, uint32_t, etc ... */
28 29 30 31 32 33 34
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
35
#include <limits.h>
36 37
#include <regex.h>
#include <errno.h>
38

J
John Levon 已提交
39
#ifdef __sun
40
# include <sys/systeminfo.h>
J
John Levon 已提交
41

42
# include <priv.h>
J
John Levon 已提交
43

44 45 46
# ifndef PRIV_XVM_CONTROL
#  define PRIV_XVM_CONTROL ((const char *)"xvm_control")
# endif
J
John Levon 已提交
47 48 49

#endif /* __sun */

50
/* required for dom0_getdomaininfo_t */
51
#include <xen/dom0_ops.h>
52
#include <xen/version.h>
53
#ifdef HAVE_XEN_LINUX_PRIVCMD_H
54
# include <xen/linux/privcmd.h>
55
#else
56 57 58
# ifdef HAVE_XEN_SYS_PRIVCMD_H
#  include <xen/sys/privcmd.h>
# endif
59
#endif
60

61 62 63
/* required for shutdown flags */
#include <xen/sched.h>

64
#include "virerror.h"
65
#include "virlog.h"
66
#include "datatypes.h"
67
#include "driver.h"
68 69
#include "xen_driver.h"
#include "xen_hypervisor.h"
70
#include "xs_internal.h"
71
#include "virstatslinux.h"
72
#include "block_stats.h"
73
#include "xend_internal.h"
74
#include "virbuffer.h"
75
#include "capabilities.h"
76
#include "viralloc.h"
77
#include "virthread.h"
E
Eric Blake 已提交
78
#include "virfile.h"
79
#include "virnodesuspend.h"
80
#include "virtypedparam.h"
E
Eric Blake 已提交
81
#include "virendian.h"
82
#include "virstring.h"
83

84 85
#define VIR_FROM_THIS VIR_FROM_XEN

86
/*
87
 * so far there is 2 versions of the structures usable for doing
88 89 90 91
 * hypervisor calls.
 */
/* the old one */
typedef struct v0_hypercall_struct {
92 93
    unsigned long op;
    unsigned long arg[5];
94
} v0_hypercall_t;
95 96

#ifdef __linux__
97
# define XEN_V0_IOCTL_HYPERCALL_CMD \
98 99 100 101 102 103 104
        _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))
/* the new one */
typedef struct v1_hypercall_struct
{
    uint64_t op;
    uint64_t arg[5];
} v1_hypercall_t;
105
# define XEN_V1_IOCTL_HYPERCALL_CMD                  \
106
    _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
107
typedef v1_hypercall_t hypercall_t;
108
#elif defined(__sun)
109 110
typedef privcmd_hypercall_t hypercall_t;
#else
111
# error "unsupported platform"
112
#endif
113 114

#ifndef __HYPERVISOR_sysctl
115
# define __HYPERVISOR_sysctl 35
116 117
#endif
#ifndef __HYPERVISOR_domctl
118
# define __HYPERVISOR_domctl 36
119
#endif
120

121
#ifdef WITH_RHEL5_API
122
# define SYS_IFACE_MIN_VERS_NUMA 3
123
#else
124
# define SYS_IFACE_MIN_VERS_NUMA 4
125 126
#endif

127
static int xen_ioctl_hypercall_cmd = 0;
P
Philipp Hahn 已提交
128 129 130 131 132 133 134
static struct xenHypervisorVersions hv_versions = {
    .hv = 0,
    .hypervisor = 2,
    .sys_interface = -1,
    .dom_interface = -1,
};

135
static int kb_per_pages = 0;
136

137 138 139 140 141 142 143 144
/* Regular expressions used by xenHypervisorGetCapabilities, and
 * compiled once by xenHypervisorInit.  Note that these are POSIX.2
 * extended regular expressions (regex(7)).
 */
static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
static regex_t flags_hvm_rec;
static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
static regex_t flags_pae_rec;
145
static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
146 147
static regex_t xen_cap_rec;

148 149 150 151
/*
 * The content of the structures for a getdomaininfolist system hypercall
 */
#ifndef DOMFLAGS_DYING
152 153 154 155 156 157 158 159 160 161
# define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
# define DOMFLAGS_HVM       (1<<1) /* Domain is HVM                           */
# define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
# define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
# define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
# define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
# define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
# define DOMFLAGS_CPUSHIFT       8
# define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
# define DOMFLAGS_SHUTDOWNSHIFT 16
162 163
#endif

164 165 166 167 168
/*
 * These flags explain why a system is in the state of "shutdown".  Normally,
 * They are defined in xen/sched.h
 */
#ifndef SHUTDOWN_poweroff
169 170 171 172
# define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
# define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
# define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
# define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
173 174
#endif

175 176 177 178 179 180
#define XEN_V0_OP_GETDOMAININFOLIST	38
#define XEN_V1_OP_GETDOMAININFOLIST	38
#define XEN_V2_OP_GETDOMAININFOLIST	6

struct xen_v0_getdomaininfo {
    domid_t  domain;	/* the domain number */
R
Richard W.M. Jones 已提交
181
    uint32_t flags;	/* flags, see before */
182 183
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
184
    unsigned long shared_info_frame; /* MFN of shared_info struct */
185 186 187 188 189 190 191 192
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;

193 194
struct xen_v2_getdomaininfo {
    domid_t  domain;	/* the domain number */
R
Richard W.M. Jones 已提交
195
    uint32_t flags;	/* flags, see before */
196 197 198 199 200 201 202 203 204 205 206
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    uint64_t shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;

207 208 209 210 211 212 213

/* As of Hypervisor Call v2,  DomCtl v5 we are now 8-byte aligned
   even on 32-bit archs when dealing with uint64_t */
#define ALIGN_64 __attribute__((aligned(8)))

struct xen_v2d5_getdomaininfo {
    domid_t  domain;	/* the domain number */
R
Richard W.M. Jones 已提交
214
    uint32_t flags;	/* flags, see before */
215 216 217 218 219 220 221 222 223 224 225
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;

226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
struct xen_v2d6_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64;    /* number of shared pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2d6_getdomaininfo xen_v2d6_getdomaininfo;

J
Jim Fehlig 已提交
241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
struct xen_v2d7_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64;    /* number of shared pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
    uint32_t cpupool;
};
typedef struct xen_v2d7_getdomaininfo xen_v2d7_getdomaininfo;

J
Jim Fehlig 已提交
257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273
struct xen_v2d8_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64;    /* number of shared pages */
    uint64_t paged_pages ALIGN_64;    /* number of paged pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
    uint32_t cpupool;
};
typedef struct xen_v2d8_getdomaininfo xen_v2d8_getdomaininfo;

274 275 276
union xen_getdomaininfo {
    struct xen_v0_getdomaininfo v0;
    struct xen_v2_getdomaininfo v2;
277
    struct xen_v2d5_getdomaininfo v2d5;
278
    struct xen_v2d6_getdomaininfo v2d6;
J
Jim Fehlig 已提交
279
    struct xen_v2d7_getdomaininfo v2d7;
J
Jim Fehlig 已提交
280
    struct xen_v2d8_getdomaininfo v2d8;
281 282 283 284 285 286
};
typedef union xen_getdomaininfo xen_getdomaininfo;

union xen_getdomaininfolist {
    struct xen_v0_getdomaininfo *v0;
    struct xen_v2_getdomaininfo *v2;
287
    struct xen_v2d5_getdomaininfo *v2d5;
288
    struct xen_v2d6_getdomaininfo *v2d6;
J
Jim Fehlig 已提交
289
    struct xen_v2d7_getdomaininfo *v2d7;
J
Jim Fehlig 已提交
290
    struct xen_v2d8_getdomaininfo *v2d8;
291 292 293
};
typedef union xen_getdomaininfolist xen_getdomaininfolist;

294 295 296 297 298 299 300 301 302 303 304 305

struct xen_v2_getschedulerid {
    uint32_t sched_id; /* Get Scheduler ID from Xen */
};
typedef struct xen_v2_getschedulerid xen_v2_getschedulerid;


union xen_getschedulerid {
    struct xen_v2_getschedulerid *v2;
};
typedef union xen_getschedulerid xen_getschedulerid;

306 307 308 309 310 311 312 313 314
struct xen_v2s4_availheap {
    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
    int32_t  node;          /* NUMA node (-1 for sum across all nodes). */
    uint64_t avail_bytes;   /* Bytes available in the specified region. */
};

typedef struct xen_v2s4_availheap  xen_v2s4_availheap;

315 316 317 318 319 320 321 322 323
struct xen_v2s5_availheap {
    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
    int32_t  node;          /* NUMA node (-1 for sum across all nodes). */
    uint64_t avail_bytes ALIGN_64;   /* Bytes available in the specified region. */
};

typedef struct xen_v2s5_availheap  xen_v2s5_availheap;

324

325
#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size)                      \
P
Philipp Hahn 已提交
326
    (hv_versions.hypervisor < 2 ?                                       \
327
     (VIR_ALLOC_N(domlist.v0, (size)) == 0) :                           \
J
Jim Fehlig 已提交
328 329 330
     (hv_versions.dom_interface >= 8 ?                                  \
      (VIR_ALLOC_N(domlist.v2d8, (size)) == 0) :                        \
     (hv_versions.dom_interface == 7 ?                                  \
J
Jim Fehlig 已提交
331
      (VIR_ALLOC_N(domlist.v2d7, (size)) == 0) :                        \
P
Philipp Hahn 已提交
332
     (hv_versions.dom_interface == 6 ?                                  \
333
      (VIR_ALLOC_N(domlist.v2d6, (size)) == 0) :                        \
P
Philipp Hahn 已提交
334
     (hv_versions.dom_interface == 5 ?                                  \
335
      (VIR_ALLOC_N(domlist.v2d5, (size)) == 0) :                        \
J
Jim Fehlig 已提交
336
      (VIR_ALLOC_N(domlist.v2, (size)) == 0))))))
337

338
#define XEN_GETDOMAININFOLIST_FREE(domlist)            \
P
Philipp Hahn 已提交
339
    (hv_versions.hypervisor < 2 ?                      \
340
     VIR_FREE(domlist.v0) :                            \
J
Jim Fehlig 已提交
341 342 343
     (hv_versions.dom_interface >= 8 ?                 \
      VIR_FREE(domlist.v2d8) :                         \
     (hv_versions.dom_interface == 7 ?                 \
J
Jim Fehlig 已提交
344
      VIR_FREE(domlist.v2d7) :                         \
P
Philipp Hahn 已提交
345
     (hv_versions.dom_interface == 6 ?                 \
346
      VIR_FREE(domlist.v2d6) :                         \
P
Philipp Hahn 已提交
347
     (hv_versions.dom_interface == 5 ?                 \
348
      VIR_FREE(domlist.v2d5) :                         \
J
Jim Fehlig 已提交
349
      VIR_FREE(domlist.v2))))))
350

351
#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size)            \
P
Philipp Hahn 已提交
352
    (hv_versions.hypervisor < 2 ?                             \
353
     memset(domlist.v0, 0, sizeof(*domlist.v0) * size) :      \
J
Jim Fehlig 已提交
354 355 356
     (hv_versions.dom_interface >= 8 ?                        \
      memset(domlist.v2d8, 0, sizeof(*domlist.v2d8) * size) : \
     (hv_versions.dom_interface == 7 ?                        \
J
Jim Fehlig 已提交
357
      memset(domlist.v2d7, 0, sizeof(*domlist.v2d7) * size) : \
P
Philipp Hahn 已提交
358
     (hv_versions.dom_interface == 6 ?                        \
359
      memset(domlist.v2d6, 0, sizeof(*domlist.v2d6) * size) : \
P
Philipp Hahn 已提交
360
     (hv_versions.dom_interface == 5 ?                        \
361
      memset(domlist.v2d5, 0, sizeof(*domlist.v2d5) * size) : \
J
Jim Fehlig 已提交
362
      memset(domlist.v2, 0, sizeof(*domlist.v2) * size))))))
363 364

#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n)    \
P
Philipp Hahn 已提交
365
    (hv_versions.hypervisor < 2 ?                   \
366
     domlist.v0[n].domain :                         \
J
Jim Fehlig 已提交
367 368 369
     (hv_versions.dom_interface >= 8 ?              \
      domlist.v2d8[n].domain :                      \
     (hv_versions.dom_interface == 7 ?              \
J
Jim Fehlig 已提交
370
      domlist.v2d7[n].domain :                      \
P
Philipp Hahn 已提交
371
     (hv_versions.dom_interface == 6 ?              \
372
      domlist.v2d6[n].domain :                      \
P
Philipp Hahn 已提交
373
     (hv_versions.dom_interface == 5 ?              \
374
      domlist.v2d5[n].domain :                      \
J
Jim Fehlig 已提交
375
      domlist.v2[n].domain)))))
376

377
#define XEN_GETDOMAININFOLIST_UUID(domlist, n)      \
P
Philipp Hahn 已提交
378
    (hv_versions.hypervisor < 2 ?                   \
379
     domlist.v0[n].handle :                         \
J
Jim Fehlig 已提交
380 381 382
     (hv_versions.dom_interface >= 8 ?              \
      domlist.v2d8[n].handle :                      \
     (hv_versions.dom_interface == 7 ?              \
J
Jim Fehlig 已提交
383
      domlist.v2d7[n].handle :                      \
P
Philipp Hahn 已提交
384
     (hv_versions.dom_interface == 6 ?              \
385
      domlist.v2d6[n].handle :                      \
P
Philipp Hahn 已提交
386
     (hv_versions.dom_interface == 5 ?              \
387
      domlist.v2d5[n].handle :                      \
J
Jim Fehlig 已提交
388
      domlist.v2[n].handle)))))
389

390
#define XEN_GETDOMAININFOLIST_DATA(domlist)        \
P
Philipp Hahn 已提交
391
    (hv_versions.hypervisor < 2 ?                  \
392
     (void*)(domlist->v0) :                        \
J
Jim Fehlig 已提交
393 394 395
     (hv_versions.dom_interface >= 8 ?             \
      (void*)(domlist->v2d8) :                     \
     (hv_versions.dom_interface == 7 ?             \
J
Jim Fehlig 已提交
396
      (void*)(domlist->v2d7) :                     \
P
Philipp Hahn 已提交
397
     (hv_versions.dom_interface == 6 ?             \
398
      (void*)(domlist->v2d6) :                     \
P
Philipp Hahn 已提交
399
     (hv_versions.dom_interface == 5 ?             \
400
      (void*)(domlist->v2d5) :                     \
J
Jim Fehlig 已提交
401
      (void*)(domlist->v2))))))
402 403

#define XEN_GETDOMAININFO_SIZE                     \
P
Philipp Hahn 已提交
404
    (hv_versions.hypervisor < 2 ?                  \
405
     sizeof(xen_v0_getdomaininfo) :                \
J
Jim Fehlig 已提交
406 407 408
     (hv_versions.dom_interface >= 8 ?             \
      sizeof(xen_v2d8_getdomaininfo) :             \
     (hv_versions.dom_interface == 7 ?             \
J
Jim Fehlig 已提交
409
      sizeof(xen_v2d7_getdomaininfo) :             \
P
Philipp Hahn 已提交
410
     (hv_versions.dom_interface == 6 ?             \
411
      sizeof(xen_v2d6_getdomaininfo) :             \
P
Philipp Hahn 已提交
412
     (hv_versions.dom_interface == 5 ?             \
413
      sizeof(xen_v2d5_getdomaininfo) :             \
J
Jim Fehlig 已提交
414
      sizeof(xen_v2_getdomaininfo))))))
415 416

#define XEN_GETDOMAININFO_CLEAR(dominfo)                           \
P
Philipp Hahn 已提交
417
    (hv_versions.hypervisor < 2 ?                                  \
418
     memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) :      \
J
Jim Fehlig 已提交
419 420 421
     (hv_versions.dom_interface >= 8 ?                             \
      memset(&(dominfo.v2d8), 0, sizeof(xen_v2d8_getdomaininfo)) : \
     (hv_versions.dom_interface == 7 ?                             \
J
Jim Fehlig 已提交
422
      memset(&(dominfo.v2d7), 0, sizeof(xen_v2d7_getdomaininfo)) : \
P
Philipp Hahn 已提交
423
     (hv_versions.dom_interface == 6 ?                             \
424
      memset(&(dominfo.v2d6), 0, sizeof(xen_v2d6_getdomaininfo)) : \
P
Philipp Hahn 已提交
425
     (hv_versions.dom_interface == 5 ?                             \
426
      memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo)) : \
J
Jim Fehlig 已提交
427
      memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)))))))
428 429

#define XEN_GETDOMAININFO_DOMAIN(dominfo)       \
P
Philipp Hahn 已提交
430
    (hv_versions.hypervisor < 2 ?               \
431
     dominfo.v0.domain :                        \
J
Jim Fehlig 已提交
432 433 434
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.domain :                     \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
435
      dominfo.v2d7.domain :                     \
P
Philipp Hahn 已提交
436
     (hv_versions.dom_interface == 6 ?          \
437
      dominfo.v2d6.domain :                     \
P
Philipp Hahn 已提交
438
     (hv_versions.dom_interface == 5 ?          \
439
      dominfo.v2d5.domain :                     \
J
Jim Fehlig 已提交
440
      dominfo.v2.domain)))))
441 442

#define XEN_GETDOMAININFO_CPUTIME(dominfo)      \
P
Philipp Hahn 已提交
443
    (hv_versions.hypervisor < 2 ?               \
444
     dominfo.v0.cpu_time :                      \
J
Jim Fehlig 已提交
445 446 447
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.cpu_time :                   \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
448
      dominfo.v2d7.cpu_time :                   \
P
Philipp Hahn 已提交
449
     (hv_versions.dom_interface == 6 ?          \
450
      dominfo.v2d6.cpu_time :                   \
P
Philipp Hahn 已提交
451
     (hv_versions.dom_interface == 5 ?          \
452
      dominfo.v2d5.cpu_time :                   \
J
Jim Fehlig 已提交
453
      dominfo.v2.cpu_time)))))
454

455 456

#define XEN_GETDOMAININFO_CPUCOUNT(dominfo)     \
P
Philipp Hahn 已提交
457
    (hv_versions.hypervisor < 2 ?               \
458
     dominfo.v0.nr_online_vcpus :               \
J
Jim Fehlig 已提交
459 460 461
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.nr_online_vcpus :            \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
462
      dominfo.v2d7.nr_online_vcpus :            \
P
Philipp Hahn 已提交
463
     (hv_versions.dom_interface == 6 ?          \
464
      dominfo.v2d6.nr_online_vcpus :            \
P
Philipp Hahn 已提交
465
     (hv_versions.dom_interface == 5 ?          \
466
      dominfo.v2d5.nr_online_vcpus :            \
J
Jim Fehlig 已提交
467
      dominfo.v2.nr_online_vcpus)))))
468

J
Jim Fehlig 已提交
469
#define XEN_GETDOMAININFO_MAXCPUID(dominfo)     \
P
Philipp Hahn 已提交
470
    (hv_versions.hypervisor < 2 ?               \
471
     dominfo.v0.max_vcpu_id :                   \
J
Jim Fehlig 已提交
472 473 474
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.max_vcpu_id :                \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
475
      dominfo.v2d7.max_vcpu_id :                \
P
Philipp Hahn 已提交
476
     (hv_versions.dom_interface == 6 ?          \
477
      dominfo.v2d6.max_vcpu_id :                \
P
Philipp Hahn 已提交
478
     (hv_versions.dom_interface == 5 ?          \
479
      dominfo.v2d5.max_vcpu_id :                \
J
Jim Fehlig 已提交
480
      dominfo.v2.max_vcpu_id)))))
481

482
#define XEN_GETDOMAININFO_FLAGS(dominfo)        \
P
Philipp Hahn 已提交
483
    (hv_versions.hypervisor < 2 ?               \
484
     dominfo.v0.flags :                         \
J
Jim Fehlig 已提交
485 486 487
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.flags :                      \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
488
      dominfo.v2d7.flags :                      \
P
Philipp Hahn 已提交
489
     (hv_versions.dom_interface == 6 ?          \
490
      dominfo.v2d6.flags :                      \
P
Philipp Hahn 已提交
491
     (hv_versions.dom_interface == 5 ?          \
492
      dominfo.v2d5.flags :                      \
J
Jim Fehlig 已提交
493
      dominfo.v2.flags)))))
494 495

#define XEN_GETDOMAININFO_TOT_PAGES(dominfo)    \
P
Philipp Hahn 已提交
496
    (hv_versions.hypervisor < 2 ?               \
497
     dominfo.v0.tot_pages :                     \
J
Jim Fehlig 已提交
498 499 500
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.tot_pages :                  \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
501
      dominfo.v2d7.tot_pages :                  \
P
Philipp Hahn 已提交
502
     (hv_versions.dom_interface == 6 ?          \
503
      dominfo.v2d6.tot_pages :                  \
P
Philipp Hahn 已提交
504
     (hv_versions.dom_interface == 5 ?          \
505
      dominfo.v2d5.tot_pages :                  \
J
Jim Fehlig 已提交
506
      dominfo.v2.tot_pages)))))
507 508

#define XEN_GETDOMAININFO_MAX_PAGES(dominfo)    \
P
Philipp Hahn 已提交
509
    (hv_versions.hypervisor < 2 ?               \
510
     dominfo.v0.max_pages :                     \
J
Jim Fehlig 已提交
511 512 513
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.max_pages :                  \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
514
      dominfo.v2d7.max_pages :                  \
P
Philipp Hahn 已提交
515
     (hv_versions.dom_interface == 6 ?          \
516
      dominfo.v2d6.max_pages :                  \
P
Philipp Hahn 已提交
517
     (hv_versions.dom_interface == 5 ?          \
518
      dominfo.v2d5.max_pages :                  \
J
Jim Fehlig 已提交
519
      dominfo.v2.max_pages)))))
520

521
#define XEN_GETDOMAININFO_UUID(dominfo)         \
P
Philipp Hahn 已提交
522
    (hv_versions.hypervisor < 2 ?               \
523
     dominfo.v0.handle :                        \
J
Jim Fehlig 已提交
524 525 526
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.handle :                     \
     (hv_versions.dom_interface == 7 ?          \
J
Jim Fehlig 已提交
527
      dominfo.v2d7.handle :                     \
P
Philipp Hahn 已提交
528
     (hv_versions.dom_interface == 6 ?          \
529
      dominfo.v2d6.handle :                     \
P
Philipp Hahn 已提交
530
     (hv_versions.dom_interface == 5 ?          \
531
      dominfo.v2d5.handle :                     \
J
Jim Fehlig 已提交
532
      dominfo.v2.handle)))))
533

534

535 536 537 538
static int
lock_pages(void *addr, size_t len)
{
#ifdef __linux__
539 540 541 542 543 544 545
    if (mlock(addr, len) < 0) {
        virReportSystemError(errno,
                             _("Unable to lock %zu bytes of memory"),
                             len);
        return -1;
    }
    return 0;
546
#elif defined(__sun)
547
    return 0;
548 549 550 551 552 553 554
#endif
}

static int
unlock_pages(void *addr, size_t len)
{
#ifdef __linux__
555 556 557 558 559 560 561
    if (munlock(addr, len) < 0) {
        virReportSystemError(errno,
                             _("Unable to unlock %zu bytes of memory"),
                             len);
        return -1;
    }
    return 0;
562
#elif defined(__sun)
563
    return 0;
564 565 566
#endif
}

567 568

struct xen_v0_getdomaininfolistop {
569 570 571 572 573
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v0_getdomaininfo *buffer;
    uint32_t  num_domains;
};
574 575 576 577 578 579 580 581 582 583 584
typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;


struct xen_v2_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v2_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;

585 586 587 588
/* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
struct xen_v2s3_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
589 590
#ifdef __BIG_ENDIAN__
    struct {
591
        int __pad[(sizeof(long long) - sizeof(struct xen_v2d5_getdomaininfo *)) / sizeof(int)];
592 593 594
        struct xen_v2d5_getdomaininfo *v;
    } buffer;
#else
595 596 597 598
    union {
        struct xen_v2d5_getdomaininfo *v;
        uint64_t pad ALIGN_64;
    } buffer;
599
#endif
600 601 602 603
    uint32_t  num_domains;
};
typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;

604

605 606 607 608 609 610 611

struct xen_v0_domainop {
    domid_t   domain;
};
typedef struct xen_v0_domainop xen_v0_domainop;

/*
612
 * The information for a destroydomain system hypercall
613 614 615 616 617 618
 */
#define XEN_V0_OP_DESTROYDOMAIN	9
#define XEN_V1_OP_DESTROYDOMAIN	9
#define XEN_V2_OP_DESTROYDOMAIN	2

/*
619
 * The information for a pausedomain system hypercall
620 621 622 623 624 625
 */
#define XEN_V0_OP_PAUSEDOMAIN	10
#define XEN_V1_OP_PAUSEDOMAIN	10
#define XEN_V2_OP_PAUSEDOMAIN	3

/*
626
 * The information for an unpausedomain system hypercall
627 628 629 630 631 632
 */
#define XEN_V0_OP_UNPAUSEDOMAIN	11
#define XEN_V1_OP_UNPAUSEDOMAIN	11
#define XEN_V2_OP_UNPAUSEDOMAIN	4

/*
E
Eric Blake 已提交
633
 * The information for a setmaxmem system hypercall
634 635 636
 */
#define XEN_V0_OP_SETMAXMEM	28
#define XEN_V1_OP_SETMAXMEM	28
637
#define XEN_V2_OP_SETMAXMEM	11
638 639 640 641 642 643 644 645 646 647 648 649 650

struct xen_v0_setmaxmem {
    domid_t	domain;
    uint64_t	maxmem;
};
typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;

struct xen_v2_setmaxmem {
    uint64_t	maxmem;
};
typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;

651 652 653 654 655
struct xen_v2d5_setmaxmem {
    uint64_t	maxmem ALIGN_64;
};
typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;

656
/*
E
Eric Blake 已提交
657
 * The information for a setmaxvcpu system hypercall
658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675
 */
#define XEN_V0_OP_SETMAXVCPU	41
#define XEN_V1_OP_SETMAXVCPU	41
#define XEN_V2_OP_SETMAXVCPU	15

struct xen_v0_setmaxvcpu {
    domid_t	domain;
    uint32_t	maxvcpu;
};
typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;

struct xen_v2_setmaxvcpu {
    uint32_t	maxvcpu;
};
typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;

/*
E
Eric Blake 已提交
676
 * The information for a setvcpumap system hypercall
677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
 * Note that between 1 and 2 the limitation to 64 physical CPU was lifted
 * hence the difference in structures
 */
#define XEN_V0_OP_SETVCPUMAP	20
#define XEN_V1_OP_SETVCPUMAP	20
#define XEN_V2_OP_SETVCPUMAP	9

struct xen_v0_setvcpumap {
    domid_t	domain;
    uint32_t	vcpu;
    cpumap_t    cpumap;
};
typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;

struct xen_v2_cpumap {
    uint8_t    *bitmap;
    uint32_t    nr_cpus;
};
struct xen_v2_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2_cpumap cpumap;
};
typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;

702 703
/* HV version 2, Dom version 5 requires 64-bit alignment */
struct xen_v2d5_cpumap {
704 705
#ifdef __BIG_ENDIAN__
    struct {
706
        int __pad[(sizeof(long long) - sizeof(uint8_t *)) / sizeof(int)];
707 708 709
        uint8_t *v;
    } bitmap;
#else
710 711 712 713
    union {
        uint8_t    *v;
        uint64_t   pad ALIGN_64;
    } bitmap;
714
#endif
715 716 717 718 719 720 721 722
    uint32_t    nr_cpus;
};
struct xen_v2d5_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2d5_cpumap cpumap;
};
typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;

723
/*
E
Eric Blake 已提交
724
 * The information for a vcpuinfo system hypercall
725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752
 */
#define XEN_V0_OP_GETVCPUINFO   43
#define XEN_V1_OP_GETVCPUINFO	43
#define XEN_V2_OP_GETVCPUINFO   14

struct xen_v0_vcpuinfo {
    domid_t	domain;		/* owner's domain */
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
    cpumap_t	cpumap;		/* deprecated in V2 */
};
typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;

struct xen_v2_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;

753 754 755 756 757 758 759 760 761 762
struct xen_v2d5_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time ALIGN_64; /* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;

763 764 765 766 767
/*
 * from V2 the pinning of a vcpu is read with a separate call
 */
#define XEN_V2_OP_GETVCPUMAP	25
typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
768
typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;
769

770 771 772 773 774
/*
 * from V2 we get the scheduler information
 */
#define XEN_V2_OP_GETSCHEDULERID	4

775 776 777
/*
 * from V2 we get the available heap information
 */
E
Eric Blake 已提交
778
#define XEN_V2_OP_GETAVAILHEAP		9
779

780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811
/*
 * from V2 we get the scheduler parameter
 */
#define XEN_V2_OP_SCHEDULER		16
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF       4
#define XEN_SCHEDULER_CREDIT     5
/* get/set scheduler parameters */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1

struct xen_v2_setschedinfo {
    uint32_t sched_id;
    uint32_t cmd;
    union {
        struct xen_domctl_sched_sedf {
            uint64_t period ALIGN_64;
            uint64_t slice  ALIGN_64;
            uint64_t latency ALIGN_64;
            uint32_t extratime;
            uint32_t weight;
        } sedf;
        struct xen_domctl_sched_credit {
            uint16_t weight;
            uint16_t cap;
        } credit;
    } u;
};
typedef struct xen_v2_setschedinfo xen_v2_setschedinfo;
typedef struct xen_v2_setschedinfo xen_v2_getschedinfo;


812 813 814 815 816 817 818 819 820
/*
 * The hypercall operation structures also have changed on
 * changeset 86d26e6ec89b
 */
/* the old structure */
struct xen_op_v0 {
    uint32_t cmd;
    uint32_t interface_version;
    union {
821 822 823 824 825 826 827
        xen_v0_getdomaininfolistop getdomaininfolist;
        xen_v0_domainop          domain;
        xen_v0_setmaxmem         setmaxmem;
        xen_v0_setmaxvcpu        setmaxvcpu;
        xen_v0_setvcpumap        setvcpumap;
        xen_v0_vcpuinfo          getvcpuinfo;
        uint8_t padding[128];
828 829 830 831 832 833 834 835 836 837
    } u;
};
typedef struct xen_op_v0 xen_op_v0;
typedef struct xen_op_v0 xen_op_v1;

/* the new structure for systems operations */
struct xen_op_v2_sys {
    uint32_t cmd;
    uint32_t interface_version;
    union {
838 839
        xen_v2_getdomaininfolistop   getdomaininfolist;
        xen_v2s3_getdomaininfolistop getdomaininfolists3;
840
        xen_v2_getschedulerid        getschedulerid;
841
        xen_v2s4_availheap           availheap;
842
        xen_v2s5_availheap           availheap5;
843
        uint8_t padding[128];
844 845 846 847 848 849 850 851 852 853
    } u;
};
typedef struct xen_op_v2_sys xen_op_v2_sys;

/* the new structure for domains operation */
struct xen_op_v2_dom {
    uint32_t cmd;
    uint32_t interface_version;
    domid_t  domain;
    union {
854
        xen_v2_setmaxmem         setmaxmem;
855
        xen_v2d5_setmaxmem       setmaxmemd5;
856 857
        xen_v2_setmaxvcpu        setmaxvcpu;
        xen_v2_setvcpumap        setvcpumap;
858
        xen_v2d5_setvcpumap      setvcpumapd5;
859
        xen_v2_vcpuinfo          getvcpuinfo;
860
        xen_v2d5_vcpuinfo        getvcpuinfod5;
861
        xen_v2_getvcpumap        getvcpumap;
862
        xen_v2d5_getvcpumap      getvcpumapd5;
863 864
        xen_v2_setschedinfo      setschedinfo;
        xen_v2_getschedinfo      getschedinfo;
865
        uint8_t padding[128];
866 867 868
    } u;
};
typedef struct xen_op_v2_dom xen_op_v2_dom;
869 870


871
#ifdef __linux__
872 873
# define XEN_HYPERVISOR_SOCKET	"/proc/xen/privcmd"
# define HYPERVISOR_CAPABILITIES	"/sys/hypervisor/properties/capabilities"
874
#elif defined(__sun)
875
# define XEN_HYPERVISOR_SOCKET	"/dev/xen/privcmd"
876
#else
877
# error "unsupported platform"
878
#endif
879

880
static unsigned long long xenHypervisorGetMaxMemory(virDomainPtr domain);
881

882
struct xenUnifiedDriver xenHypervisorDriver = {
E
Eric Blake 已提交
883 884 885 886 887 888 889 890 891 892 893 894
    .xenDomainSuspend = xenHypervisorPauseDomain,
    .xenDomainResume = xenHypervisorResumeDomain,
    .xenDomainDestroyFlags = xenHypervisorDestroyDomainFlags,
    .xenDomainGetOSType = xenHypervisorDomainGetOSType,
    .xenDomainGetMaxMemory = xenHypervisorGetMaxMemory,
    .xenDomainSetMaxMemory = xenHypervisorSetMaxMemory,
    .xenDomainGetInfo = xenHypervisorGetDomainInfo,
    .xenDomainPinVcpu = xenHypervisorPinVcpu,
    .xenDomainGetVcpus = xenHypervisorGetVcpus,
    .xenDomainGetSchedulerType = xenHypervisorGetSchedulerType,
    .xenDomainGetSchedulerParameters = xenHypervisorGetSchedulerParameters,
    .xenDomainSetSchedulerParameters = xenHypervisorSetSchedulerParameters,
895 896
};

897 898 899
/**
 * xenHypervisorDoV0Op:
 * @handle: the handle to the Xen hypervisor
R
Richard W.M. Jones 已提交
900
 * @op: pointer to the hypervisor operation structure
901
 *
E
Eric Blake 已提交
902 903
 * Do a hypervisor operation though the old interface,
 * this leads to a hypervisor call through ioctl.
904 905 906 907 908 909 910 911 912 913
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
{
    int ret;
    v0_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
P
Philipp Hahn 已提交
914
    op->interface_version = hv_versions.hv << 8;
915 916 917
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

918
    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
919
        return -1;
920 921 922

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
923 924 925
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
926 927
    }

928
    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
929 930 931
        ret = -1;

    if (ret < 0)
932
        return -1;
933

934
    return 0;
935 936 937 938
}
/**
 * xenHypervisorDoV1Op:
 * @handle: the handle to the Xen hypervisor
R
Richard W.M. Jones 已提交
939
 * @op: pointer to the hypervisor operation structure
940
 *
E
Eric Blake 已提交
941
 * Do a hypervisor v1 operation, this leads to a hypervisor call through
942 943 944 945 946 947 948 949 950 951 952 953 954 955 956
 * ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV1Op(int handle, xen_op_v1* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

957
    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
958
        return -1;
959 960 961

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
962 963 964
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
965 966
    }

967
    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
968 969 970
        ret = -1;

    if (ret < 0)
971
        return -1;
972

973
    return 0;
974 975 976 977 978 979 980
}

/**
 * xenHypervisorDoV2Sys:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
E
Eric Blake 已提交
981
 * Do a hypervisor v2 system operation, this leads to a hypervisor
982 983 984 985 986 987 988 989 990 991 992
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
P
Philipp Hahn 已提交
993
    op->interface_version = hv_versions.sys_interface;
994 995 996
    hc.op = __HYPERVISOR_sysctl;
    hc.arg[0] = (unsigned long) op;

997
    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
998
        return -1;
999 1000 1001

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
1002 1003 1004
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
1005 1006
    }

1007
    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
1008 1009 1010
        ret = -1;

    if (ret < 0)
1011
        return -1;
1012

1013
    return 0;
1014 1015 1016 1017 1018 1019 1020
}

/**
 * xenHypervisorDoV2Dom:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor domain operation structure
 *
E
Eric Blake 已提交
1021
 * Do a hypervisor v2 domain operation, this leads to a hypervisor
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
P
Philipp Hahn 已提交
1033
    op->interface_version = hv_versions.dom_interface;
1034 1035 1036
    hc.op = __HYPERVISOR_domctl;
    hc.arg[0] = (unsigned long) op;

1037
    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
1038
        return -1;
1039 1040 1041

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
1042 1043 1044
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
1045 1046
    }

1047
    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
1048 1049 1050
        ret = -1;

    if (ret < 0)
1051
        return -1;
1052

1053
    return 0;
1054 1055 1056 1057 1058 1059 1060 1061 1062
}

/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
1063
 * Do a low level hypercall to list existing domains information
1064 1065 1066 1067
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
1068 1069 1070
virXen_getdomaininfolist(int handle,
                         int first_domain,
                         int maxids,
1071
                         xen_getdomaininfolist *dominfos)
1072 1073 1074
{
    int ret = -1;

1075
    if (lock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
1076
                   XEN_GETDOMAININFO_SIZE * maxids) < 0)
1077
        return -1;
1078

P
Philipp Hahn 已提交
1079
    if (hv_versions.hypervisor > 1) {
1080 1081 1082
        xen_op_v2_sys op;

        memset(&op, 0, sizeof(op));
1083
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;
1084

P
Philipp Hahn 已提交
1085
        if (hv_versions.sys_interface < 3) {
1086 1087 1088 1089 1090 1091 1092 1093 1094 1095
            op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolist.max_domains = maxids;
            op.u.getdomaininfolist.buffer = dominfos->v2;
            op.u.getdomaininfolist.num_domains = maxids;
        } else {
            op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolists3.max_domains = maxids;
            op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
            op.u.getdomaininfolists3.num_domains = maxids;
        }
1096
        ret = xenHypervisorDoV2Sys(handle, &op);
1097 1098

        if (ret == 0) {
P
Philipp Hahn 已提交
1099
            if (hv_versions.sys_interface < 3)
1100 1101 1102 1103
                ret = op.u.getdomaininfolist.num_domains;
            else
                ret = op.u.getdomaininfolists3.num_domains;
        }
P
Philipp Hahn 已提交
1104
    } else if (hv_versions.hypervisor == 1) {
1105 1106 1107
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
1108 1109 1110 1111 1112 1113 1114 1115
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
P
Philipp Hahn 已提交
1116
    } else if (hv_versions.hypervisor == 0) {
1117 1118 1119
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
1120 1121 1122 1123 1124 1125 1126 1127
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
1128
    }
1129
    if (unlock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
1130
                     XEN_GETDOMAININFO_SIZE * maxids) < 0)
1131
        ret = -1;
1132

1133
    return ret;
1134 1135
}

1136
static int
1137 1138
virXen_getdomaininfo(int handle, int first_domain, xen_getdomaininfo *dominfo)
{
1139 1140
    xen_getdomaininfolist dominfos;

P
Philipp Hahn 已提交
1141
    if (hv_versions.hypervisor < 2) {
1142 1143 1144 1145 1146 1147 1148 1149 1150
        dominfos.v0 = &(dominfo->v0);
    } else {
        dominfos.v2 = &(dominfo->v2);
    }

    return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
}


1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163
/**
 * xenHypervisorGetSchedulerType:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams:give a number of scheduler parameters.
 *
 * Do a low level hypercall to get scheduler type
 *
 * Returns scheduler name or NULL in case of failure
 */
char *
xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
{
    char *schedulertype = NULL;
1164
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
1165

1166
    if (domain->id < 0) {
1167 1168
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
1169 1170 1171 1172
        return NULL;
    }

    /*
P
Philipp Hahn 已提交
1173
     * Support only hv_versions.dom_interface >=5
1174
     * (Xen3.1.0 or later)
1175
     * TODO: check on Xen 3.0.3
1176
     */
P
Philipp Hahn 已提交
1177
    if (hv_versions.dom_interface < 5) {
1178 1179
        virReportError(VIR_ERR_NO_XEN, "%s",
                       _("unsupported in dom interface < 5"));
1180 1181 1182
        return NULL;
    }

P
Philipp Hahn 已提交
1183
    if (hv_versions.hypervisor > 1) {
1184 1185 1186 1187 1188 1189 1190
        xen_op_v2_sys op;
        int ret;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op);
        if (ret < 0)
1191
            return NULL;
1192 1193

        switch (op.u.getschedulerid.sched_id){
1194 1195
            case XEN_SCHEDULER_SEDF:
                schedulertype = strdup("sedf");
1196
                if (schedulertype == NULL)
1197
                    virReportOOMError();
1198
                if (nparams)
1199
                    *nparams = XEN_SCHED_SEDF_NPARAM;
1200 1201 1202
                break;
            case XEN_SCHEDULER_CREDIT:
                schedulertype = strdup("credit");
1203
                if (schedulertype == NULL)
1204
                    virReportOOMError();
1205
                if (nparams)
1206
                    *nparams = XEN_SCHED_CRED_NPARAM;
1207 1208 1209
                break;
            default:
                break;
1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220
        }
    }

    return schedulertype;
}

/**
 * xenHypervisorGetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: pointer to scheduler parameters.
 *     This memory area should be allocated before calling.
1221 1222
 * @nparams: this parameter must be at least as large as
 *     the given number of scheduler parameters.
1223 1224 1225 1226 1227 1228 1229 1230
 *     from xenHypervisorGetSchedulerType().
 *
 * Do a low level hypercall to get scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorGetSchedulerParameters(virDomainPtr domain,
1231 1232
                                    virTypedParameterPtr params,
                                    int *nparams)
1233
{
1234
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
1235

1236

1237
    if (domain->id < 0) {
1238 1239
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
1240 1241 1242 1243
        return -1;
    }

    /*
P
Philipp Hahn 已提交
1244
     * Support only hv_versions.dom_interface >=5
1245 1246 1247
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
P
Philipp Hahn 已提交
1248
    if (hv_versions.dom_interface < 5) {
1249 1250
        virReportError(VIR_ERR_NO_XEN, "%s",
                       _("unsupported in dom interface < 5"));
1251 1252 1253
        return -1;
    }

P
Philipp Hahn 已提交
1254
    if (hv_versions.hypervisor > 1) {
1255 1256 1257 1258 1259 1260 1261 1262
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0)
1263
            return -1;
1264 1265

        switch (op_sys.u.getschedulerid.sched_id){
1266
            case XEN_SCHEDULER_SEDF:
1267
                if (*nparams < XEN_SCHED_SEDF_NPARAM) {
1268 1269
                    virReportError(VIR_ERR_INVALID_ARG,
                                   "%s", _("Invalid parameter count"));
1270 1271 1272
                    return -1;
                }

1273 1274
                /* TODO: Implement for Xen/SEDF */
                TODO
1275
                return -1;
1276 1277 1278 1279 1280 1281 1282 1283
            case XEN_SCHEDULER_CREDIT:
                memset(&op_dom, 0, sizeof(op_dom));
                op_dom.cmd = XEN_V2_OP_SCHEDULER;
                op_dom.domain = (domid_t) domain->id;
                op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
                op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
                ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
                if (ret < 0)
1284
                    return -1;
1285

1286 1287 1288 1289
                if (virTypedParameterAssign(&params[0],
                                            VIR_DOMAIN_SCHEDULER_WEIGHT,
                                            VIR_TYPED_PARAM_UINT,
                                            op_dom.u.getschedinfo.u.credit.weight) < 0)
C
Chris Lalancette 已提交
1290
                    return -1;
1291 1292 1293 1294 1295 1296

                if (*nparams > 1 &&
                    virTypedParameterAssign(&params[1],
                                            VIR_DOMAIN_SCHEDULER_CAP,
                                            VIR_TYPED_PARAM_UINT,
                                            op_dom.u.getschedinfo.u.credit.cap) < 0)
1297
                        return -1;
1298

1299 1300
                if (*nparams > XEN_SCHED_CRED_NPARAM)
                    *nparams = XEN_SCHED_CRED_NPARAM;
1301 1302
                break;
            default:
1303 1304 1305
                virReportError(VIR_ERR_INVALID_ARG,
                               _("Unknown scheduler %d"),
                               op_sys.u.getschedulerid.sched_id);
1306
                return -1;
1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323
        }
    }

    return 0;
}

/**
 * xenHypervisorSetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams:give a number of scheduler setting parameters .
 *
 * Do a low level hypercall to set scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorSetSchedulerParameters(virDomainPtr domain,
1324 1325
                                    virTypedParameterPtr params,
                                    int nparams)
1326 1327
{
    int i;
1328
    unsigned int val;
1329
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
1330
    char buf[256];
1331

1332 1333 1334
    if (nparams == 0) {
        /* nothing to do, exit early */
        return 0;
1335 1336
    }

1337 1338 1339 1340 1341 1342 1343 1344
    if (virTypedParameterArrayValidate(params, nparams,
                                       VIR_DOMAIN_SCHEDULER_WEIGHT,
                                       VIR_TYPED_PARAM_UINT,
                                       VIR_DOMAIN_SCHEDULER_CAP,
                                       VIR_TYPED_PARAM_UINT,
                                       NULL) < 0)
        return -1;

1345
    if (domain->id < 0) {
1346 1347
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
1348 1349 1350 1351
        return -1;
    }

    /*
P
Philipp Hahn 已提交
1352
     * Support only hv_versions.dom_interface >=5
1353 1354 1355
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
P
Philipp Hahn 已提交
1356
    if (hv_versions.dom_interface < 5) {
1357 1358
        virReportError(VIR_ERR_NO_XEN, "%s",
                       _("unsupported in dom interface < 5"));
1359 1360 1361
        return -1;
    }

P
Philipp Hahn 已提交
1362
    if (hv_versions.hypervisor > 1) {
1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret == -1) return -1;

        switch (op_sys.u.getschedulerid.sched_id){
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            TODO
1376
            return -1;
1377 1378 1379 1380 1381 1382 1383 1384
        case XEN_SCHEDULER_CREDIT: {
            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_putinfo;

            /*
1385 1386
             * credit scheduler parameters
             * following values do not change the parameters
1387 1388 1389 1390 1391
             */
            op_dom.u.getschedinfo.u.credit.weight = 0;
            op_dom.u.getschedinfo.u.credit.cap    = (uint16_t)~0U;

            for (i = 0; i < nparams; i++) {
1392
                memset(&buf, 0, sizeof(buf));
1393
                if (STREQ(params[i].field, VIR_DOMAIN_SCHEDULER_WEIGHT)) {
1394 1395
                    val = params[i].value.ui;
                    if ((val < 1) || (val > USHRT_MAX)) {
1396 1397 1398
                        virReportError(VIR_ERR_INVALID_ARG,
                                       _("Credit scheduler weight parameter (%d) "
                                         "is out of range (1-65535)"), val);
1399
                        return -1;
1400
                    }
1401
                    op_dom.u.getschedinfo.u.credit.weight = val;
1402
                } else if (STREQ(params[i].field, VIR_DOMAIN_SCHEDULER_CAP)) {
1403
                    val = params[i].value.ui;
1404
                    if (val >= USHRT_MAX) {
1405 1406 1407
                        virReportError(VIR_ERR_INVALID_ARG,
                                       _("Credit scheduler cap parameter (%d) is "
                                         "out of range (0-65534)"), val);
1408
                        return -1;
1409
                    }
1410
                    op_dom.u.getschedinfo.u.credit.cap = val;
1411
                }
1412 1413 1414 1415
            }

            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
1416
                return -1;
1417
            break;
1418
        }
1419
        default:
1420 1421 1422
            virReportError(VIR_ERR_INVALID_ARG,
                           _("Unknown scheduler %d"),
                           op_sys.u.getschedulerid.sched_id);
1423 1424 1425
            return -1;
        }
    }
1426

1427 1428 1429
    return 0;
}

1430 1431

int
1432 1433 1434
xenHypervisorDomainBlockStats(virDomainPtr dom,
                              const char *path,
                              struct _virDomainBlockStats *stats)
1435
{
1436
#ifdef __linux__
1437
    xenUnifiedPrivatePtr priv = dom->conn->privateData;
D
Daniel P. Berrange 已提交
1438
    int ret;
1439

D
Daniel P. Berrange 已提交
1440 1441
    xenUnifiedLock(priv);
    /* Need to lock because it hits the xenstore handle :-( */
1442
    ret = xenLinuxDomainBlockStats(priv, dom, path, stats);
D
Daniel P. Berrange 已提交
1443 1444
    xenUnifiedUnlock(priv);
    return ret;
1445
#else
1446 1447
    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                   _("block statistics not supported on this platform"));
1448
    return -1;
1449
#endif
1450 1451 1452 1453 1454 1455 1456 1457 1458 1459
}

/* Paths have the form vif<domid>.<n> (this interface checks that
 * <domid> is the real domain ID and returns an error if not).
 *
 * In future we may allow you to query bridge stats (virbrX or
 * xenbrX), but that will probably be through a separate
 * virNetwork interface, as yet not decided.
 */
int
1460 1461 1462
xenHypervisorDomainInterfaceStats(virDomainPtr dom,
                                  const char *path,
                                  struct _virDomainInterfaceStats *stats)
1463
{
1464
#ifdef __linux__
1465 1466
    int rqdomid, device;

1467 1468 1469
    /* Verify that the vif requested is one belonging to the current
     * domain.
     */
1470
    if (sscanf(path, "vif%d.%d", &rqdomid, &device) != 2) {
1471 1472
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid path, should be vif<domid>.<n>."));
1473 1474 1475
        return -1;
    }
    if (rqdomid != dom->id) {
1476 1477
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid path, vif<domid> should match this domain ID"));
1478 1479 1480
        return -1;
    }

1481
    return linuxDomainInterfaceStats(path, stats);
1482
#else
1483 1484
    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                   _("/proc/net/dev: Interface not found"));
1485
    return -1;
1486
#endif
1487 1488
}

1489 1490 1491 1492 1493 1494 1495 1496 1497 1498
/**
 * virXen_pausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to pause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
1499
virXen_pausedomain(int handle, int id)
1500 1501 1502
{
    int ret = -1;

P
Philipp Hahn 已提交
1503
    if (hv_versions.hypervisor > 1) {
1504 1505 1506
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
1507 1508 1509
        op.cmd = XEN_V2_OP_PAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
P
Philipp Hahn 已提交
1510
    } else if (hv_versions.hypervisor == 1) {
1511 1512 1513
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
1514 1515 1516
        op.cmd = XEN_V1_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
P
Philipp Hahn 已提交
1517
    } else if (hv_versions.hypervisor == 0) {
1518 1519 1520
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
1521 1522 1523
        op.cmd = XEN_V0_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
1524
    }
1525
    return ret;
1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
}

/**
 * virXen_unpausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to unpause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
1538
virXen_unpausedomain(int handle, int id)
1539 1540 1541
{
    int ret = -1;

P
Philipp Hahn 已提交
1542
    if (hv_versions.hypervisor > 1) {
1543 1544 1545
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
1546 1547 1548
        op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
P
Philipp Hahn 已提交
1549
    } else if (hv_versions.hypervisor == 1) {
1550 1551 1552
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
1553 1554 1555
        op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
P
Philipp Hahn 已提交
1556
    } else if (hv_versions.hypervisor == 0) {
1557 1558 1559
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
1560 1561 1562
        op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
1563
    }
1564
    return ret;
1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576
}

/**
 * virXen_destroydomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to destroy the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
1577
virXen_destroydomain(int handle, int id)
1578 1579 1580
{
    int ret = -1;

P
Philipp Hahn 已提交
1581
    if (hv_versions.hypervisor > 1) {
1582 1583 1584
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
1585 1586 1587
        op.cmd = XEN_V2_OP_DESTROYDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
P
Philipp Hahn 已提交
1588
    } else if (hv_versions.hypervisor == 1) {
1589 1590 1591
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
1592 1593 1594
        op.cmd = XEN_V1_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
P
Philipp Hahn 已提交
1595
    } else if (hv_versions.hypervisor == 0) {
1596 1597 1598
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
1599 1600 1601
        op.cmd = XEN_V0_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
1602
    }
1603
    return ret;
1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616
}

/**
 * virXen_setmaxmem:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @memory: the amount of memory in kilobytes
 *
 * Do a low level hypercall to change the max memory amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
1617
virXen_setmaxmem(int handle, int id, unsigned long memory)
1618 1619 1620
{
    int ret = -1;

P
Philipp Hahn 已提交
1621
    if (hv_versions.hypervisor > 1) {
1622 1623 1624
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
1625 1626
        op.cmd = XEN_V2_OP_SETMAXMEM;
        op.domain = (domid_t) id;
P
Philipp Hahn 已提交
1627
        if (hv_versions.dom_interface < 5)
1628 1629 1630
            op.u.setmaxmem.maxmem = memory;
        else
            op.u.setmaxmemd5.maxmem = memory;
1631
        ret = xenHypervisorDoV2Dom(handle, &op);
P
Philipp Hahn 已提交
1632
    } else if (hv_versions.hypervisor == 1) {
1633 1634 1635
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
1636 1637 1638 1639
        op.cmd = XEN_V1_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV1Op(handle, &op);
P
Philipp Hahn 已提交
1640
    } else if (hv_versions.hypervisor == 0) {
1641
        xen_op_v0 op;
1642 1643

        memset(&op, 0, sizeof(op));
1644 1645 1646 1647
        op.cmd = XEN_V0_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV0Op(handle, &op);
1648
    }
1649
    return ret;
1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662
}

/**
 * virXen_setmaxvcpus:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpus: the numbers of vcpus
 *
 * Do a low level hypercall to change the max vcpus amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
1663
virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
1664 1665 1666
{
    int ret = -1;

P
Philipp Hahn 已提交
1667
    if (hv_versions.hypervisor > 1) {
1668 1669 1670
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
1671 1672 1673 1674
        op.cmd = XEN_V2_OP_SETMAXVCPU;
        op.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV2Dom(handle, &op);
P
Philipp Hahn 已提交
1675
    } else if (hv_versions.hypervisor == 1) {
1676 1677 1678
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
1679 1680 1681 1682
        op.cmd = XEN_V1_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV1Op(handle, &op);
P
Philipp Hahn 已提交
1683
    } else if (hv_versions.hypervisor == 0) {
1684
        xen_op_v0 op;
1685 1686

        memset(&op, 0, sizeof(op));
1687 1688 1689 1690
        op.cmd = XEN_V0_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV0Op(handle, &op);
1691
    }
1692
    return ret;
1693 1694 1695 1696 1697 1698 1699 1700
}

/**
 * virXen_setvcpumap:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
1701
 * @maplen: the size of the bitmap in bytes
1702 1703 1704 1705 1706 1707
 *
 * Do a low level hypercall to change the pinning for vcpu
 *
 * Returns 0 or -1 in case of failure
 */
static int
1708 1709 1710 1711 1712
virXen_setvcpumap(int handle,
                  int id,
                  unsigned int vcpu,
                  unsigned char * cpumap,
                  int maplen)
1713 1714
{
    int ret = -1;
1715 1716 1717
    unsigned char *new = NULL;
    unsigned char *bitmap = NULL;
    uint32_t nr_cpus;
1718

P
Philipp Hahn 已提交
1719
    if (hv_versions.hypervisor > 1) {
1720 1721
        xen_op_v2_dom op;

1722
        if (lock_pages(cpumap, maplen) < 0)
1723
            return -1;
1724

1725
        memset(&op, 0, sizeof(op));
1726 1727
        op.cmd = XEN_V2_OP_SETVCPUMAP;
        op.domain = (domid_t) id;
1728 1729 1730 1731

        /* The allocated memory to cpumap must be 'sizeof(uint64_t)' byte *
         * for Xen, and also nr_cpus must be 'sizeof(uint64_t) * 8'       */
        if (maplen < 8) {
1732
            if (VIR_ALLOC_N(new, sizeof(uint64_t)) < 0) {
1733
                virReportOOMError();
1734
                return -1;
1735 1736 1737 1738 1739 1740 1741 1742 1743
            }
            memcpy(new, cpumap, maplen);
            bitmap = new;
            nr_cpus = sizeof(uint64_t) * 8;
        } else {
            bitmap = cpumap;
            nr_cpus = maplen * 8;
        }

P
Philipp Hahn 已提交
1744
        if (hv_versions.dom_interface < 5) {
1745
            op.u.setvcpumap.vcpu = vcpu;
1746 1747
            op.u.setvcpumap.cpumap.bitmap = bitmap;
            op.u.setvcpumap.cpumap.nr_cpus = nr_cpus;
1748 1749
        } else {
            op.u.setvcpumapd5.vcpu = vcpu;
1750 1751
            op.u.setvcpumapd5.cpumap.bitmap.v = bitmap;
            op.u.setvcpumapd5.cpumap.nr_cpus = nr_cpus;
1752
        }
1753
        ret = xenHypervisorDoV2Dom(handle, &op);
1754
        VIR_FREE(new);
1755

1756
        if (unlock_pages(cpumap, maplen) < 0)
1757
            ret = -1;
1758
    } else {
1759
        cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
E
Eric Blake 已提交
1760
        char buf[8] = "";
1761

E
Eric Blake 已提交
1762
        if (maplen > sizeof(cpumap_t) || sizeof(cpumap_t) != sizeof(uint64_t))
1763
            return -1;
E
Eric Blake 已提交
1764 1765 1766
        /* Supply trailing 0s if user's input array was short */
        memcpy(buf, cpumap, maplen);
        xen_cpumap = virReadBufInt64LE(buf);
1767

P
Philipp Hahn 已提交
1768
        if (hv_versions.hypervisor == 1) {
1769 1770 1771 1772 1773 1774 1775 1776
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV1Op(handle, &op);
P
Philipp Hahn 已提交
1777
        } else if (hv_versions.hypervisor == 0) {
1778 1779 1780 1781 1782 1783 1784 1785 1786
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV0Op(handle, &op);
        }
1787
    }
1788
    return ret;
1789
}
1790

1791

1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804
/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to change the pinning for vcpu
 *
 * Returns 0 or -1 in case of failure
 */
static int
1805 1806 1807 1808 1809 1810
virXen_getvcpusinfo(int handle,
                    int id,
                    unsigned int vcpu,
                    virVcpuInfoPtr ipt,
                    unsigned char *cpumap,
                    int maplen)
1811 1812 1813
{
    int ret = -1;

P
Philipp Hahn 已提交
1814
    if (hv_versions.hypervisor > 1) {
1815 1816 1817
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
1818 1819
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
P
Philipp Hahn 已提交
1820
        if (hv_versions.dom_interface < 5)
1821 1822 1823
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
1824
        ret = xenHypervisorDoV2Dom(handle, &op);
1825

1826
        if (ret < 0)
1827
            return -1;
1828
        ipt->number = vcpu;
P
Philipp Hahn 已提交
1829
        if (hv_versions.dom_interface < 5) {
1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
1851 1852
        }
        if ((cpumap != NULL) && (maplen > 0)) {
1853
            if (lock_pages(cpumap, maplen) < 0)
1854
                return -1;
1855

1856
            memset(cpumap, 0, maplen);
1857 1858 1859
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
P
Philipp Hahn 已提交
1860
            if (hv_versions.dom_interface < 5) {
1861 1862 1863 1864 1865 1866 1867 1868
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
1869
            ret = xenHypervisorDoV2Dom(handle, &op);
1870
            if (unlock_pages(cpumap, maplen) < 0)
1871 1872
                ret = -1;
        }
1873
    } else {
1874 1875 1876 1877 1878
        int mapl = maplen;
        int cpu;

        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);
1879

P
Philipp Hahn 已提交
1880
        if (hv_versions.hypervisor == 1) {
1881 1882 1883 1884 1885 1886 1887 1888
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
1889
                return -1;
1890
            ipt->number = vcpu;
1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
P
Philipp Hahn 已提交
1904
        } else if (hv_versions.hypervisor == 0) {
1905 1906 1907 1908 1909 1910 1911 1912
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
1913
                return -1;
1914
            ipt->number = vcpu;
1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
1929
    }
1930
    return ret;
1931
}
1932

1933 1934
/**
 * xenHypervisorInit:
P
Philipp Hahn 已提交
1935 1936
 * @override_versions: pointer to optional struct xenHypervisorVersions with
 *     version information used instead of automatic version detection.
1937 1938 1939
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used i.e. pre or post changeset 10277
P
Philipp Hahn 已提交
1940 1941
 *
 * Returns 0 or -1 in case of failure
1942
 */
1943
int
P
Philipp Hahn 已提交
1944
xenHypervisorInit(struct xenHypervisorVersions *override_versions)
1945
{
1946
    int fd, ret, cmd, errcode;
1947
    hypercall_t hc;
1948
    v0_hypercall_t v0_hc;
1949
    xen_getdomaininfo info;
D
Daniel Veillard 已提交
1950
    virVcpuInfoPtr ipt = NULL;
1951

1952 1953 1954 1955
    /* Compile regular expressions used by xenHypervisorGetCapabilities.
     * Note that errors here are really internal errors since these
     * regexps should never fail to compile.
     */
1956
    errcode = regcomp(&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
1957 1958
    if (errcode != 0) {
        char error[100];
1959 1960
        regerror(errcode, &flags_hvm_rec, error, sizeof(error));
        regfree(&flags_hvm_rec);
1961
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", error);
1962 1963
        return -1;
    }
1964
    errcode = regcomp(&flags_pae_rec, flags_pae_re, REG_EXTENDED);
1965 1966
    if (errcode != 0) {
        char error[100];
1967 1968 1969
        regerror(errcode, &flags_pae_rec, error, sizeof(error));
        regfree(&flags_pae_rec);
        regfree(&flags_hvm_rec);
1970
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", error);
1971 1972
        return -1;
    }
1973
    errcode = regcomp(&xen_cap_rec, xen_cap_re, REG_EXTENDED);
1974 1975
    if (errcode != 0) {
        char error[100];
1976 1977 1978 1979
        regerror(errcode, &xen_cap_rec, error, sizeof(error));
        regfree(&xen_cap_rec);
        regfree(&flags_pae_rec);
        regfree(&flags_hvm_rec);
1980
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", error);
1981 1982 1983
        return -1;
    }

P
Philipp Hahn 已提交
1984 1985 1986 1987 1988
    if (override_versions) {
      hv_versions = *override_versions;
      return 0;
    }

1989
    /* Xen hypervisor version detection begins. */
1990 1991
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
P
Philipp Hahn 已提交
1992
        hv_versions.hypervisor = -1;
1993
        return -1;
1994 1995 1996
    }
    fd = ret;

1997 1998 1999 2000
    /*
     * The size of the hypervisor call block changed July 2006
     * this detect if we are using the new or old hypercall_t structure
     */
2001 2002 2003 2004 2005 2006 2007 2008
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;
    hc.arg[1] = 0;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
2009
        VIR_DEBUG("Using new hypervisor call: %X", ret);
P
Philipp Hahn 已提交
2010
        hv_versions.hv = ret;
2011 2012
        xen_ioctl_hypercall_cmd = cmd;
        goto detect_v2;
2013
    }
2014

2015
#ifndef __sun
2016 2017 2018 2019 2020 2021 2022 2023
    /*
     * check if the old hypercall are actually working
     */
    v0_hc.op = __HYPERVISOR_xen_version;
    v0_hc.arg[0] = (unsigned long) XENVER_version;
    v0_hc.arg[1] = 0;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
2024
    if ((ret != -1) && (ret != 0)) {
2025
        VIR_DEBUG("Using old hypervisor call: %X", ret);
P
Philipp Hahn 已提交
2026
        hv_versions.hv = ret;
2027
        xen_ioctl_hypercall_cmd = cmd;
P
Philipp Hahn 已提交
2028
        hv_versions.hypervisor = 0;
2029
        goto done;
2030
    }
2031
#endif
2032

2033
    /*
R
Richard W.M. Jones 已提交
2034
     * we failed to make any hypercall
2035 2036
     */

P
Philipp Hahn 已提交
2037
    hv_versions.hypervisor = -1;
2038 2039 2040
    virReportSystemError(errno,
                         _("Unable to issue hypervisor ioctl %lu"),
                         (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
2041
    VIR_FORCE_CLOSE(fd);
2042
    return -1;
2043

2044
 detect_v2:
2045 2046 2047 2048 2049
    /*
     * The hypercalls were refactored into 3 different section in August 2006
     * Try to detect if we are running a version post 3.0.2 with the new ones
     * or the old ones
     */
P
Philipp Hahn 已提交
2050
    hv_versions.hypervisor = 2;
2051

2052
    if (VIR_ALLOC(ipt) < 0) {
2053
        virReportOOMError();
2054
        return -1;
2055
    }
2056
    /* Currently consider RHEL5.0 Fedora7, xen-3.1, and xen-unstable */
P
Philipp Hahn 已提交
2057
    hv_versions.sys_interface = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
2058
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
2059
        /* RHEL 5.0 */
P
Philipp Hahn 已提交
2060
        hv_versions.dom_interface = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
2061
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
2062
            VIR_DEBUG("Using hypervisor call v2, sys ver2 dom ver3");
2063 2064 2065
            goto done;
        }
        /* Fedora 7 */
P
Philipp Hahn 已提交
2066
        hv_versions.dom_interface = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
2067
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
2068
            VIR_DEBUG("Using hypervisor call v2, sys ver2 dom ver4");
2069 2070 2071 2072
            goto done;
        }
    }

P
Philipp Hahn 已提交
2073
    hv_versions.sys_interface = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
2074
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
2075
        /* xen-3.1 */
P
Philipp Hahn 已提交
2076
        hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
2077
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
2078
            VIR_DEBUG("Using hypervisor call v2, sys ver3 dom ver5");
2079 2080
            goto done;
        }
2081
    }
2082

P
Philipp Hahn 已提交
2083
    hv_versions.sys_interface = 4; /* XEN_SYSCTL_INTERFACE_VERSION */
2084
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
2085
        /* Fedora 8 */
P
Philipp Hahn 已提交
2086
        hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
2087
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
2088
            VIR_DEBUG("Using hypervisor call v2, sys ver4 dom ver5");
2089 2090 2091 2092
            goto done;
        }
    }

P
Philipp Hahn 已提交
2093
    hv_versions.sys_interface = 6; /* XEN_SYSCTL_INTERFACE_VERSION */
2094 2095
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* Xen 3.2, Fedora 9 */
P
Philipp Hahn 已提交
2096
        hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
2097
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
2098
            VIR_DEBUG("Using hypervisor call v2, sys ver6 dom ver5");
2099 2100
            goto done;
        }
J
Jim Fehlig 已提交
2101 2102 2103
    }

    /* Xen 4.0 */
P
Philipp Hahn 已提交
2104
    hv_versions.sys_interface = 7; /* XEN_SYSCTL_INTERFACE_VERSION */
J
Jim Fehlig 已提交
2105
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
P
Philipp Hahn 已提交
2106
        hv_versions.dom_interface = 6; /* XEN_DOMCTL_INTERFACE_VERSION */
2107
        VIR_DEBUG("Using hypervisor call v2, sys ver7 dom ver6");
J
Jim Fehlig 已提交
2108
        goto done;
2109 2110
    }

J
Jim Fehlig 已提交
2111 2112 2113
    /* Xen 4.1
     * sysctl version 8 -> xen-unstable c/s 21118:28e5409e3fb3
     * domctl version 7 -> xen-unstable c/s 21212:de94884a669c
J
Jim Fehlig 已提交
2114
     * domctl version 8 -> xen-unstable c/s 23874:651aed73b39c
J
Jim Fehlig 已提交
2115
     */
P
Philipp Hahn 已提交
2116
    hv_versions.sys_interface = 8; /* XEN_SYSCTL_INTERFACE_VERSION */
J
Jim Fehlig 已提交
2117
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
P
Philipp Hahn 已提交
2118
        hv_versions.dom_interface = 7; /* XEN_DOMCTL_INTERFACE_VERSION */
J
Jim Fehlig 已提交
2119 2120 2121 2122 2123 2124 2125 2126 2127
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver8 dom ver7");
            goto done;
        }
        hv_versions.dom_interface = 8; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver8 dom ver8");
            goto done;
        }
J
Jim Fehlig 已提交
2128 2129
    }

2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141
    /* Xen 4.2
     * sysctl version 9 -> xen-unstable c/s 24102:dc8e55c90604
     * domctl version 8 -> unchanged from Xen 4.1
     */
    hv_versions.sys_interface = 9; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        hv_versions.dom_interface = 8; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver9 dom ver8");
            goto done;
        }
    }
2142

J
Jim Fehlig 已提交
2143 2144 2145 2146 2147 2148 2149
    hv_versions.hypervisor = 1;
    hv_versions.sys_interface = -1;
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        VIR_DEBUG("Using hypervisor call v1");
        goto done;
    }

2150
    /*
R
Richard W.M. Jones 已提交
2151
     * we failed to make the getdomaininfolist hypercall
2152
     */
P
Philipp Hahn 已提交
2153
    hv_versions.hypervisor = -1;
2154 2155 2156 2157
    virReportSystemError(errno,
                         _("Unable to issue hypervisor ioctl %lu"),
                         (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
    VIR_DEBUG("Failed to find any Xen hypervisor method");
2158
    VIR_FORCE_CLOSE(fd);
2159
    VIR_FREE(ipt);
2160
    return -1;
2161

2162
 done:
2163
    VIR_FORCE_CLOSE(fd);
2164
    VIR_FREE(ipt);
2165
    return 0;
2166 2167
}

2168 2169 2170 2171 2172 2173 2174

static int xenHypervisorOnceInit(void) {
    return xenHypervisorInit(NULL);
}

VIR_ONCE_GLOBAL_INIT(xenHypervisor)

2175 2176
/**
 * xenHypervisorOpen:
2177 2178 2179
 * @conn: pointer to the connection block
 * @name: URL for the target, NULL for local
 * @flags: combination of virDrvOpenFlag(s)
2180 2181 2182
 *
 * Connects to the Xen hypervisor.
 *
2183
 * Returns 0 or -1 in case of error.
2184
 */
2185
int
2186
xenHypervisorOpen(virConnectPtr conn,
2187
                  virConnectAuthPtr auth ATTRIBUTE_UNUSED,
E
Eric Blake 已提交
2188
                  unsigned int flags)
2189
{
2190
    int ret;
2191
    xenUnifiedPrivatePtr priv = conn->privateData;
2192

2193
    virCheckFlags(VIR_CONNECT_RO, -1);
E
Eric Blake 已提交
2194

2195
    if (xenHypervisorInitialize() < 0)
2196
        return -1;
2197

2198
    priv->handle = -1;
2199

2200
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
2201
    if (ret < 0) {
2202
        virReportError(VIR_ERR_NO_XEN, "%s", XEN_HYPERVISOR_SOCKET);
2203
        return -1;
2204
    }
2205 2206

    priv->handle = ret;
2207

2208
    return 0;
2209 2210 2211 2212
}

/**
 * xenHypervisorClose:
2213
 * @conn: pointer to the connection block
2214 2215 2216 2217 2218
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
2219
int
2220
xenHypervisorClose(virConnectPtr conn)
2221
{
2222
    int ret;
2223
    xenUnifiedPrivatePtr priv = conn->privateData;
2224

2225
    ret = VIR_CLOSE(priv->handle);
2226
    if (ret < 0)
2227
        return -1;
2228

2229
    return 0;
2230 2231 2232
}


2233 2234
/**
 * xenHypervisorGetVersion:
2235 2236
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
2237 2238 2239
 *
 * Call the hypervisor to extracts his own internal API version
 *
2240
 * Returns 0 in case of success, -1 in case of error
2241
 */
2242
int
2243
xenHypervisorGetVersion(virConnectPtr conn ATTRIBUTE_UNUSED, unsigned long *hvVer)
2244
{
P
Philipp Hahn 已提交
2245
    *hvVer = (hv_versions.hv >> 16) * 1000000 + (hv_versions.hv & 0xFFFF) * 1000;
2246
    return 0;
2247 2248
}

2249
struct guest_arch {
2250
    virArch arch;
2251 2252 2253 2254 2255 2256 2257 2258
    int hvm;
    int pae;
    int nonpae;
    int ia64_be;
};


static virCapsPtr
2259
xenHypervisorBuildCapabilities(virConnectPtr conn, virArch hostarch,
2260
                               int host_pae,
2261
                               const char *hvm_type,
2262
                               struct guest_arch *guest_archs,
2263 2264
                               int nr_guest_archs)
{
2265 2266
    virCapsPtr caps;
    int i;
P
Philipp Hahn 已提交
2267 2268
    int hv_major = hv_versions.hv >> 16;
    int hv_minor = hv_versions.hv & 0xFFFF;
2269

2270
    if ((caps = virCapabilitiesNew(hostarch, 1, 1)) == NULL)
2271
        goto no_memory;
2272

2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285
    if (hvm_type && STRNEQ(hvm_type, "") &&
        virCapabilitiesAddHostFeature(caps, hvm_type) < 0)
        goto no_memory;
    if (host_pae &&
        virCapabilitiesAddHostFeature(caps, "pae") < 0)
        goto no_memory;


    if (virCapabilitiesAddHostMigrateTransport(caps,
                                               "xenmigr") < 0)
        goto no_memory;


P
Philipp Hahn 已提交
2286
    if (hv_versions.sys_interface >= SYS_IFACE_MIN_VERS_NUMA && conn != NULL) {
2287
        if (xenDaemonNodeGetTopology(conn, caps) != 0) {
2288
            virObjectUnref(caps);
2289 2290 2291 2292 2293 2294
            return NULL;
        }
    }

    for (i = 0; i < nr_guest_archs; ++i) {
        virCapsGuestPtr guest;
2295 2296 2297
        char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
        virCapsGuestMachinePtr *machines;

2298
        if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
2299
            goto no_memory;
2300 2301 2302

        if ((guest = virCapabilitiesAddGuest(caps,
                                             guest_archs[i].hvm ? "hvm" : "xen",
2303 2304
                                             guest_archs[i].arch,
                                             (hostarch == VIR_ARCH_X86_64 ?
2305 2306 2307 2308 2309 2310
                                              "/usr/lib64/xen/bin/qemu-dm" :
                                              "/usr/lib/xen/bin/qemu-dm"),
                                             (guest_archs[i].hvm ?
                                              "/usr/lib/xen/boot/hvmloader" :
                                              NULL),
                                             1,
2311 2312
                                             machines)) == NULL) {
            virCapabilitiesFreeMachines(machines, 1);
2313
            goto no_memory;
2314
        }
2315
        machines = NULL;
2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351

        if (virCapabilitiesAddGuestDomain(guest,
                                          "xen",
                                          NULL,
                                          NULL,
                                          0,
                                          NULL) == NULL)
            goto no_memory;

        if (guest_archs[i].pae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "pae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].nonpae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "nonpae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].ia64_be &&
            virCapabilitiesAddGuestFeature(guest,
                                           "ia64_be",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].hvm) {
            if (virCapabilitiesAddGuestFeature(guest,
                                               "acpi",
                                               1, 1) == NULL)
                goto no_memory;

2352
            /* In Xen 3.1.0, APIC is always on and can't be toggled */
2353 2354 2355 2356 2357 2358 2359
            if (virCapabilitiesAddGuestFeature(guest,
                                               "apic",
                                               1,
                                               (hv_major > 3 &&
                                                hv_minor > 0 ?
                                                0 : 1)) == NULL)
                goto no_memory;
2360 2361 2362 2363 2364 2365 2366 2367 2368 2369

            /* Xen 3.3.x and beyond supports enabling/disabling
             * hardware assisted paging.  Default is off.
             */
            if ((hv_major == 3 && hv_minor >= 3) || (hv_major > 3))
                if (virCapabilitiesAddGuestFeature(guest,
                                                   "hap",
                                                   0,
                                                   1) == NULL)
                    goto no_memory;
2370 2371 2372 2373 2374 2375 2376 2377 2378 2379

            /* Xen 3.4.x and beyond supports the Viridian (Hyper-V)
             * enlightenment interface.  Default is off.
             */
            if ((hv_major == 3 && hv_minor >= 4) || (hv_major > 3))
                if (virCapabilitiesAddGuestFeature(guest,
                                                   "viridian",
                                                   0,
                                                   1) == NULL)
                    goto no_memory;
2380
        }
2381

2382 2383 2384 2385 2386
    }

    return caps;

 no_memory:
2387
    virObjectUnref(caps);
2388 2389 2390
    return NULL;
}

2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408
#ifdef __sun

static int
get_cpu_flags(virConnectPtr conn, const char **hvm, int *pae, int *longmode)
{
    struct {
        uint32_t r_eax, r_ebx, r_ecx, r_edx;
    } regs;

    char tmpbuf[20];
    int ret = 0;
    int fd;

    /* returns -1, errno 22 if in 32-bit mode */
    *longmode = (sysinfo(SI_ARCHITECTURE_64, tmpbuf, sizeof(tmpbuf)) != -1);

    if ((fd = open("/dev/cpu/self/cpuid", O_RDONLY)) == -1 ||
        pread(fd, &regs, sizeof(regs), 0) != sizeof(regs)) {
2409
        virReportSystemError(errno, "%s", _("could not read CPU flags"));
2410 2411 2412 2413 2414 2415 2416
        goto out;
    }

    *pae = 0;
    *hvm = "";

    if (STREQLEN((const char *)&regs.r_ebx, "AuthcAMDenti", 12)) {
2417
        if (pread(fd, &regs, sizeof(regs), 0x80000001) == sizeof(regs)) {
2418 2419 2420 2421 2422 2423 2424 2425
            /* Read secure virtual machine bit (bit 2 of ECX feature ID) */
            if ((regs.r_ecx >> 2) & 1) {
                *hvm = "svm";
            }
            if ((regs.r_edx >> 6) & 1)
                *pae = 1;
        }
    } else if (STREQLEN((const char *)&regs.r_ebx, "GenuntelineI", 12)) {
2426
        if (pread(fd, &regs, sizeof(regs), 0x00000001) == sizeof(regs)) {
2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437
            /* Read VMXE feature bit (bit 5 of ECX feature ID) */
            if ((regs.r_ecx >> 5) & 1)
                *hvm = "vmx";
            if ((regs.r_edx >> 6) & 1)
                *pae = 1;
        }
    }

    ret = 1;

out:
2438
    VIR_FORCE_CLOSE(fd);
2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453
    return ret;
}

static virCapsPtr
xenHypervisorMakeCapabilitiesSunOS(virConnectPtr conn)
{
    struct guest_arch guest_arches[32];
    int i = 0;
    virCapsPtr caps = NULL;
    int pae, longmode;
    const char *hvm;

    if (!get_cpu_flags(conn, &hvm, &pae, &longmode))
        return NULL;

2454
    guest_arches[i].arch = VIR_ARCH_I686;
2455 2456 2457 2458 2459 2460 2461
    guest_arches[i].hvm = 0;
    guest_arches[i].pae = pae;
    guest_arches[i].nonpae = !pae;
    guest_arches[i].ia64_be = 0;
    i++;

    if (longmode) {
2462
        guest_arches[i].arch = VIR_ARCH_X86_64;
2463 2464 2465 2466 2467 2468 2469 2470
        guest_arches[i].hvm = 0;
        guest_arches[i].pae = 0;
        guest_arches[i].nonpae = 0;
        guest_arches[i].ia64_be = 0;
        i++;
    }

    if (hvm[0] != '\0') {
2471
        guest_arches[i].arch = VIR_ARCH_I686;
2472 2473 2474 2475 2476 2477 2478
        guest_arches[i].hvm = 1;
        guest_arches[i].pae = pae;
        guest_arches[i].nonpae = 1;
        guest_arches[i].ia64_be = 0;
        i++;

        if (longmode) {
2479
            guest_arches[i].arch = VIR_ARCH_X86_64;
2480 2481 2482 2483 2484 2485 2486 2487 2488
            guest_arches[i].hvm = 1;
            guest_arches[i].pae = 0;
            guest_arches[i].nonpae = 0;
            guest_arches[i].ia64_be = 0;
            i++;
        }
    }

    if ((caps = xenHypervisorBuildCapabilities(conn,
2489
                                               virArchFromHost(),
2490 2491
                                               pae, hvm,
                                               guest_arches, i)) == NULL)
2492
        virReportOOMError();
2493 2494 2495 2496 2497 2498

    return caps;
}

#endif /* __sun */

2499
/**
2500
 * xenHypervisorMakeCapabilitiesInternal:
2501
 * @conn: pointer to the connection block
2502 2503
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
2504 2505 2506
 *
 * Return the capabilities of this hypervisor.
 */
2507
virCapsPtr
2508
xenHypervisorMakeCapabilitiesInternal(virConnectPtr conn,
2509
                                      virArch hostarch,
2510 2511
                                      FILE *cpuinfo,
                                      FILE *capabilities)
2512 2513
{
    char line[1024], *str, *token;
2514
    regmatch_t subs[4];
2515
    char *saveptr = NULL;
2516
    int i;
2517 2518 2519

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
2520
    struct guest_arch guest_archs[32];
2521
    int nr_guest_archs = 0;
2522
    virCapsPtr caps = NULL;
2523

2524 2525
    memset(guest_archs, 0, sizeof(guest_archs));

2526 2527 2528 2529
    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
2530
    if (cpuinfo) {
2531 2532
        while (fgets(line, sizeof(line), cpuinfo)) {
            if (regexec(&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
2533
                && subs[0].rm_so != -1) {
C
Chris Lalancette 已提交
2534 2535 2536 2537
                if (virStrncpy(hvm_type,
                               &line[subs[1].rm_so],
                               subs[1].rm_eo-subs[1].rm_so,
                               sizeof(hvm_type)) == NULL)
2538
                    goto no_memory;
2539
            } else if (regexec(&flags_pae_rec, line, 0, NULL, 0) == 0)
2540 2541
                host_pae = 1;
        }
2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */

    /* Expecting one line in this file - ignore any more. */
2569
    if ((capabilities) && (fgets(line, sizeof(line), capabilities))) {
2570 2571 2572 2573
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
2574
             nr_guest_archs < sizeof(guest_archs) / sizeof(guest_archs[0])
2575
                 && (token = strtok_r(str, " ", &saveptr)) != NULL;
2576 2577
             str = NULL) {

2578 2579
            if (regexec(&xen_cap_rec, token, sizeof(subs) / sizeof(subs[0]),
                        subs, 0) == 0) {
2580
                int hvm = STRPREFIX(&token[subs[1].rm_so], "hvm");
2581 2582
                int pae = 0, nonpae = 0, ia64_be = 0;
                virArch arch;
2583 2584

                if (STRPREFIX(&token[subs[2].rm_so], "x86_32")) {
2585
                    arch = VIR_ARCH_I686;
2586 2587
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "p"))
2588 2589 2590 2591
                        pae = 1;
                    else
                        nonpae = 1;
                }
2592
                else if (STRPREFIX(&token[subs[2].rm_so], "x86_64")) {
2593
                    arch = VIR_ARCH_X86_64;
2594
                }
2595
                else if (STRPREFIX(&token[subs[2].rm_so], "ia64")) {
2596
                    arch = VIR_ARCH_ITANIUM;
2597 2598
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "be"))
2599 2600
                        ia64_be = 1;
                }
2601
                else if (STRPREFIX(&token[subs[2].rm_so], "powerpc64")) {
2602
                    arch = VIR_ARCH_PPC64;
2603
                } else {
2604
                    /* XXX surely no other Xen archs exist. Arrrrrrrrrm  */
2605 2606
                    continue;
                }
2607

2608 2609
                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
2610
                    if (guest_archs[i].arch == arch &&
2611 2612 2613 2614
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }
2615

2616
                /* Too many arch flavours - highly unlikely ! */
J
Jim Meyering 已提交
2617
                if (i >= ARRAY_CARDINALITY(guest_archs))
2618 2619 2620 2621 2622
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

2623
                guest_archs[i].arch = arch;
2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports
                   separately capabilities so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
2636 2637 2638 2639
            }
        }
    }

2640
    if ((caps = xenHypervisorBuildCapabilities(conn,
2641
                                               hostarch,
2642 2643 2644 2645 2646
                                               host_pae,
                                               hvm_type,
                                               guest_archs,
                                               nr_guest_archs)) == NULL)
        goto no_memory;
2647

2648
    return caps;
2649

2650
 no_memory:
2651
    virReportOOMError();
2652
    virObjectUnref(caps);
2653 2654 2655 2656
    return NULL;
}

/**
2657
 * xenHypervisorMakeCapabilities:
2658 2659 2660
 *
 * Return the capabilities of this hypervisor.
 */
2661
virCapsPtr
2662
xenHypervisorMakeCapabilities(virConnectPtr conn)
2663
{
2664 2665 2666
#ifdef __sun
    return xenHypervisorMakeCapabilitiesSunOS(conn);
#else
2667
    virCapsPtr caps = NULL;
2668 2669
    FILE *cpuinfo, *capabilities;

2670
    cpuinfo = fopen("/proc/cpuinfo", "r");
2671 2672
    if (cpuinfo == NULL) {
        if (errno != ENOENT) {
2673
            virReportSystemError(errno,
2674 2675
                                 _("cannot read file %s"),
                                 "/proc/cpuinfo");
2676 2677 2678 2679
            return NULL;
        }
    }

2680
    capabilities = fopen("/sys/hypervisor/properties/capabilities", "r");
2681 2682
    if (capabilities == NULL) {
        if (errno != ENOENT) {
2683
            VIR_FORCE_FCLOSE(cpuinfo);
2684
            virReportSystemError(errno,
2685 2686
                                 _("cannot read file %s"),
                                 "/sys/hypervisor/properties/capabilities");
2687 2688 2689 2690
            return NULL;
        }
    }

2691
    caps = xenHypervisorMakeCapabilitiesInternal(conn,
2692
                                                 virArchFromHost(),
2693 2694
                                                 cpuinfo,
                                                 capabilities);
2695
    if (caps == NULL)
2696
        goto cleanup;
2697

2698 2699 2700
    if (virNodeSuspendGetTargetMask(&caps->host.powerMgmt) < 0)
        VIR_WARN("Failed to get host power management capabilities");

2701
cleanup:
2702 2703
    VIR_FORCE_FCLOSE(cpuinfo);
    VIR_FORCE_FCLOSE(capabilities);
2704

2705
    return caps;
2706
#endif /* __sun */
2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717
}



/**
 * xenHypervisorGetCapabilities:
 * @conn: pointer to the connection block
 *
 * Return the capabilities of this hypervisor.
 */
char *
2718
xenHypervisorGetCapabilities(virConnectPtr conn)
2719
{
2720
    xenUnifiedPrivatePtr priv = conn->privateData;
2721 2722 2723
    char *xml;

    if (!(xml = virCapabilitiesFormatXML(priv->caps))) {
2724
        virReportOOMError();
2725 2726 2727
        return NULL;
    }

2728
    return xml;
2729 2730
}

2731

2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742
/**
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domain found or -1 in case of error
 */
int
xenHypervisorNumOfDomains(virConnectPtr conn)
{
2743
    xen_getdomaininfolist dominfos;
2744 2745 2746
    int ret, nbids;
    static int last_maxids = 2;
    int maxids = last_maxids;
2747
    xenUnifiedPrivatePtr priv = conn->privateData;
2748

2749 2750
 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
2751
        virReportOOMError();
2752
        return -1;
2753 2754
    }

2755 2756
    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

2757
    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
2758

2759
    XEN_GETDOMAININFOLIST_FREE(dominfos);
2760 2761

    if (ret < 0)
2762
        return -1;
2763

2764
    nbids = ret;
2765 2766 2767 2768 2769
    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
2770
    if (nbids == maxids) {
2771 2772 2773 2774 2775 2776
        if (maxids < 65000) {
            last_maxids *= 2;
            maxids *= 2;
            goto retry;
        }
        nbids = -1;
2777 2778
    }
    if ((nbids < 0) || (nbids > maxids))
2779 2780
        return -1;
    return nbids;
2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795
}

/**
 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their ID in @maxids
 *
 * Returns the number of domain found or -1 in case of error
 */
int
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
{
2796
    xen_getdomaininfolist dominfos;
2797
    int ret, nbids, i;
2798
    xenUnifiedPrivatePtr priv = conn->privateData;
2799

2800
    if (maxids == 0)
2801
        return 0;
2802

2803
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
2804
        virReportOOMError();
2805
        return -1;
2806
    }
2807 2808

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
2809 2810
    memset(ids, 0, maxids * sizeof(int));

2811
    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
2812 2813

    if (ret < 0) {
2814
        XEN_GETDOMAININFOLIST_FREE(dominfos);
2815
        return -1;
2816 2817
    }

2818
    nbids = ret;
2819
    if ((nbids < 0) || (nbids > maxids)) {
2820
        XEN_GETDOMAININFOLIST_FREE(dominfos);
2821
        return -1;
2822 2823 2824
    }

    for (i = 0;i < nbids;i++) {
2825
        ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
2826 2827
    }

2828
    XEN_GETDOMAININFOLIST_FREE(dominfos);
2829
    return nbids;
2830 2831
}

2832 2833

char *
2834
xenHypervisorDomainGetOSType(virDomainPtr dom)
2835
{
2836
    xenUnifiedPrivatePtr priv = dom->conn->privateData;
2837
    xen_getdomaininfo dominfo;
2838
    char *ostype = NULL;
2839 2840

    /* HV's earlier than 3.1.0 don't include the HVM flags in guests status*/
P
Philipp Hahn 已提交
2841 2842
    if (hv_versions.hypervisor < 2 ||
        hv_versions.dom_interface < 4) {
2843 2844
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unsupported in dom interface < 4"));
2845
        return NULL;
2846
    }
2847 2848 2849

    XEN_GETDOMAININFO_CLEAR(dominfo);

2850
    if (virXen_getdomaininfo(priv->handle, dom->id, &dominfo) < 0) {
2851 2852
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("cannot get domain details"));
2853
        return NULL;
2854
    }
2855

2856
    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != dom->id) {
2857 2858
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("cannot get domain details"));
2859
        return NULL;
2860
    }
2861 2862

    if (XEN_GETDOMAININFO_FLAGS(dominfo) & DOMFLAGS_HVM)
2863 2864 2865 2866 2867
        ostype = strdup("hvm");
    else
        ostype = strdup("linux");

    if (ostype == NULL)
2868
        virReportOOMError();
2869 2870

    return ostype;
2871 2872
}

2873
int
2874
xenHypervisorHasDomain(virConnectPtr conn, int id)
2875
{
2876
    xenUnifiedPrivatePtr priv = conn->privateData;
2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889
    xen_getdomaininfo dominfo;

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
        return 0;

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
        return 0;

    return 1;
}

2890
virDomainPtr
2891
xenHypervisorLookupDomainByID(virConnectPtr conn, int id)
2892
{
2893
    xenUnifiedPrivatePtr priv = conn->privateData;
2894 2895 2896 2897 2898 2899 2900
    xen_getdomaininfo dominfo;
    virDomainPtr ret;
    char *name;

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
2901
        return NULL;
2902 2903

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
2904
        return NULL;
2905

D
Daniel P. Berrange 已提交
2906 2907 2908 2909
    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);
    if (!name)
2910
        return NULL;
2911 2912 2913 2914

    ret = virGetDomain(conn, name, XEN_GETDOMAININFO_UUID(dominfo));
    if (ret)
        ret->id = id;
2915
    VIR_FREE(name);
2916 2917 2918 2919 2920
    return ret;
}


virDomainPtr
2921
xenHypervisorLookupDomainByUUID(virConnectPtr conn, const unsigned char *uuid)
2922 2923
{
    xen_getdomaininfolist dominfos;
2924
    xenUnifiedPrivatePtr priv = conn->privateData;
2925 2926 2927 2928 2929 2930
    virDomainPtr ret;
    char *name;
    int maxids = 100, nids, i, id;

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
2931
        virReportOOMError();
2932
        return NULL;
2933 2934 2935 2936 2937 2938 2939 2940
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    nids = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (nids < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
2941
        return NULL;
2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954
    }

    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nids == maxids) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        if (maxids < 65000) {
            maxids *= 2;
            goto retry;
        }
2955
        return NULL;
2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967
    }

    id = -1;
    for (i = 0 ; i < nids ; i++) {
        if (memcmp(XEN_GETDOMAININFOLIST_UUID(dominfos, i), uuid, VIR_UUID_BUFLEN) == 0) {
            id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
            break;
        }
    }
    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (id == -1)
2968
        return NULL;
2969

D
Daniel P. Berrange 已提交
2970 2971 2972 2973
    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);
    if (!name)
2974
        return NULL;
2975 2976 2977 2978

    ret = virGetDomain(conn, name, uuid);
    if (ret)
        ret->id = id;
2979
    VIR_FREE(name);
2980 2981 2982
    return ret;
}

2983
/**
2984
 * xenHypervisorGetMaxVcpus:
2985 2986 2987 2988
 *
 * Returns the maximum of CPU defined by Xen.
 */
int
2989 2990
xenHypervisorGetMaxVcpus(virConnectPtr conn ATTRIBUTE_UNUSED,
                         const char *type ATTRIBUTE_UNUSED)
2991 2992 2993 2994
{
    return MAX_VIRT_CPUS;
}

2995
/**
2996 2997 2998
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: domain id
2999
 *
3000
 * Retrieve the maximum amount of physical memory allocated to a
3001
 * domain.
3002 3003 3004
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
3005 3006
unsigned long
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
3007
{
3008
    xenUnifiedPrivatePtr priv = conn->privateData;
3009
    xen_getdomaininfo dominfo;
3010 3011
    int ret;

3012 3013
    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
3014 3015
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
3016 3017
    }

3018
    XEN_GETDOMAININFO_CLEAR(dominfo);
3019

3020
    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);
3021

3022
    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
3023
        return 0;
3024

3025
    return (unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages;
3026 3027
}

3028 3029 3030
/**
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object or NULL
3031
 *
3032 3033 3034 3035 3036 3037
 * Retrieve the maximum amount of physical memory allocated to a
 * domain. If domain is NULL, then this get the amount of memory reserved
 * to Domain0 i.e. the domain where the application runs.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
3038
static unsigned long long ATTRIBUTE_NONNULL(1)
3039 3040
xenHypervisorGetMaxMemory(virDomainPtr domain)
{
3041
    if (domain->id < 0)
3042
        return 0;
3043

3044
    return xenHypervisorGetDomMaxMemory(domain->conn, domain->id);
3045 3046
}

3047
/**
3048 3049 3050
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
3051
 * @info: the place where information should be stored
3052
 *
E
Eric Blake 已提交
3053
 * Do a hypervisor call to get the related set of domain information.
3054 3055 3056 3057
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
3058
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
3059
{
3060
    xenUnifiedPrivatePtr priv = conn->privateData;
3061
    xen_getdomaininfo dominfo;
3062
    int ret;
3063
    uint32_t domain_flags, domain_state, domain_shutdown_cause;
3064 3065 3066

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
3067 3068
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
3069
    }
3070

3071
    memset(info, 0, sizeof(virDomainInfo));
3072
    XEN_GETDOMAININFO_CLEAR(dominfo);
3073

3074
    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);
3075

3076
    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
3077
        return -1;
3078

3079
    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
3080 3081
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flags */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
3082
    switch (domain_state) {
3083 3084 3085 3086
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
3087 3088 3089 3090 3091 3092 3093 3094 3095
            /* The domain is shutdown.  Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NOSTATE;
3108 3109 3110 3111 3112 3113 3114
    }

    /*
     * the API brings back the cpu time in nanoseconds,
     * convert to microseconds, same thing convert to
     * kilobytes from page counts
     */
3115
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
3116
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
3117
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
3118
    if (info->maxMem != UINT_MAX)
3119
        info->maxMem *= kb_per_pages;
3120
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
3121
    return 0;
3122 3123
}

3124 3125 3126
/**
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
3127
 * @info: the place where information should be stored
3128
 *
E
Eric Blake 已提交
3129
 * Do a hypervisor call to get the related set of domain information.
3130 3131 3132 3133 3134 3135
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
{
3136
    if (domain->id < 0)
3137
        return -1;
3138

3139
    return xenHypervisorGetDomInfo(domain->conn, domain->id, info);
3140 3141 3142

}

3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157
/**
 * xenHypervisorGetDomainState:
 * @domain: pointer to the domain block
 * @state: returned state of the domain
 * @reason: returned reason for the state
 * @flags: additional flags, 0 for now
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainState(virDomainPtr domain,
                            int *state,
                            int *reason,
E
Eric Blake 已提交
3158
                            unsigned int flags)
3159 3160 3161
{
    virDomainInfo info;

E
Eric Blake 已提交
3162 3163
    virCheckFlags(0, -1);

3164
    if (domain->id < 0)
3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176
        return -1;

    if (xenHypervisorGetDomInfo(domain->conn, domain->id, &info) < 0)
        return -1;

    *state = info.state;
    if (reason)
        *reason = 0;

    return 0;
}

3177 3178 3179 3180
/**
 * xenHypervisorNodeGetCellsFreeMemory:
 * @conn: pointer to the hypervisor connection
 * @freeMems: pointer to the array of unsigned long long
3181 3182
 * @startCell: index of first cell to return freeMems info on.
 * @maxCells: Maximum number of cells for which freeMems information can
3183 3184 3185 3186
 *            be returned.
 *
 * This call returns the amount of free memory in one or more NUMA cells.
 * The @freeMems array must be allocated by the caller and will be filled
3187 3188 3189
 * with the amount of free memory in kilobytes for each cell requested,
 * starting with startCell (in freeMems[0]), up to either
 * (startCell + maxCells), or the number of additional cells in the node,
3190 3191 3192 3193 3194
 * whichever is smaller.
 *
 * Returns the number of entries filled in freeMems, or -1 in case of error.
 */
int
3195 3196 3197 3198
xenHypervisorNodeGetCellsFreeMemory(virConnectPtr conn,
                                    unsigned long long *freeMems,
                                    int startCell,
                                    int maxCells)
3199 3200 3201
{
    xen_op_v2_sys op_sys;
    int i, j, ret;
3202
    xenUnifiedPrivatePtr priv = conn->privateData;
D
Daniel P. Berrange 已提交
3203 3204

    if (priv->nbNodeCells < 0) {
3205 3206
        virReportError(VIR_ERR_XEN_CALL, "%s",
                       _("cannot determine actual number of cells"));
3207
        return -1;
3208 3209
    }

D
Daniel P. Berrange 已提交
3210
    if ((maxCells < 1) || (startCell >= priv->nbNodeCells)) {
3211 3212
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid argument"));
3213 3214
        return -1;
    }
3215

3216
    /*
P
Philipp Hahn 已提交
3217
     * Support only hv_versions.sys_interface >=4
3218
     */
P
Philipp Hahn 已提交
3219
    if (hv_versions.sys_interface < SYS_IFACE_MIN_VERS_NUMA) {
3220 3221
        virReportError(VIR_ERR_XEN_CALL, "%s",
                       _("unsupported in sys interface < 4"));
3222 3223 3224 3225 3226 3227
        return -1;
    }

    memset(&op_sys, 0, sizeof(op_sys));
    op_sys.cmd = XEN_V2_OP_GETAVAILHEAP;

D
Daniel P. Berrange 已提交
3228
    for (i = startCell, j = 0;(i < priv->nbNodeCells) && (j < maxCells);i++,j++) {
P
Philipp Hahn 已提交
3229
        if (hv_versions.sys_interface >= 5)
3230 3231 3232
            op_sys.u.availheap5.node = i;
        else
            op_sys.u.availheap.node = i;
3233 3234
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0) {
3235
            return -1;
3236
        }
P
Philipp Hahn 已提交
3237
        if (hv_versions.sys_interface >= 5)
3238 3239 3240
            freeMems[j] = op_sys.u.availheap5.avail_bytes;
        else
            freeMems[j] = op_sys.u.availheap.avail_bytes;
3241
    }
3242
    return j;
3243 3244 3245
}


3246 3247
/**
 * xenHypervisorPauseDomain:
3248
 * @domain: pointer to the domain block
3249
 *
E
Eric Blake 已提交
3250
 * Do a hypervisor call to pause the given domain
3251 3252 3253 3254
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
3255
xenHypervisorPauseDomain(virDomainPtr domain)
3256
{
3257
    int ret;
3258
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3259

3260
    if (domain->id < 0)
3261
        return -1;
3262

3263
    ret = virXen_pausedomain(priv->handle, domain->id);
3264
    if (ret < 0)
3265 3266
        return -1;
    return 0;
3267 3268 3269 3270
}

/**
 * xenHypervisorResumeDomain:
3271
 * @domain: pointer to the domain block
3272
 *
E
Eric Blake 已提交
3273
 * Do a hypervisor call to resume the given domain
3274 3275 3276 3277
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
3278
xenHypervisorResumeDomain(virDomainPtr domain)
3279
{
3280
    int ret;
3281
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3282

3283
    if (domain->id < 0)
3284
        return -1;
3285

3286
    ret = virXen_unpausedomain(priv->handle, domain->id);
3287
    if (ret < 0)
3288 3289
        return -1;
    return 0;
3290 3291 3292
}

/**
3293
 * xenHypervisorDestroyDomainFlags:
3294
 * @domain: pointer to the domain block
3295
 * @flags: an OR'ed set of virDomainDestroyFlagsValues
3296
 *
E
Eric Blake 已提交
3297
 * Do a hypervisor call to destroy the given domain
3298
 *
3299 3300 3301
 * Calling this function with no @flags set (equal to zero)
 * is equivalent to calling xenHypervisorDestroyDomain.
 *
3302 3303 3304
 * Returns 0 in case of success, -1 in case of error.
 */
int
3305
xenHypervisorDestroyDomainFlags(virDomainPtr domain, unsigned int flags)
3306
{
3307
    int ret;
3308
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3309

3310 3311
    virCheckFlags(0, -1);

3312
    if (domain->id < 0)
3313
        return -1;
3314

3315
    ret = virXen_destroydomain(priv->handle, domain->id);
3316
    if (ret < 0)
3317 3318
        return -1;
    return 0;
3319 3320
}

3321 3322
/**
 * xenHypervisorSetMaxMemory:
3323
 * @domain: pointer to the domain block
3324 3325
 * @memory: the max memory size in kilobytes.
 *
E
Eric Blake 已提交
3326
 * Do a hypervisor call to change the maximum amount of memory used
3327 3328 3329 3330
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
3331
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
3332
{
3333
    int ret;
3334
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3335

3336
    if (domain->id < 0)
3337
        return -1;
3338

3339
    ret = virXen_setmaxmem(priv->handle, domain->id, memory);
3340
    if (ret < 0)
3341 3342
        return -1;
    return 0;
3343
}
3344

3345

3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358
/**
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
{
3359
    int ret;
3360
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3361

3362
    if (domain->id < 0 || nvcpus < 1)
3363
        return -1;
3364

3365
    ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
3366
    if (ret < 0)
3367 3368
        return -1;
    return 0;
3369 3370 3371 3372 3373 3374 3375 3376
}

/**
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
3377
 *
3378 3379 3380 3381 3382 3383 3384 3385 3386
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
{
3387
    int ret;
3388
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3389

3390
    if (domain->id < 0)
3391
        return -1;
3392

3393
    ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
3394 3395
                            cpumap, maplen);
    if (ret < 0)
3396 3397
        return -1;
    return 0;
3398 3399 3400 3401 3402 3403 3404
}

/**
 * virDomainGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
E
Eric Blake 已提交
3405
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
D
Daniel Veillard 已提交
3406
 *	If cpumaps is NULL, then no cpumap information is returned by the API.
3407 3408 3409 3410 3411 3412
 *	It's assumed there is <maxinfo> cpumap in cpumaps array.
 *	The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *	(ie: calloc(maxinfo, maplen)).
 *	One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *	underlying virtualization system (Xen...).
3413
 *
3414
 * Extract information about virtual CPUs of domain, store it in info array
R
Richard W.M. Jones 已提交
3415
 * and also in cpumaps if this pointer isn't NULL.
3416 3417 3418 3419
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
 */
int
3420 3421 3422 3423 3424
xenHypervisorGetVcpus(virDomainPtr domain,
                      virVcpuInfoPtr info,
                      int maxinfo,
                      unsigned char *cpumaps,
                      int maplen)
3425
{
3426
    xen_getdomaininfo dominfo;
3427
    int ret;
3428
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3429
    virVcpuInfoPtr ipt;
3430
    int nbinfo, i;
3431

3432
    if (domain->id < 0 || sizeof(cpumap_t) & 7) {
3433 3434
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domain shut off or invalid"));
3435
        return -1;
3436
    }
3437

3438
    /* first get the number of virtual CPUs in this domain */
3439
    XEN_GETDOMAININFO_CLEAR(dominfo);
3440
    ret = virXen_getdomaininfo(priv->handle, domain->id,
3441
                               &dominfo);
3442

3443
    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id)) {
3444 3445
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("cannot get domain details"));
3446
        return -1;
3447
    }
3448
    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
3449 3450 3451
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
3452
        memset(cpumaps, 0, maxinfo * maplen);
3453

3454 3455
    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
3456
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
3457 3458 3459
                                      ipt,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                                      maplen);
3460
            if (ret < 0) {
3461 3462
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("cannot get VCPUs info"));
3463
                return -1;
3464
            }
3465
        } else {
3466
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
3467
                                      ipt, NULL, 0);
3468
            if (ret < 0) {
3469 3470
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("cannot get VCPUs info"));
3471
                return -1;
3472
            }
3473
        }
3474 3475 3476
    }
    return nbinfo;
}
3477

3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491
/**
 * xenHypervisorGetVcpuMax:
 *
 *  Returns the maximum number of virtual CPUs supported for
 *  the guest VM. If the guest is inactive, this is the maximum
 *  of CPU defined by Xen. If the guest is running this reflect
 *  the maximum number of virtual CPUs the guest was booted with.
 */
int
xenHypervisorGetVcpuMax(virDomainPtr domain)
{
    xen_getdomaininfo dominfo;
    int ret;
    int maxcpu;
3492
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
3493 3494 3495 3496 3497 3498

    /* inactive domain */
    if (domain->id < 0) {
        maxcpu = MAX_VIRT_CPUS;
    } else {
        XEN_GETDOMAININFO_CLEAR(dominfo);
3499
        ret = virXen_getdomaininfo(priv->handle, domain->id,
3500 3501 3502
                                   &dominfo);

        if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
3503
            return -1;
3504 3505 3506 3507 3508 3509
        maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
    }

    return maxcpu;
}

J
John Levon 已提交
3510 3511 3512 3513 3514 3515
/**
 * xenHavePrivilege()
 *
 * Return true if the current process should be able to connect to Xen.
 */
int
3516
xenHavePrivilege(void)
J
John Levon 已提交
3517 3518
{
#ifdef __sun
3519
    return priv_ineffect(PRIV_XVM_CONTROL);
J
John Levon 已提交
3520
#else
3521
    return access(XEN_HYPERVISOR_SOCKET, R_OK) == 0;
J
John Levon 已提交
3522 3523
#endif
}