/*
 * xen_hypervisor.c: direct access to Xen hypervisor level
 *
 * Copyright (C) 2005-2012 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Daniel Veillard <veillard@redhat.com>
 */

#include <config.h>

#include <stdio.h>
#include <string.h>
/* required for uint8_t, uint32_t, etc ... */
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <limits.h>
#include <regex.h>
#include <errno.h>

#ifdef __sun
# include <sys/systeminfo.h>

# include <priv.h>

# ifndef PRIV_XVM_CONTROL
#  define PRIV_XVM_CONTROL ((const char *)"xvm_control")
# endif

#endif /* __sun */

/* required for dom0_getdomaininfo_t */
#include <xen/dom0_ops.h>
#include <xen/version.h>
#ifdef HAVE_XEN_LINUX_PRIVCMD_H
# include <xen/linux/privcmd.h>
#else
# ifdef HAVE_XEN_SYS_PRIVCMD_H
#  include <xen/sys/privcmd.h>
# endif
#endif

/* required for shutdown flags */
#include <xen/sched.h>

#include "virterror_internal.h"
#include "logging.h"
#include "datatypes.h"
#include "driver.h"
#include "util.h"
#include "xen_driver.h"
#include "xen_hypervisor.h"
#include "xs_internal.h"
#include "stats_linux.h"
#include "block_stats.h"
#include "xend_internal.h"
#include "virbuffer.h"
#include "capabilities.h"
#include "memory.h"
#include "threads.h"
#include "virfile.h"
#include "virnodesuspend.h"
#include "virtypedparam.h"

#define VIR_FROM_THIS VIR_FROM_XEN

/*
 * So far there are two versions of the structures usable for doing
 * hypervisor calls.
 */
/* the old one */
typedef struct v0_hypercall_struct {
    unsigned long op;
    unsigned long arg[5];
} v0_hypercall_t;

#ifdef __linux__
# define XEN_V0_IOCTL_HYPERCALL_CMD \
        _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))
/* the new one */
typedef struct v1_hypercall_struct
{
    uint64_t op;
    uint64_t arg[5];
} v1_hypercall_t;
# define XEN_V1_IOCTL_HYPERCALL_CMD                  \
    _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
typedef v1_hypercall_t hypercall_t;
#elif defined(__sun)
typedef privcmd_hypercall_t hypercall_t;
#else
# error "unsupported platform"
#endif

#ifndef __HYPERVISOR_sysctl
# define __HYPERVISOR_sysctl 35
#endif
#ifndef __HYPERVISOR_domctl
# define __HYPERVISOR_domctl 36
#endif

#ifdef WITH_RHEL5_API
# define SYS_IFACE_MIN_VERS_NUMA 3
#else
# define SYS_IFACE_MIN_VERS_NUMA 4
#endif

static int xen_ioctl_hypercall_cmd = 0;
static struct xenHypervisorVersions hv_versions = {
    .hv = 0,
    .hypervisor = 2,
    .sys_interface = -1,
    .dom_interface = -1,
};

static int kb_per_pages = 0;

/* Regular expressions used by xenHypervisorGetCapabilities, and
 * compiled once by xenHypervisorInit.  Note that these are POSIX.2
 * extended regular expressions (regex(7)).
 */
static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
static regex_t flags_hvm_rec;
static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
static regex_t flags_pae_rec;
static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
static regex_t xen_cap_rec;
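
/*
 * For illustration only, the kind of input these patterns are expected to
 * match (example strings, not exhaustive):
 *
 *   flags_hvm_re : a /proc/cpuinfo line such as "flags : fpu vme ... vmx ..."
 *   flags_pae_re : a /proc/cpuinfo line such as "flags : fpu vme ... pae ..."
 *   xen_cap_re   : capability tokens such as "xen-3.0-x86_64" or "hvm-3.0-x86_32p"
 */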

/*
 * The content of the structures for a getdomaininfolist system hypercall
 */
#ifndef DOMFLAGS_DYING
# define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
# define DOMFLAGS_HVM       (1<<1) /* Domain is HVM                           */
# define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
# define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
# define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
# define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
# define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
# define DOMFLAGS_CPUSHIFT       8
# define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
# define DOMFLAGS_SHUTDOWNSHIFT 16
#endif
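
/*
 * Example of how these flags are typically decoded (sketch): the
 * guest-supplied shutdown reason is extracted with
 *
 *     reason = (flags >> DOMFLAGS_SHUTDOWNSHIFT) & DOMFLAGS_SHUTDOWNMASK;
 *
 * and then compared against the SHUTDOWN_* codes defined below.
 */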

/*
 * These flags explain why a system is in the state of "shutdown".  Normally,
 * they are defined in xen/sched.h.
 */
#ifndef SHUTDOWN_poweroff
# define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
# define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
# define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
# define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
#endif

#define XEN_V0_OP_GETDOMAININFOLIST	38
#define XEN_V1_OP_GETDOMAININFOLIST	38
#define XEN_V2_OP_GETDOMAININFOLIST	6

struct xen_v0_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    unsigned long shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;

struct xen_v2_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages;	/* total number of pages used */
    uint64_t max_pages;	/* maximum number of pages allowed */
    uint64_t shared_info_frame; /* MFN of shared_info struct */
    uint64_t cpu_time;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;


/* As of Hypervisor Call v2,  DomCtl v5 we are now 8-byte aligned
   even on 32-bit archs when dealing with uint64_t */
#define ALIGN_64 __attribute__((aligned(8)))

struct xen_v2d5_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;

struct xen_v2d6_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64;    /* number of shared pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
};
typedef struct xen_v2d6_getdomaininfo xen_v2d6_getdomaininfo;

struct xen_v2d7_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64;    /* number of shared pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
    uint32_t cpupool;
};
typedef struct xen_v2d7_getdomaininfo xen_v2d7_getdomaininfo;

struct xen_v2d8_getdomaininfo {
    domid_t  domain;	/* the domain number */
    uint32_t flags;	/* flags, see before */
    uint64_t tot_pages ALIGN_64;	/* total number of pages used */
    uint64_t max_pages ALIGN_64;	/* maximum number of pages allowed */
    uint64_t shr_pages ALIGN_64;    /* number of shared pages */
    uint64_t paged_pages ALIGN_64;    /* number of paged pages */
    uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
    uint64_t cpu_time ALIGN_64;  /* CPU time used */
    uint32_t nr_online_vcpus;  /* Number of VCPUs currently online. */
    uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
    uint32_t ssidref;
    xen_domain_handle_t handle;
    uint32_t cpupool;
};
typedef struct xen_v2d8_getdomaininfo xen_v2d8_getdomaininfo;

union xen_getdomaininfo {
    struct xen_v0_getdomaininfo v0;
    struct xen_v2_getdomaininfo v2;
    struct xen_v2d5_getdomaininfo v2d5;
    struct xen_v2d6_getdomaininfo v2d6;
    struct xen_v2d7_getdomaininfo v2d7;
    struct xen_v2d8_getdomaininfo v2d8;
};
typedef union xen_getdomaininfo xen_getdomaininfo;

union xen_getdomaininfolist {
    struct xen_v0_getdomaininfo *v0;
    struct xen_v2_getdomaininfo *v2;
    struct xen_v2d5_getdomaininfo *v2d5;
    struct xen_v2d6_getdomaininfo *v2d6;
    struct xen_v2d7_getdomaininfo *v2d7;
    struct xen_v2d8_getdomaininfo *v2d8;
};
typedef union xen_getdomaininfolist xen_getdomaininfolist;


struct xen_v2_getschedulerid {
    uint32_t sched_id; /* Get Scheduler ID from Xen */
};
typedef struct xen_v2_getschedulerid xen_v2_getschedulerid;


union xen_getschedulerid {
    struct xen_v2_getschedulerid *v2;
};
typedef union xen_getschedulerid xen_getschedulerid;

struct xen_v2s4_availheap {
    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
    int32_t  node;          /* NUMA node (-1 for sum across all nodes). */
    uint64_t avail_bytes;   /* Bytes available in the specified region. */
};

typedef struct xen_v2s4_availheap  xen_v2s4_availheap;

struct xen_v2s5_availheap {
    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
    int32_t  node;          /* NUMA node (-1 for sum across all nodes). */
    uint64_t avail_bytes ALIGN_64;   /* Bytes available in the specified region. */
};

typedef struct xen_v2s5_availheap  xen_v2s5_availheap;


#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size)                      \
    (hv_versions.hypervisor < 2 ?                                       \
     (VIR_ALLOC_N(domlist.v0, (size)) == 0) :                           \
     (hv_versions.dom_interface >= 8 ?                                  \
      (VIR_ALLOC_N(domlist.v2d8, (size)) == 0) :                        \
     (hv_versions.dom_interface == 7 ?                                  \
      (VIR_ALLOC_N(domlist.v2d7, (size)) == 0) :                        \
     (hv_versions.dom_interface == 6 ?                                  \
      (VIR_ALLOC_N(domlist.v2d6, (size)) == 0) :                        \
     (hv_versions.dom_interface == 5 ?                                  \
      (VIR_ALLOC_N(domlist.v2d5, (size)) == 0) :                        \
      (VIR_ALLOC_N(domlist.v2, (size)) == 0))))))

#define XEN_GETDOMAININFOLIST_FREE(domlist)            \
    (hv_versions.hypervisor < 2 ?                      \
     VIR_FREE(domlist.v0) :                            \
     (hv_versions.dom_interface >= 8 ?                 \
      VIR_FREE(domlist.v2d8) :                         \
     (hv_versions.dom_interface == 7 ?                 \
      VIR_FREE(domlist.v2d7) :                         \
     (hv_versions.dom_interface == 6 ?                 \
      VIR_FREE(domlist.v2d6) :                         \
     (hv_versions.dom_interface == 5 ?                 \
      VIR_FREE(domlist.v2d5) :                         \
      VIR_FREE(domlist.v2))))))

#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size)            \
    (hv_versions.hypervisor < 2 ?                             \
     memset(domlist.v0, 0, sizeof(*domlist.v0) * size) :      \
     (hv_versions.dom_interface >= 8 ?                        \
      memset(domlist.v2d8, 0, sizeof(*domlist.v2d8) * size) : \
     (hv_versions.dom_interface == 7 ?                        \
      memset(domlist.v2d7, 0, sizeof(*domlist.v2d7) * size) : \
     (hv_versions.dom_interface == 6 ?                        \
      memset(domlist.v2d6, 0, sizeof(*domlist.v2d6) * size) : \
     (hv_versions.dom_interface == 5 ?                        \
      memset(domlist.v2d5, 0, sizeof(*domlist.v2d5) * size) : \
      memset(domlist.v2, 0, sizeof(*domlist.v2) * size))))))

#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n)    \
    (hv_versions.hypervisor < 2 ?                   \
     domlist.v0[n].domain :                         \
     (hv_versions.dom_interface >= 8 ?              \
      domlist.v2d8[n].domain :                      \
     (hv_versions.dom_interface == 7 ?              \
      domlist.v2d7[n].domain :                      \
     (hv_versions.dom_interface == 6 ?              \
      domlist.v2d6[n].domain :                      \
     (hv_versions.dom_interface == 5 ?              \
      domlist.v2d5[n].domain :                      \
      domlist.v2[n].domain)))))

#define XEN_GETDOMAININFOLIST_UUID(domlist, n)      \
    (hv_versions.hypervisor < 2 ?                   \
     domlist.v0[n].handle :                         \
     (hv_versions.dom_interface >= 8 ?              \
      domlist.v2d8[n].handle :                      \
     (hv_versions.dom_interface == 7 ?              \
      domlist.v2d7[n].handle :                      \
     (hv_versions.dom_interface == 6 ?              \
      domlist.v2d6[n].handle :                      \
     (hv_versions.dom_interface == 5 ?              \
      domlist.v2d5[n].handle :                      \
      domlist.v2[n].handle)))))

#define XEN_GETDOMAININFOLIST_DATA(domlist)        \
    (hv_versions.hypervisor < 2 ?                  \
     (void*)(domlist->v0) :                        \
     (hv_versions.dom_interface >= 8 ?             \
      (void*)(domlist->v2d8) :                     \
     (hv_versions.dom_interface == 7 ?             \
      (void*)(domlist->v2d7) :                     \
     (hv_versions.dom_interface == 6 ?             \
      (void*)(domlist->v2d6) :                     \
     (hv_versions.dom_interface == 5 ?             \
      (void*)(domlist->v2d5) :                     \
      (void*)(domlist->v2))))))

#define XEN_GETDOMAININFO_SIZE                     \
    (hv_versions.hypervisor < 2 ?                  \
     sizeof(xen_v0_getdomaininfo) :                \
     (hv_versions.dom_interface >= 8 ?             \
      sizeof(xen_v2d8_getdomaininfo) :             \
     (hv_versions.dom_interface == 7 ?             \
      sizeof(xen_v2d7_getdomaininfo) :             \
     (hv_versions.dom_interface == 6 ?             \
      sizeof(xen_v2d6_getdomaininfo) :             \
     (hv_versions.dom_interface == 5 ?             \
      sizeof(xen_v2d5_getdomaininfo) :             \
      sizeof(xen_v2_getdomaininfo))))))

#define XEN_GETDOMAININFO_CLEAR(dominfo)                           \
    (hv_versions.hypervisor < 2 ?                                  \
     memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) :      \
     (hv_versions.dom_interface >= 8 ?                             \
      memset(&(dominfo.v2d8), 0, sizeof(xen_v2d8_getdomaininfo)) : \
     (hv_versions.dom_interface == 7 ?                             \
      memset(&(dominfo.v2d7), 0, sizeof(xen_v2d7_getdomaininfo)) : \
     (hv_versions.dom_interface == 6 ?                             \
      memset(&(dominfo.v2d6), 0, sizeof(xen_v2d6_getdomaininfo)) : \
     (hv_versions.dom_interface == 5 ?                             \
      memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo)) : \
      memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)))))))

#define XEN_GETDOMAININFO_DOMAIN(dominfo)       \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.domain :                        \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.domain :                     \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.domain :                     \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.domain :                     \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.domain :                     \
      dominfo.v2.domain)))))

#define XEN_GETDOMAININFO_CPUTIME(dominfo)      \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.cpu_time :                      \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.cpu_time :                   \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.cpu_time :                   \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.cpu_time :                   \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.cpu_time :                   \
      dominfo.v2.cpu_time)))))

#define XEN_GETDOMAININFO_CPUCOUNT(dominfo)     \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.nr_online_vcpus :               \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.nr_online_vcpus :            \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.nr_online_vcpus :            \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.nr_online_vcpus :            \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.nr_online_vcpus :            \
      dominfo.v2.nr_online_vcpus)))))

#define XEN_GETDOMAININFO_MAXCPUID(dominfo)     \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.max_vcpu_id :                   \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.max_vcpu_id :                \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.max_vcpu_id :                \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.max_vcpu_id :                \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.max_vcpu_id :                \
      dominfo.v2.max_vcpu_id)))))

#define XEN_GETDOMAININFO_FLAGS(dominfo)        \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.flags :                         \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.flags :                      \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.flags :                      \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.flags :                      \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.flags :                      \
      dominfo.v2.flags)))))

#define XEN_GETDOMAININFO_TOT_PAGES(dominfo)    \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.tot_pages :                     \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.tot_pages :                  \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.tot_pages :                  \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.tot_pages :                  \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.tot_pages :                  \
      dominfo.v2.tot_pages)))))

#define XEN_GETDOMAININFO_MAX_PAGES(dominfo)    \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.max_pages :                     \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.max_pages :                  \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.max_pages :                  \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.max_pages :                  \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.max_pages :                  \
      dominfo.v2.max_pages)))))

#define XEN_GETDOMAININFO_UUID(dominfo)         \
    (hv_versions.hypervisor < 2 ?               \
     dominfo.v0.handle :                        \
     (hv_versions.dom_interface >= 8 ?          \
      dominfo.v2d8.handle :                     \
     (hv_versions.dom_interface == 7 ?          \
      dominfo.v2d7.handle :                     \
     (hv_versions.dom_interface == 6 ?          \
      dominfo.v2d6.handle :                     \
     (hv_versions.dom_interface == 5 ?          \
      dominfo.v2d5.handle :                     \
      dominfo.v2.handle)))))

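/*
 * Typical usage of the accessor macros above (illustrative sketch only;
 * handle, maxids, etc. stand in for the caller's own variables, and the
 * real callers live elsewhere in the Xen driver):
 *
 *     xen_getdomaininfolist dominfos;
 *     int i, ndomains, id;
 *
 *     if (!XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))
 *         return -1;                        (allocation failed)
 *     XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
 *
 *     ndomains = virXen_getdomaininfolist(handle, 0, maxids, &dominfos);
 *     for (i = 0; i < ndomains; i++)
 *         id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
 *
 *     XEN_GETDOMAININFOLIST_FREE(dominfos);
 *
 * The macros hide which member of the union is live; that choice depends on
 * hv_versions.hypervisor and hv_versions.dom_interface.
 */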

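/*
 * The hypercall argument structures are passed to the privcmd ioctl by
 * address, so they are locked into RAM for the duration of each call;
 * lock_pages()/unlock_pages() below wrap mlock()/munlock() on Linux and
 * are no-ops on Solaris.
 */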
static int
lock_pages(void *addr, size_t len)
{
#ifdef __linux__
    if (mlock(addr, len) < 0) {
        virReportSystemError(errno,
                             _("Unable to lock %zu bytes of memory"),
                             len);
        return -1;
    }
    return 0;
#elif defined(__sun)
    return 0;
#endif
}

static int
unlock_pages(void *addr, size_t len)
{
#ifdef __linux__
    if (munlock(addr, len) < 0) {
        virReportSystemError(errno,
                             _("Unable to unlock %zu bytes of memory"),
                             len);
        return -1;
    }
    return 0;
#elif defined(__sun)
    return 0;
#endif
}


struct xen_v0_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v0_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;


struct xen_v2_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
    struct xen_v2_getdomaininfo *buffer;
    uint32_t  num_domains;
};
typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;

/* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
struct xen_v2s3_getdomaininfolistop {
    domid_t   first_domain;
    uint32_t  max_domains;
#ifdef __BIG_ENDIAN__
    struct {
        int __pad[(sizeof(long long) - sizeof(struct xen_v2d5_getdomaininfo *)) / sizeof(int)];
        struct xen_v2d5_getdomaininfo *v;
    } buffer;
#else
    union {
        struct xen_v2d5_getdomaininfo *v;
        uint64_t pad ALIGN_64;
    } buffer;
#endif
    uint32_t  num_domains;
};
typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;



struct xen_v0_domainop {
    domid_t   domain;
};
typedef struct xen_v0_domainop xen_v0_domainop;

/*
 * The information for a destroydomain system hypercall
 */
#define XEN_V0_OP_DESTROYDOMAIN	9
#define XEN_V1_OP_DESTROYDOMAIN	9
#define XEN_V2_OP_DESTROYDOMAIN	2

/*
 * The information for a pausedomain system hypercall
 */
#define XEN_V0_OP_PAUSEDOMAIN	10
#define XEN_V1_OP_PAUSEDOMAIN	10
#define XEN_V2_OP_PAUSEDOMAIN	3

/*
 * The information for an unpausedomain system hypercall
 */
#define XEN_V0_OP_UNPAUSEDOMAIN	11
#define XEN_V1_OP_UNPAUSEDOMAIN	11
#define XEN_V2_OP_UNPAUSEDOMAIN	4

/*
 * The information for a setmaxmem system hypercall
 */
#define XEN_V0_OP_SETMAXMEM	28
#define XEN_V1_OP_SETMAXMEM	28
#define XEN_V2_OP_SETMAXMEM	11

struct xen_v0_setmaxmem {
    domid_t	domain;
    uint64_t	maxmem;
};
typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;

struct xen_v2_setmaxmem {
    uint64_t	maxmem;
};
typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;

struct xen_v2d5_setmaxmem {
    uint64_t	maxmem ALIGN_64;
};
typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;

/*
 * The information for a setmaxvcpu system hypercall
 */
#define XEN_V0_OP_SETMAXVCPU	41
#define XEN_V1_OP_SETMAXVCPU	41
#define XEN_V2_OP_SETMAXVCPU	15

struct xen_v0_setmaxvcpu {
    domid_t	domain;
    uint32_t	maxvcpu;
};
typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;

struct xen_v2_setmaxvcpu {
    uint32_t	maxvcpu;
};
typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;

/*
 * The information for a setvcpumap system hypercall
 * Note that between versions 1 and 2 the limitation to 64 physical CPUs
 * was lifted, hence the difference in structures.
 */
#define XEN_V0_OP_SETVCPUMAP	20
#define XEN_V1_OP_SETVCPUMAP	20
#define XEN_V2_OP_SETVCPUMAP	9

struct xen_v0_setvcpumap {
    domid_t	domain;
    uint32_t	vcpu;
    cpumap_t    cpumap;
};
typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;

struct xen_v2_cpumap {
    uint8_t    *bitmap;
    uint32_t    nr_cpus;
};
struct xen_v2_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2_cpumap cpumap;
};
typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;

/* HV version 2, Dom version 5 requires 64-bit alignment */
struct xen_v2d5_cpumap {
#ifdef __BIG_ENDIAN__
    struct {
        int __pad[(sizeof(long long) - sizeof(uint8_t *)) / sizeof(int)];
        uint8_t *v;
    } bitmap;
#else
    union {
        uint8_t    *v;
        uint64_t   pad ALIGN_64;
    } bitmap;
#endif
    uint32_t    nr_cpus;
};
struct xen_v2d5_setvcpumap {
    uint32_t	vcpu;
    struct xen_v2d5_cpumap cpumap;
};
typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;

/*
 * The information for a vcpuinfo system hypercall
 */
#define XEN_V0_OP_GETVCPUINFO   43
#define XEN_V1_OP_GETVCPUINFO	43
#define XEN_V2_OP_GETVCPUINFO   14

struct xen_v0_vcpuinfo {
    domid_t	domain;		/* owner's domain */
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
    cpumap_t	cpumap;		/* deprecated in V2 */
};
typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;

struct xen_v2_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time;	/* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;

struct xen_v2d5_vcpuinfo {
    uint32_t	vcpu;		/* the vcpu number */
    uint8_t	online;		/* seen as on line */
    uint8_t	blocked;	/* blocked on event */
    uint8_t	running;	/* scheduled on CPU */
    uint64_t    cpu_time ALIGN_64; /* nanosecond of CPU used */
    uint32_t	cpu;		/* current mapping */
};
typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;

/*
 * from V2 the pinning of a vcpu is read with a separate call
 */
#define XEN_V2_OP_GETVCPUMAP	25
typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;

/*
 * from V2 we get the scheduler information
 */
#define XEN_V2_OP_GETSCHEDULERID	4

/*
 * from V2 we get the available heap information
 */
#define XEN_V2_OP_GETAVAILHEAP		9

/*
 * from V2 we get the scheduler parameters
 */
#define XEN_V2_OP_SCHEDULER		16
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF       4
#define XEN_SCHEDULER_CREDIT     5
/* get/set scheduler parameters */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1

struct xen_v2_setschedinfo {
    uint32_t sched_id;
    uint32_t cmd;
    union {
        struct xen_domctl_sched_sedf {
            uint64_t period ALIGN_64;
            uint64_t slice  ALIGN_64;
            uint64_t latency ALIGN_64;
            uint32_t extratime;
            uint32_t weight;
        } sedf;
        struct xen_domctl_sched_credit {
            uint16_t weight;
            uint16_t cap;
        } credit;
    } u;
};
typedef struct xen_v2_setschedinfo xen_v2_setschedinfo;
typedef struct xen_v2_setschedinfo xen_v2_getschedinfo;


/*
 * The hypercall operation structures also have changed on
 * changeset 86d26e6ec89b
 */
/* the old structure */
struct xen_op_v0 {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v0_getdomaininfolistop getdomaininfolist;
        xen_v0_domainop          domain;
        xen_v0_setmaxmem         setmaxmem;
        xen_v0_setmaxvcpu        setmaxvcpu;
        xen_v0_setvcpumap        setvcpumap;
        xen_v0_vcpuinfo          getvcpuinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v0 xen_op_v0;
typedef struct xen_op_v0 xen_op_v1;

/* the new structure for systems operations */
struct xen_op_v2_sys {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        xen_v2_getdomaininfolistop   getdomaininfolist;
        xen_v2s3_getdomaininfolistop getdomaininfolists3;
        xen_v2_getschedulerid        getschedulerid;
        xen_v2s4_availheap           availheap;
        xen_v2s5_availheap           availheap5;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_sys xen_op_v2_sys;

/* the new structure for domains operation */
struct xen_op_v2_dom {
    uint32_t cmd;
    uint32_t interface_version;
    domid_t  domain;
    union {
        xen_v2_setmaxmem         setmaxmem;
        xen_v2d5_setmaxmem       setmaxmemd5;
        xen_v2_setmaxvcpu        setmaxvcpu;
        xen_v2_setvcpumap        setvcpumap;
        xen_v2d5_setvcpumap      setvcpumapd5;
        xen_v2_vcpuinfo          getvcpuinfo;
        xen_v2d5_vcpuinfo        getvcpuinfod5;
        xen_v2_getvcpumap        getvcpumap;
        xen_v2d5_getvcpumap      getvcpumapd5;
        xen_v2_setschedinfo      setschedinfo;
        xen_v2_getschedinfo      getschedinfo;
        uint8_t padding[128];
    } u;
};
typedef struct xen_op_v2_dom xen_op_v2_dom;


#ifdef __linux__
# define XEN_HYPERVISOR_SOCKET	"/proc/xen/privcmd"
# define HYPERVISOR_CAPABILITIES	"/sys/hypervisor/properties/capabilities"
#elif defined(__sun)
# define XEN_HYPERVISOR_SOCKET	"/dev/xen/privcmd"
#else
# error "unsupported platform"
#endif
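/*
 * The "handle" passed to the xenHypervisorDo*Op() helpers below is simply a
 * file descriptor for this device; a sketch of how it is obtained (the real
 * open happens in xenHypervisorOpen(), later in this file):
 *
 *     int fd = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
 *
 * Hypercalls are then issued with ioctl(fd, xen_ioctl_hypercall_cmd, &hc).
 */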

static unsigned long long xenHypervisorGetMaxMemory(virDomainPtr domain);

struct xenUnifiedDriver xenHypervisorDriver = {
    .xenClose = xenHypervisorClose,
    .xenVersion = xenHypervisorGetVersion,
    .xenDomainSuspend = xenHypervisorPauseDomain,
    .xenDomainResume = xenHypervisorResumeDomain,
    .xenDomainDestroyFlags = xenHypervisorDestroyDomainFlags,
    .xenDomainGetOSType = xenHypervisorDomainGetOSType,
    .xenDomainGetMaxMemory = xenHypervisorGetMaxMemory,
    .xenDomainSetMaxMemory = xenHypervisorSetMaxMemory,
    .xenDomainGetInfo = xenHypervisorGetDomainInfo,
    .xenDomainPinVcpu = xenHypervisorPinVcpu,
    .xenDomainGetVcpus = xenHypervisorGetVcpus,
    .xenDomainGetSchedulerType = xenHypervisorGetSchedulerType,
    .xenDomainGetSchedulerParameters = xenHypervisorGetSchedulerParameters,
    .xenDomainSetSchedulerParameters = xenHypervisorSetSchedulerParameters,
};

/**
 * xenHypervisorDoV0Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor operation through the old interface,
 * this leads to a hypervisor call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
{
    int ret;
    v0_hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_versions.hv << 8;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
        return -1;

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
        ret = -1;

    if (ret < 0)
        return -1;

    return 0;
}
/**
 * xenHypervisorDoV1Op:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v1 operation, this leads to a hypervisor call through
 * ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV1Op(int handle, xen_op_v1* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = DOM0_INTERFACE_VERSION;
    hc.op = __HYPERVISOR_dom0_op;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
        return -1;

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
        ret = -1;

    if (ret < 0)
        return -1;

    return 0;
}

/**
 * xenHypervisorDoV2Sys:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor operation structure
 *
 * Do a hypervisor v2 system operation, this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_versions.sys_interface;
    hc.op = __HYPERVISOR_sysctl;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
        return -1;

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
        ret = -1;

    if (ret < 0)
        return -1;

    return 0;
}

/**
 * xenHypervisorDoV2Dom:
 * @handle: the handle to the Xen hypervisor
 * @op: pointer to the hypervisor domain operation structure
 *
 * Do a hypervisor v2 domain operation, this leads to a hypervisor
 * call through ioctl.
 *
 * Returns 0 in case of success and -1 in case of error.
 */
static int
xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
{
    int ret;
    hypercall_t hc;

    memset(&hc, 0, sizeof(hc));
    op->interface_version = hv_versions.dom_interface;
    hc.op = __HYPERVISOR_domctl;
    hc.arg[0] = (unsigned long) op;

    if (lock_pages(op, sizeof(dom0_op_t)) < 0)
        return -1;

    ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
    if (ret < 0) {
        virReportSystemError(errno,
                             _("Unable to issue hypervisor ioctl %d"),
                             xen_ioctl_hypercall_cmd);
    }

    if (unlock_pages(op, sizeof(dom0_op_t)) < 0)
        ret = -1;

    if (ret < 0)
        return -1;

    return 0;
}

/**
 * virXen_getdomaininfolist:
 * @handle: the hypervisor handle
 * @first_domain: first domain in the range
 * @maxids: maximum number of domains to list
 * @dominfos: output structures
 *
 * Do a low level hypercall to list existing domains' information
 *
 * Returns the number of domains or -1 in case of failure
 */
static int
virXen_getdomaininfolist(int handle, int first_domain, int maxids,
                         xen_getdomaininfolist *dominfos)
{
    int ret = -1;

    if (lock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
                   XEN_GETDOMAININFO_SIZE * maxids) < 0)
        return -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_sys op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETDOMAININFOLIST;

        if (hv_versions.sys_interface < 3) {
            op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolist.max_domains = maxids;
            op.u.getdomaininfolist.buffer = dominfos->v2;
            op.u.getdomaininfolist.num_domains = maxids;
        } else {
            op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
            op.u.getdomaininfolists3.max_domains = maxids;
            op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
            op.u.getdomaininfolists3.num_domains = maxids;
        }
        ret = xenHypervisorDoV2Sys(handle, &op);

        if (ret == 0) {
            if (hv_versions.sys_interface < 3)
                ret = op.u.getdomaininfolist.num_domains;
            else
                ret = op.u.getdomaininfolists3.num_domains;
        }
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV1Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
        op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
        op.u.getdomaininfolist.max_domains = maxids;
        op.u.getdomaininfolist.buffer = dominfos->v0;
        op.u.getdomaininfolist.num_domains = maxids;
        ret = xenHypervisorDoV0Op(handle, &op);
        if (ret == 0)
            ret = op.u.getdomaininfolist.num_domains;
    }
    if (unlock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
                     XEN_GETDOMAININFO_SIZE * maxids) < 0)
        ret = -1;

    return ret;
}

static int
virXen_getdomaininfo(int handle, int first_domain,
                     xen_getdomaininfo *dominfo) {
    xen_getdomaininfolist dominfos;

    if (hv_versions.hypervisor < 2) {
        dominfos.v0 = &(dominfo->v0);
    } else {
        dominfos.v2 = &(dominfo->v2);
    }

    return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
}


/**
 * xenHypervisorGetSchedulerType:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams: where to return the number of scheduler parameters
 *
 * Do a low level hypercall to get scheduler type
 *
 * Returns scheduler name or NULL in case of failure
 */
char *
xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
{
    char *schedulertype = NULL;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domain or conn is NULL"));
        return NULL;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("priv->handle invalid"));
        return NULL;
    }
    if (domain->id < 0) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        return NULL;
    }

    /*
     * Support only hv_versions.dom_interface >=5
     * (Xen 3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (hv_versions.dom_interface < 5) {
        virReportError(VIR_ERR_NO_XEN, "%s",
                       _("unsupported in dom interface < 5"));
        return NULL;
    }

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_sys op;
        int ret;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op);
        if (ret < 0)
            return NULL;

        switch (op.u.getschedulerid.sched_id){
            case XEN_SCHEDULER_SEDF:
                schedulertype = strdup("sedf");
                if (schedulertype == NULL)
                    virReportOOMError();
                if (nparams)
                    *nparams = XEN_SCHED_SEDF_NPARAM;
                break;
            case XEN_SCHEDULER_CREDIT:
                schedulertype = strdup("credit");
                if (schedulertype == NULL)
                    virReportOOMError();
                if (nparams)
                    *nparams = XEN_SCHED_CRED_NPARAM;
                break;
            default:
                break;
        }
    }

    return schedulertype;
}
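
/*
 * Example call (sketch): a caller would typically do
 *
 *     int nparams = 0;
 *     char *name = xenHypervisorGetSchedulerType(dom, &nparams);
 *
 * where name comes back as "credit" or "sedf" (strdup'ed, so the caller
 * must free it) and nparams as the number of scheduler parameters.
 */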

/**
 * xenHypervisorGetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: pointer to scheduler parameters.
 *     This memory area should be allocated before calling.
 * @nparams: this parameter must be at least as large as
 *     the number of scheduler parameters returned
 *     by xenHypervisorGetSchedulerType().
 *
 * Do a low level hypercall to get scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorGetSchedulerParameters(virDomainPtr domain,
                                    virTypedParameterPtr params, int *nparams)
{
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domain or conn is NULL"));
        return -1;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("priv->handle invalid"));
        return -1;
    }
    if (domain->id < 0) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        return -1;
    }

    /*
     * Support only hv_versions.dom_interface >=5
     * (Xen 3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (hv_versions.dom_interface < 5) {
        virReportError(VIR_ERR_NO_XEN, "%s",
                       _("unsupported in dom interface < 5"));
        return -1;
    }

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0)
            return -1;

        switch (op_sys.u.getschedulerid.sched_id){
            case XEN_SCHEDULER_SEDF:
                if (*nparams < XEN_SCHED_SEDF_NPARAM) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   "%s", _("Invalid parameter count"));
                    return -1;
                }

                /* TODO: Implement for Xen/SEDF */
                TODO
                return -1;
            case XEN_SCHEDULER_CREDIT:
                memset(&op_dom, 0, sizeof(op_dom));
                op_dom.cmd = XEN_V2_OP_SCHEDULER;
                op_dom.domain = (domid_t) domain->id;
                op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
                op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
                ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
                if (ret < 0)
                    return -1;

                if (virTypedParameterAssign(&params[0],
                                            VIR_DOMAIN_SCHEDULER_WEIGHT,
                                            VIR_TYPED_PARAM_UINT,
                                            op_dom.u.getschedinfo.u.credit.weight) < 0)
                    return -1;

                if (*nparams > 1 &&
                    virTypedParameterAssign(&params[1],
                                            VIR_DOMAIN_SCHEDULER_CAP,
                                            VIR_TYPED_PARAM_UINT,
                                            op_dom.u.getschedinfo.u.credit.cap) < 0)
                        return -1;

                if (*nparams > XEN_SCHED_CRED_NPARAM)
                    *nparams = XEN_SCHED_CRED_NPARAM;
                break;
            default:
                virReportError(VIR_ERR_INVALID_ARG,
                               _("Unknown scheduler %d"),
                               op_sys.u.getschedulerid.sched_id);
                return -1;
        }
    }

    return 0;
}

/**
 * xenHypervisorSetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @nparams:give a number of scheduler setting parameters .
 *
 * Do a low level hypercall to set scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorSetSchedulerParameters(virDomainPtr domain,
1346
                                    virTypedParameterPtr params, int nparams)
1347 1348
{
    int i;
1349
    unsigned int val;
1350
    xenUnifiedPrivatePtr priv;
1351
    char buf[256];
1352

1353
    if (domain->conn == NULL) {
1354 1355
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domain or conn is NULL"));
1356 1357 1358
        return -1;
    }

1359 1360 1361
    if (nparams == 0) {
        /* nothing to do, exit early */
        return 0;
1362 1363
    }

1364 1365 1366 1367 1368 1369 1370 1371
    if (virTypedParameterArrayValidate(params, nparams,
                                       VIR_DOMAIN_SCHEDULER_WEIGHT,
                                       VIR_TYPED_PARAM_UINT,
                                       VIR_DOMAIN_SCHEDULER_CAP,
                                       VIR_TYPED_PARAM_UINT,
                                       NULL) < 0)
        return -1;

1372
    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
1373
    if (priv->handle < 0) {
1374 1375
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("priv->handle invalid"));
1376 1377 1378
        return -1;
    }
    if (domain->id < 0) {
1379 1380
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
1381 1382 1383 1384
        return -1;
    }

    /*
P
Philipp Hahn 已提交
1385
     * Support only hv_versions.dom_interface >=5
1386 1387 1388
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
P
Philipp Hahn 已提交
1389
    if (hv_versions.dom_interface < 5) {
1390 1391
        virReportError(VIR_ERR_NO_XEN, "%s",
                       _("unsupported in dom interface < 5"));
1392 1393 1394
        return -1;
    }

P
Philipp Hahn 已提交
1395
    if (hv_versions.hypervisor > 1) {
1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret == -1) return -1;

        switch (op_sys.u.getschedulerid.sched_id){
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            TODO
1409
            return -1;
1410 1411 1412 1413 1414 1415 1416 1417
        case XEN_SCHEDULER_CREDIT: {
            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_putinfo;

            /*
1418 1419
             * credit scheduler parameters
             * following values do not change the parameters
1420 1421 1422 1423 1424
             */
            op_dom.u.getschedinfo.u.credit.weight = 0;
            op_dom.u.getschedinfo.u.credit.cap    = (uint16_t)~0U;

            for (i = 0; i < nparams; i++) {
1425
                memset(&buf, 0, sizeof(buf));
1426
                if (STREQ(params[i].field, VIR_DOMAIN_SCHEDULER_WEIGHT)) {
1427 1428
                    val = params[i].value.ui;
                    if ((val < 1) || (val > USHRT_MAX)) {
1429 1430 1431
                        virReportError(VIR_ERR_INVALID_ARG,
                                       _("Credit scheduler weight parameter (%d) "
                                         "is out of range (1-65535)"), val);
1432
                        return -1;
1433
                    }
1434
                    op_dom.u.getschedinfo.u.credit.weight = val;
1435
                } else if (STREQ(params[i].field, VIR_DOMAIN_SCHEDULER_CAP)) {
1436
                    val = params[i].value.ui;
1437
                    if (val >= USHRT_MAX) {
1438 1439 1440
                        virReportError(VIR_ERR_INVALID_ARG,
                                       _("Credit scheduler cap parameter (%d) is "
                                         "out of range (0-65534)"), val);
1441
                        return -1;
1442
                    }
1443
                    op_dom.u.getschedinfo.u.credit.cap = val;
1444
                }
1445 1446 1447 1448
            }

            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
1449
                return -1;
1450
            break;
1451
        }
1452
        default:
1453 1454 1455
            virReportError(VIR_ERR_INVALID_ARG,
                           _("Unknown scheduler %d"),
                           op_sys.u.getschedulerid.sched_id);
1456 1457 1458
            return -1;
        }
    }
1459

1460 1461 1462
    return 0;
}


int
xenHypervisorDomainBlockStats(virDomainPtr dom,
                              const char *path,
                              struct _virDomainBlockStats *stats)
{
#ifdef __linux__
    xenUnifiedPrivatePtr priv;
    int ret;

    priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
    xenUnifiedLock(priv);
    /* Need to lock because it hits the xenstore handle :-( */
    ret = xenLinuxDomainBlockStats(priv, dom, path, stats);
    xenUnifiedUnlock(priv);
    return ret;
#else
    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                   _("block statistics not supported on this platform"));
    return -1;
#endif
}

/* Paths have the form vif<domid>.<n> (this interface checks that
 * <domid> is the real domain ID and returns an error if not).
 *
 * In future we may allow you to query bridge stats (virbrX or
 * xenbrX), but that will probably be through a separate
 * virNetwork interface, as yet not decided.
 */
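/* Example: the path "vif3.1" refers to interface number 1 of the domain
 * whose ID is 3.
 */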
int
xenHypervisorDomainInterfaceStats(virDomainPtr dom,
                                  const char *path,
                                  struct _virDomainInterfaceStats *stats)
{
#ifdef __linux__
    int rqdomid, device;

    /* Verify that the vif requested is one belonging to the current
     * domain.
     */
    if (sscanf(path, "vif%d.%d", &rqdomid, &device) != 2) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid path, should be vif<domid>.<n>."));
        return -1;
    }
    if (rqdomid != dom->id) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid path, vif<domid> should match this domain ID"));
        return -1;
    }

    return linuxDomainInterfaceStats(path, stats);
#else
    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                   _("/proc/net/dev: Interface not found"));
    return -1;
#endif
}

/**
 * virXen_pausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to pause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_pausedomain(int handle, int id)
{
    int ret = -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_PAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_PAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}

/**
 * virXen_unpausedomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to unpause the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_unpausedomain(int handle, int id)
{
    int ret = -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}

/**
 * virXen_destroydomain:
 * @handle: the hypervisor handle
 * @id: the domain id
 *
 * Do a low level hypercall to destroy the domain
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_destroydomain(int handle, int id)
{
    int ret = -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_DESTROYDOMAIN;
        op.domain = (domid_t) id;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_DESTROYDOMAIN;
        op.u.domain.domain = (domid_t) id;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}

/**
 * virXen_setmaxmem:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @memory: the amount of memory in kilobytes
 *
 * Do a low level hypercall to change the max memory amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxmem(int handle, int id, unsigned long memory)
{
    int ret = -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXMEM;
        op.domain = (domid_t) id;
        if (hv_versions.dom_interface < 5)
            op.u.setmaxmem.maxmem = memory;
        else
            op.u.setmaxmemd5.maxmem = memory;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXMEM;
        op.u.setmaxmem.domain = (domid_t) id;
        op.u.setmaxmem.maxmem = memory;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}

/**
 * virXen_setmaxvcpus:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpus: the number of vcpus
 *
 * Do a low level hypercall to change the max vcpus amount
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
{
    int ret = -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETMAXVCPU;
        op.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV2Dom(handle, &op);
    } else if (hv_versions.hypervisor == 1) {
        xen_op_v1 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V1_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV1Op(handle, &op);
    } else if (hv_versions.hypervisor == 0) {
        xen_op_v0 op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V0_OP_SETMAXVCPU;
        op.u.setmaxvcpu.domain = (domid_t) id;
        op.u.setmaxvcpu.maxvcpu = vcpus;
        ret = xenHypervisorDoV0Op(handle, &op);
    }
    return ret;
}

/**
 * virXen_setvcpumap:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to map
 * @cpumap: the bitmap for this vcpu
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to change the pinning for vcpu
 *
 * Returns 0 or -1 in case of failure
 */
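/* Example: with maplen == 1 and cpumap[0] == 0x05, bits 0 and 2 are set,
 * i.e. the vcpu is allowed to run only on physical CPUs 0 and 2.
 */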
static int
virXen_setvcpumap(int handle, int id, unsigned int vcpu,
                  unsigned char * cpumap, int maplen)
{
    int ret = -1;
    unsigned char *new = NULL;
    unsigned char *bitmap = NULL;
    uint32_t nr_cpus;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        if (lock_pages(cpumap, maplen) < 0)
            return -1;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_SETVCPUMAP;
        op.domain = (domid_t) id;

        /* Xen requires the cpumap buffer to be at least sizeof(uint64_t)
         * bytes, and nr_cpus to be sizeof(uint64_t) * 8. */
        if (maplen < 8) {
            if (VIR_ALLOC_N(new, sizeof(uint64_t)) < 0) {
                virReportOOMError();
                return -1;
            }
            memcpy(new, cpumap, maplen);
            bitmap = new;
            nr_cpus = sizeof(uint64_t) * 8;
        } else {
            bitmap = cpumap;
            nr_cpus = maplen * 8;
        }

        if (hv_versions.dom_interface < 5) {
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap.bitmap = bitmap;
            op.u.setvcpumap.cpumap.nr_cpus = nr_cpus;
        } else {
            op.u.setvcpumapd5.vcpu = vcpu;
            op.u.setvcpumapd5.cpumap.bitmap.v = bitmap;
            op.u.setvcpumapd5.cpumap.nr_cpus = nr_cpus;
        }
        ret = xenHypervisorDoV2Dom(handle, &op);
        VIR_FREE(new);

        if (unlock_pages(cpumap, maplen) < 0)
            ret = -1;
    } else {
        cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
        uint64_t *pm = &xen_cpumap;
        int j;

        if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
            return -1;

        memset(pm, 0, sizeof(cpumap_t));
        for (j = 0; j < maplen; j++)
            *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));

        if (hv_versions.hypervisor == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV1Op(handle, &op);
        } else if (hv_versions.hypervisor == 0) {
            xen_op_v0 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_SETVCPUMAP;
            op.u.setvcpumap.domain = (domid_t) id;
            op.u.setvcpumap.vcpu = vcpu;
            op.u.setvcpumap.cpumap = xen_cpumap;
            ret = xenHypervisorDoV0Op(handle, &op);
        }
    }
    return ret;
}


/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: pointer to the structure receiving the vcpu run state and CPU time
 * @cpumap: the bitmap to fill in with this vcpu's pinning
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to retrieve info about a vcpu and its pinning
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;

    if (hv_versions.hypervisor > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        if (hv_versions.dom_interface < 5)
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (ret < 0)
            return -1;
        ipt->number = vcpu;
        if (hv_versions.dom_interface < 5) {
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
        }
        if ((cpumap != NULL) && (maplen > 0)) {
            if (lock_pages(cpumap, maplen) < 0)
                return -1;

            memset(cpumap, 0, maplen);
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
            if (hv_versions.dom_interface < 5) {
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (unlock_pages(cpumap, maplen) < 0)
                ret = -1;
        }
    } else {
        int mapl = maplen;
        int cpu;

        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);

        if (hv_versions.hypervisor == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return -1;
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hv_versions.hypervisor == 0) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return -1;
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
    }
    return ret;
}

/**
 * xenHypervisorInit:
 * @override_versions: pointer to optional struct xenHypervisorVersions with
 *     version information used instead of automatic version detection.
 *
 * Initialize the hypervisor layer. Try to detect the kind of interface
 * used i.e. pre or post changeset 10277
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorInit(struct xenHypervisorVersions *override_versions)
{
    int fd, ret, cmd, errcode;
    hypercall_t hc;
    v0_hypercall_t v0_hc;
    xen_getdomaininfo info;
    virVcpuInfoPtr ipt = NULL;

    /* Compile regular expressions used by xenHypervisorGetCapabilities.
     * Note that errors here are really internal errors since these
     * regexps should never fail to compile.
     */
    errcode = regcomp(&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror(errcode, &flags_hvm_rec, error, sizeof(error));
        regfree(&flags_hvm_rec);
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", error);
        return -1;
    }
    errcode = regcomp(&flags_pae_rec, flags_pae_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror(errcode, &flags_pae_rec, error, sizeof(error));
        regfree(&flags_pae_rec);
        regfree(&flags_hvm_rec);
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", error);
        return -1;
    }
    errcode = regcomp(&xen_cap_rec, xen_cap_re, REG_EXTENDED);
    if (errcode != 0) {
        char error[100];
        regerror(errcode, &xen_cap_rec, error, sizeof(error));
        regfree(&xen_cap_rec);
        regfree(&flags_pae_rec);
        regfree(&flags_hvm_rec);
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", error);
        return -1;
    }

    if (override_versions) {
        hv_versions = *override_versions;
        return 0;
    }

    /* Xen hypervisor version detection begins. */
    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        hv_versions.hypervisor = -1;
        return -1;
    }
    fd = ret;

    /*
     * The size of the hypervisor call block changed July 2006
     * this detects whether we are using the new or the old hypercall_t structure
     */
    hc.op = __HYPERVISOR_xen_version;
    hc.arg[0] = (unsigned long) XENVER_version;
    hc.arg[1] = 0;

    cmd = IOCTL_PRIVCMD_HYPERCALL;
    ret = ioctl(fd, cmd, (unsigned long) &hc);

    if ((ret != -1) && (ret != 0)) {
        VIR_DEBUG("Using new hypervisor call: %X", ret);
        hv_versions.hv = ret;
        xen_ioctl_hypercall_cmd = cmd;
        goto detect_v2;
    }

#ifndef __sun
    /*
     * check if the old hypercall interface is actually working
     */
    v0_hc.op = __HYPERVISOR_xen_version;
    v0_hc.arg[0] = (unsigned long) XENVER_version;
    v0_hc.arg[1] = 0;
    cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
    ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
    if ((ret != -1) && (ret != 0)) {
        VIR_DEBUG("Using old hypervisor call: %X", ret);
        hv_versions.hv = ret;
        xen_ioctl_hypercall_cmd = cmd;
        hv_versions.hypervisor = 0;
        goto done;
    }
#endif

    /*
     * we failed to make any hypercall
     */

    hv_versions.hypervisor = -1;
    virReportSystemError(errno,
                         _("Unable to issue hypervisor ioctl %lu"),
                         (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
    VIR_FORCE_CLOSE(fd);
    return -1;

 detect_v2:
    /*
     * The hypercalls were refactored into 3 different sections in August 2006
     * Try to detect if we are running a version post 3.0.2 with the new ones
     * or the old ones
     */
    hv_versions.hypervisor = 2;

    if (VIR_ALLOC(ipt) < 0) {
        virReportOOMError();
        return -1;
    }
    /* Currently consider RHEL5.0 Fedora7, xen-3.1, and xen-unstable */
    hv_versions.sys_interface = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* RHEL 5.0 */
        hv_versions.dom_interface = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver2 dom ver3");
            goto done;
        }
        /* Fedora 7 */
        hv_versions.dom_interface = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver2 dom ver4");
            goto done;
        }
    }

    hv_versions.sys_interface = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* xen-3.1 */
        hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver3 dom ver5");
            goto done;
        }
    }

    hv_versions.sys_interface = 4; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* Fedora 8 */
        hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver4 dom ver5");
            goto done;
        }
    }

    hv_versions.sys_interface = 6; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        /* Xen 3.2, Fedora 9 */
        hv_versions.dom_interface = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver6 dom ver5");
            goto done;
        }
    }

    /* Xen 4.0 */
    hv_versions.sys_interface = 7; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        hv_versions.dom_interface = 6; /* XEN_DOMCTL_INTERFACE_VERSION */
        VIR_DEBUG("Using hypervisor call v2, sys ver7 dom ver6");
        goto done;
    }

    /* Xen 4.1
     * sysctl version 8 -> xen-unstable c/s 21118:28e5409e3fb3
     * domctl version 7 -> xen-unstable c/s 21212:de94884a669c
     * domctl version 8 -> xen-unstable c/s 23874:651aed73b39c
     */
    hv_versions.sys_interface = 8; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        hv_versions.dom_interface = 7; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver8 dom ver7");
            goto done;
        }
        hv_versions.dom_interface = 8; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver8 dom ver8");
            goto done;
        }
    }

    /* Xen 4.2
     * sysctl version 9 -> xen-unstable c/s 24102:dc8e55c90604
     * domctl version 8 -> unchanged from Xen 4.1
     */
    hv_versions.sys_interface = 9; /* XEN_SYSCTL_INTERFACE_VERSION */
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        hv_versions.dom_interface = 8; /* XEN_DOMCTL_INTERFACE_VERSION */
        if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
            VIR_DEBUG("Using hypervisor call v2, sys ver9 dom ver8");
            goto done;
        }
    }

    hv_versions.hypervisor = 1;
    hv_versions.sys_interface = -1;
    if (virXen_getdomaininfo(fd, 0, &info) == 1) {
        VIR_DEBUG("Using hypervisor call v1");
        goto done;
    }

    /*
     * we failed to make the getdomaininfolist hypercall
     */
    hv_versions.hypervisor = -1;
    virReportSystemError(errno,
                         _("Unable to issue hypervisor ioctl %lu"),
                         (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
    VIR_DEBUG("Failed to find any Xen hypervisor method");
    VIR_FORCE_CLOSE(fd);
    VIR_FREE(ipt);
    return -1;

 done:
    VIR_FORCE_CLOSE(fd);
    VIR_FREE(ipt);
    return 0;
}


static int xenHypervisorOnceInit(void) {
    return xenHypervisorInit(NULL);
}

VIR_ONCE_GLOBAL_INIT(xenHypervisor)
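/* VIR_ONCE_GLOBAL_INIT() generates a xenHypervisorInitialize() helper that
 * runs xenHypervisorOnceInit() at most once; xenHypervisorOpen() below calls
 * it before touching the hypervisor device. */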

/**
 * xenHypervisorOpen:
 * @conn: pointer to the connection block
 * @auth: authentication callbacks (unused)
 * @flags: combination of virDrvOpenFlag(s)
 *
 * Connects to the Xen hypervisor.
 *
 * Returns 0 or -1 in case of error.
 */
virDrvOpenStatus
xenHypervisorOpen(virConnectPtr conn,
                  virConnectAuthPtr auth ATTRIBUTE_UNUSED,
                  unsigned int flags)
{
    int ret;
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;

    virCheckFlags(VIR_CONNECT_RO, VIR_DRV_OPEN_ERROR);

    if (xenHypervisorInitialize() < 0)
        return -1;

    priv->handle = -1;

    ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
    if (ret < 0) {
        virReportError(VIR_ERR_NO_XEN, "%s", XEN_HYPERVISOR_SOCKET);
        return -1;
    }

    priv->handle = ret;

    return 0;
}

/**
 * xenHypervisorClose:
 * @conn: pointer to the connection block
 *
 * Close the connection to the Xen hypervisor.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
xenHypervisorClose(virConnectPtr conn)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;

    if (priv->handle < 0)
        return -1;

    ret = VIR_CLOSE(priv->handle);
    if (ret < 0)
        return -1;

    return 0;
}


/**
 * xenHypervisorGetVersion:
 * @conn: pointer to the connection block
 * @hvVer: where to store the version
 *
 * Call the hypervisor to extract its own internal API version
 *
 * Returns 0 in case of success, -1 in case of error
 */
int
xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || hvVer == NULL)
        return -1;
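    /* hv_versions.hv holds the raw XENVER_version value: major version in the
     * upper 16 bits, minor in the lower 16, so e.g. 0x00040001 (Xen 4.1) is
     * reported as 4001000. */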
    *hvVer = (hv_versions.hv >> 16) * 1000000 + (hv_versions.hv & 0xFFFF) * 1000;
    return 0;
}

struct guest_arch {
    virArch arch;
    int hvm;
    int pae;
    int nonpae;
    int ia64_be;
};


static int xenDefaultConsoleType(const char *ostype,
                                 virArch arch ATTRIBUTE_UNUSED)
{
    if (STREQ(ostype, "hvm"))
        return VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_SERIAL;
    else
        return VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_XEN;
}

static virCapsPtr
xenHypervisorBuildCapabilities(virConnectPtr conn,
                               virArch hostarch,
                               int host_pae,
                               const char *hvm_type,
                               struct guest_arch *guest_archs,
                               int nr_guest_archs) {
    virCapsPtr caps;
    int i;
    int hv_major = hv_versions.hv >> 16;
    int hv_minor = hv_versions.hv & 0xFFFF;

    if ((caps = virCapabilitiesNew(hostarch, 1, 1)) == NULL)
        goto no_memory;

    virCapabilitiesSetMacPrefix(caps, (unsigned char[]){ 0x00, 0x16, 0x3e });
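    /* The 00:16:3e prefix above is the IEEE OUI registered to Xen and is the
     * conventional prefix for generated guest MAC addresses. */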

    if (hvm_type && STRNEQ(hvm_type, "") &&
        virCapabilitiesAddHostFeature(caps, hvm_type) < 0)
        goto no_memory;
    if (host_pae &&
        virCapabilitiesAddHostFeature(caps, "pae") < 0)
        goto no_memory;


    if (virCapabilitiesAddHostMigrateTransport(caps,
                                               "xenmigr") < 0)
        goto no_memory;


    if (hv_versions.sys_interface >= SYS_IFACE_MIN_VERS_NUMA && conn != NULL) {
        if (xenDaemonNodeGetTopology(conn, caps) != 0) {
            virCapabilitiesFree(caps);
            return NULL;
        }
    }

    for (i = 0; i < nr_guest_archs; ++i) {
        virCapsGuestPtr guest;
        char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
        virCapsGuestMachinePtr *machines;

        if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
            goto no_memory;

        if ((guest = virCapabilitiesAddGuest(caps,
                                             guest_archs[i].hvm ? "hvm" : "xen",
                                             guest_archs[i].arch,
                                             (hostarch == VIR_ARCH_X86_64 ?
                                              "/usr/lib64/xen/bin/qemu-dm" :
                                              "/usr/lib/xen/bin/qemu-dm"),
                                             (guest_archs[i].hvm ?
                                              "/usr/lib/xen/boot/hvmloader" :
                                              NULL),
                                             1,
                                             machines)) == NULL) {
            virCapabilitiesFreeMachines(machines, 1);
            goto no_memory;
        }
        machines = NULL;

        if (virCapabilitiesAddGuestDomain(guest,
                                          "xen",
                                          NULL,
                                          NULL,
                                          0,
                                          NULL) == NULL)
            goto no_memory;

        if (guest_archs[i].pae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "pae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].nonpae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "nonpae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].ia64_be &&
            virCapabilitiesAddGuestFeature(guest,
                                           "ia64_be",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].hvm) {
            if (virCapabilitiesAddGuestFeature(guest,
                                               "acpi",
                                               1, 1) == NULL)
                goto no_memory;

            /* In Xen 3.1.0, APIC is always on and can't be toggled */
            if (virCapabilitiesAddGuestFeature(guest,
                                               "apic",
                                               1,
                                               (hv_major > 3 &&
                                                hv_minor > 0 ?
                                                0 : 1)) == NULL)
                goto no_memory;

            /* Xen 3.3.x and beyond supports enabling/disabling
             * hardware assisted paging.  Default is off.
             */
            if ((hv_major == 3 && hv_minor >= 3) || (hv_major > 3))
                if (virCapabilitiesAddGuestFeature(guest,
                                                   "hap",
                                                   0,
                                                   1) == NULL)
                    goto no_memory;

            /* Xen 3.4.x and beyond supports the Viridian (Hyper-V)
             * enlightenment interface.  Default is off.
             */
            if ((hv_major == 3 && hv_minor >= 4) || (hv_major > 3))
                if (virCapabilitiesAddGuestFeature(guest,
                                                   "viridian",
                                                   0,
                                                   1) == NULL)
                    goto no_memory;
        }

    }

    caps->defaultConsoleTargetType = xenDefaultConsoleType;

    return caps;

 no_memory:
    virCapabilitiesFree(caps);
    return NULL;
}

#ifdef __sun

static int
get_cpu_flags(virConnectPtr conn, const char **hvm, int *pae, int *longmode)
{
    struct {
        uint32_t r_eax, r_ebx, r_ecx, r_edx;
    } regs;

    char tmpbuf[20];
    int ret = 0;
    int fd;

    /* returns -1, errno 22 if in 32-bit mode */
    *longmode = (sysinfo(SI_ARCHITECTURE_64, tmpbuf, sizeof(tmpbuf)) != -1);

    if ((fd = open("/dev/cpu/self/cpuid", O_RDONLY)) == -1 ||
        pread(fd, &regs, sizeof(regs), 0) != sizeof(regs)) {
        virReportSystemError(errno, "%s", _("could not read CPU flags"));
        goto out;
    }

    *pae = 0;
    *hvm = "";
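    /* Note on the vendor-string comparisons below: the regs struct lays out
     * ebx, ecx, edx consecutively while CPUID returns the vendor string in
     * EBX, EDX, ECX order, so "AuthenticAMD" reads back as "AuthcAMDenti"
     * and "GenuineIntel" as "GenuntelineI". */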

    if (STREQLEN((const char *)&regs.r_ebx, "AuthcAMDenti", 12)) {
        if (pread(fd, &regs, sizeof(regs), 0x80000001) == sizeof(regs)) {
            /* Read secure virtual machine bit (bit 2 of ECX feature ID) */
            if ((regs.r_ecx >> 2) & 1) {
                *hvm = "svm";
            }
            if ((regs.r_edx >> 6) & 1)
                *pae = 1;
        }
    } else if (STREQLEN((const char *)&regs.r_ebx, "GenuntelineI", 12)) {
        if (pread(fd, &regs, sizeof(regs), 0x00000001) == sizeof(regs)) {
            /* Read VMXE feature bit (bit 5 of ECX feature ID) */
            if ((regs.r_ecx >> 5) & 1)
                *hvm = "vmx";
            if ((regs.r_edx >> 6) & 1)
                *pae = 1;
        }
    }

    ret = 1;

out:
    VIR_FORCE_CLOSE(fd);
    return ret;
}

static virCapsPtr
xenHypervisorMakeCapabilitiesSunOS(virConnectPtr conn)
{
    struct guest_arch guest_arches[32];
    int i = 0;
    virCapsPtr caps = NULL;
    int pae, longmode;
    const char *hvm;

    if (!get_cpu_flags(conn, &hvm, &pae, &longmode))
        return NULL;

    guest_arches[i].arch = VIR_ARCH_I686;
    guest_arches[i].hvm = 0;
    guest_arches[i].pae = pae;
    guest_arches[i].nonpae = !pae;
    guest_arches[i].ia64_be = 0;
    i++;

    if (longmode) {
        guest_arches[i].arch = VIR_ARCH_X86_64;
        guest_arches[i].hvm = 0;
        guest_arches[i].pae = 0;
        guest_arches[i].nonpae = 0;
        guest_arches[i].ia64_be = 0;
        i++;
    }

    if (hvm[0] != '\0') {
        guest_arches[i].arch = VIR_ARCH_I686;
        guest_arches[i].hvm = 1;
        guest_arches[i].pae = pae;
        guest_arches[i].nonpae = 1;
        guest_arches[i].ia64_be = 0;
        i++;

        if (longmode) {
            guest_arches[i].arch = VIR_ARCH_X86_64;
            guest_arches[i].hvm = 1;
            guest_arches[i].pae = 0;
            guest_arches[i].nonpae = 0;
            guest_arches[i].ia64_be = 0;
            i++;
        }
    }

    if ((caps = xenHypervisorBuildCapabilities(conn,
                                               virArchFromHost(),
                                               pae, hvm,
                                               guest_arches, i)) == NULL)
        virReportOOMError();

    return caps;
}

#endif /* __sun */

/**
 * xenHypervisorMakeCapabilitiesInternal:
 * @conn: pointer to the connection block
 * @hostarch: the host architecture
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Return the capabilities of this hypervisor.
 */
virCapsPtr
xenHypervisorMakeCapabilitiesInternal(virConnectPtr conn,
                                      virArch hostarch,
                                      FILE *cpuinfo, FILE *capabilities)
{
    char line[1024], *str, *token;
    regmatch_t subs[4];
    char *saveptr = NULL;
    int i;

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
    struct guest_arch guest_archs[32];
    int nr_guest_archs = 0;
    virCapsPtr caps = NULL;

    memset(guest_archs, 0, sizeof(guest_archs));

    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
    if (cpuinfo) {
        while (fgets(line, sizeof(line), cpuinfo)) {
            if (regexec(&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
                && subs[0].rm_so != -1) {
                if (virStrncpy(hvm_type,
                               &line[subs[1].rm_so],
                               subs[1].rm_eo-subs[1].rm_so,
                               sizeof(hvm_type)) == NULL)
                    goto no_memory;
            } else if (regexec(&flags_pae_rec, line, 0, NULL, 0) == 0)
                host_pae = 1;
        }
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */
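    /* Illustrative example: on a 64-bit x86 host the file typically contains
     * a single line such as
     *   xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
     */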

    /* Expecting one line in this file - ignore any more. */
    if ((capabilities) && (fgets(line, sizeof(line), capabilities))) {
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof(guest_archs) / sizeof(guest_archs[0])
                 && (token = strtok_r(str, " ", &saveptr)) != NULL;
             str = NULL) {

            if (regexec(&xen_cap_rec, token, sizeof(subs) / sizeof(subs[0]),
                        subs, 0) == 0) {
                int hvm = STRPREFIX(&token[subs[1].rm_so], "hvm");
                int pae = 0, nonpae = 0, ia64_be = 0;
                virArch arch;

                if (STRPREFIX(&token[subs[2].rm_so], "x86_32")) {
                    arch = VIR_ARCH_I686;
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "p"))
                        pae = 1;
                    else
                        nonpae = 1;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "x86_64")) {
                    arch = VIR_ARCH_X86_64;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "ia64")) {
                    arch = VIR_ARCH_ITANIUM;
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "be"))
                        ia64_be = 1;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "powerpc64")) {
                    arch = VIR_ARCH_PPC64;
                } else {
                    /* XXX surely no other Xen archs exist. Arrrrrrrrrm  */
                    continue;
                }

                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (guest_archs[i].arch == arch &&
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }

                /* Too many arch flavours - highly unlikely ! */
                if (i >= ARRAY_CARDINALITY(guest_archs))
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

                guest_archs[i].arch = arch;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports
                   separately capabilities so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
            }
        }
    }

    if ((caps = xenHypervisorBuildCapabilities(conn,
                                               hostarch,
                                               host_pae,
                                               hvm_type,
                                               guest_archs,
                                               nr_guest_archs)) == NULL)
        goto no_memory;

    return caps;

 no_memory:
    virReportOOMError();
    virCapabilitiesFree(caps);
    return NULL;
}

/**
 * xenHypervisorMakeCapabilities:
 *
 * Return the capabilities of this hypervisor.
 */
virCapsPtr
xenHypervisorMakeCapabilities(virConnectPtr conn)
{
#ifdef __sun
    return xenHypervisorMakeCapabilitiesSunOS(conn);
#else
    virCapsPtr caps = NULL;
    FILE *cpuinfo, *capabilities;

    cpuinfo = fopen("/proc/cpuinfo", "r");
    if (cpuinfo == NULL) {
        if (errno != ENOENT) {
            virReportSystemError(errno,
                                 _("cannot read file %s"),
                                 "/proc/cpuinfo");
            return NULL;
        }
    }

    capabilities = fopen("/sys/hypervisor/properties/capabilities", "r");
    if (capabilities == NULL) {
        if (errno != ENOENT) {
            VIR_FORCE_FCLOSE(cpuinfo);
            virReportSystemError(errno,
                                 _("cannot read file %s"),
                                 "/sys/hypervisor/properties/capabilities");
            return NULL;
        }
    }

    caps = xenHypervisorMakeCapabilitiesInternal(conn,
                                                 virArchFromHost(),
                                                 cpuinfo,
                                                 capabilities);
    if (caps == NULL)
        goto cleanup;

    if (virNodeSuspendGetTargetMask(&caps->host.powerMgmt) < 0)
        VIR_WARN("Failed to get host power management capabilities");

cleanup:
    VIR_FORCE_FCLOSE(cpuinfo);
    VIR_FORCE_FCLOSE(capabilities);

    return caps;
#endif /* __sun */
}



/**
 * xenHypervisorGetCapabilities:
 * @conn: pointer to the connection block
 *
 * Return the capabilities of this hypervisor.
 */
char *
xenHypervisorGetCapabilities(virConnectPtr conn)
{
    xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;
    char *xml;

    if (!(xml = virCapabilitiesFormatXML(priv->caps))) {
        virReportOOMError();
        return NULL;
    }

    return xml;
}


/**
 * xenHypervisorNumOfDomains:
 * @conn: pointer to the connection block
 *
 * Provides the number of active domains.
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorNumOfDomains(virConnectPtr conn)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids;
    static int last_maxids = 2;
    int maxids = last_maxids;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return -1;

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();
        return -1;
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (ret < 0)
        return -1;

    nbids = ret;
    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nbids == maxids) {
        if (maxids < 65000) {
            last_maxids *= 2;
            maxids *= 2;
            goto retry;
        }
        nbids = -1;
    }
    if ((nbids < 0) || (nbids > maxids))
        return -1;
    return nbids;
}

/**
 * xenHypervisorListDomains:
 * @conn: pointer to the connection block
 * @ids: array to collect the list of IDs of active domains
 * @maxids: size of @ids
 *
 * Collect the list of active domains, and store their IDs in @ids
 *
 * Returns the number of domains found or -1 in case of error
 */
int
xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
{
    xen_getdomaininfolist dominfos;
    int ret, nbids, i;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 ||
        (ids == NULL) || (maxids < 0))
        return -1;

    if (maxids == 0)
        return 0;

    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();
        return -1;
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
    memset(ids, 0, maxids * sizeof(int));

    ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (ret < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return -1;
    }

    nbids = ret;
    if ((nbids < 0) || (nbids > maxids)) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return -1;
    }

    for (i = 0;i < nbids;i++) {
        ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
    }

    XEN_GETDOMAININFOLIST_FREE(dominfos);
    return nbids;
}


char *
xenHypervisorDomainGetOSType(virDomainPtr dom)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    char *ostype = NULL;

    priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
    if (priv->handle < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domain shut off or invalid"));
        return NULL;
    }

    /* HVs earlier than 3.1.0 don't include the HVM flags in guest status */
    if (hv_versions.hypervisor < 2 ||
        hv_versions.dom_interface < 4) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unsupported in dom interface < 4"));
        return NULL;
    }

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, dom->id, &dominfo) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("cannot get domain details"));
        return NULL;
    }

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != dom->id) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("cannot get domain details"));
        return NULL;
    }

    if (XEN_GETDOMAININFO_FLAGS(dominfo) & DOMFLAGS_HVM)
        ostype = strdup("hvm");
    else
        ostype = strdup("linux");

    if (ostype == NULL)
        virReportOOMError();

    return ostype;
}

int
xenHypervisorHasDomain(virConnectPtr conn,
                       int id)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return 0;

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
        return 0;

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
        return 0;

    return 1;
}

virDomainPtr
xenHypervisorLookupDomainByID(virConnectPtr conn,
                              int id)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    virDomainPtr ret;
    char *name;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return NULL;

    XEN_GETDOMAININFO_CLEAR(dominfo);

    if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
        return NULL;

    if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
        return NULL;

    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);
    if (!name)
        return NULL;

    ret = virGetDomain(conn, name, XEN_GETDOMAININFO_UUID(dominfo));
    if (ret)
        ret->id = id;
    VIR_FREE(name);
    return ret;
}


virDomainPtr
xenHypervisorLookupDomainByUUID(virConnectPtr conn,
                                const unsigned char *uuid)
{
    xen_getdomaininfolist dominfos;
    xenUnifiedPrivatePtr priv;
    virDomainPtr ret;
    char *name;
    int maxids = 100, nids, i, id;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return NULL;

 retry:
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();
        return NULL;
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    nids = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (nids < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return NULL;
    }

    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nids == maxids) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        if (maxids < 65000) {
            maxids *= 2;
            goto retry;
        }
        return NULL;
    }

    id = -1;
    for (i = 0; i < nids; i++) {
        if (memcmp(XEN_GETDOMAININFOLIST_UUID(dominfos, i), uuid, VIR_UUID_BUFLEN) == 0) {
            id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
            break;
        }
    }
    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (id == -1)
        return NULL;

    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);
    if (!name)
        return NULL;

    ret = virGetDomain(conn, name, uuid);
    if (ret)
        ret->id = id;
    VIR_FREE(name);
    return ret;
}

/**
 * xenHypervisorGetMaxVcpus:
 *
 * Returns the maximum number of virtual CPUs defined by Xen.
 */
int
xenHypervisorGetMaxVcpus(virConnectPtr conn,
                         const char *type ATTRIBUTE_UNUSED)
{
    xenUnifiedPrivatePtr priv;

    if (conn == NULL)
        return -1;
    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return -1;

    return MAX_VIRT_CPUS;
}

/**
 * xenHypervisorGetDomMaxMemory:
 * @conn: connection data
 * @id: domain id
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
unsigned long
xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;

    if (conn == NULL)
        return 0;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return 0;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return 0;

    return (unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages;
}

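/*
 * Illustrative sketch, not part of the driver: xenHypervisorGetDomMaxMemory()
 * above scales XEN_GETDOMAININFO_MAX_PAGES() by kb_per_pages, i.e. converts
 * a page count to kilobytes using the host page size.  A hypothetical caller
 * with connection "conn" and domain id 1 treats 0 as failure:
 *
 *     unsigned long maxkb = xenHypervisorGetDomMaxMemory(conn, 1);
 *     if (maxkb == 0)
 *         ... lookup failed or domain does not exist ...
 */
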
/**
 * xenHypervisorGetMaxMemory:
 * @domain: a domain object or NULL
 *
 * Retrieve the maximum amount of physical memory allocated to a
 * domain. If domain is NULL, then this gets the amount of memory reserved
 * to Domain0, i.e. the domain where the application runs.
 *
 * Returns the memory size in kilobytes or 0 in case of error.
 */
static unsigned long long ATTRIBUTE_NONNULL(1)
xenHypervisorGetMaxMemory(virDomainPtr domain)
{
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return 0;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return 0;

    return xenHypervisorGetDomMaxMemory(domain->conn, domain->id);
}

/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)
        return -1;

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return -1;

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flags */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            /* The domain is shutdown.  Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NOSTATE;
    }

    /*
     * the API brings back the cpu time in nanoseconds,
     * convert to microseconds, same thing convert to
     * kilobytes from page counts
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    if (info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
    return 0;
}

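/*
 * Illustrative sketch, not part of the driver: reading the state decoded by
 * xenHypervisorGetDomInfo() above; info.memory and info.maxMem are expressed
 * in kilobytes:
 *
 *     virDomainInfo info;
 *     if (xenHypervisorGetDomInfo(conn, 1, &info) == 0 &&
 *         info.state == VIR_DOMAIN_RUNNING)
 *         ... domain 1 is running with info.nrVirtCpu virtual CPUs ...
 */
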
/**
 * xenHypervisorGetDomainInfo:
 * @domain: pointer to the domain block
 * @info: the place where information should be stored
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || info == NULL ||
        (domain->id < 0))
        return -1;

    return xenHypervisorGetDomInfo(domain->conn, domain->id, info);
}

/**
 * xenHypervisorGetDomainState:
 * @domain: pointer to the domain block
 * @state: returned state of the domain
 * @reason: returned reason for the state
 * @flags: additional flags, 0 for now
 *
 * Do a hypervisor call to get the related set of domain information.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomainState(virDomainPtr domain,
                            int *state,
                            int *reason,
                            unsigned int flags)
{
    xenUnifiedPrivatePtr priv = domain->conn->privateData;
    virDomainInfo info;

    virCheckFlags(0, -1);

    if (domain->conn == NULL)
        return -1;

    if (priv->handle < 0 || domain->id < 0)
        return -1;

    if (xenHypervisorGetDomInfo(domain->conn, domain->id, &info) < 0)
        return -1;

    *state = info.state;
    if (reason)
        *reason = 0;

    return 0;
}

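/*
 * Illustrative sketch, not part of the driver: xenHypervisorGetDomainState()
 * above accepts no flags and always reports a reason of 0, so a hypothetical
 * caller would do:
 *
 *     int state, reason;
 *     if (xenHypervisorGetDomainState(dom, &state, &reason, 0) == 0 &&
 *         state == VIR_DOMAIN_PAUSED)
 *         ... domain is paused ...
 */
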
/**
 * xenHypervisorNodeGetCellsFreeMemory:
 * @conn: pointer to the hypervisor connection
 * @freeMems: pointer to the array of unsigned long long
 * @startCell: index of first cell to return freeMems info on.
 * @maxCells: Maximum number of cells for which freeMems information can
 *            be returned.
 *
 * This call returns the amount of free memory in one or more NUMA cells.
 * The @freeMems array must be allocated by the caller and will be filled
 * with the amount of free memory in kilobytes for each cell requested,
 * starting with startCell (in freeMems[0]), up to either
 * (startCell + maxCells), or the number of additional cells in the node,
 * whichever is smaller.
 *
 * Returns the number of entries filled in freeMems, or -1 in case of error.
 */
int
xenHypervisorNodeGetCellsFreeMemory(virConnectPtr conn, unsigned long long *freeMems,
                                    int startCell, int maxCells)
{
    xen_op_v2_sys op_sys;
    int i, j, ret;
    xenUnifiedPrivatePtr priv;

    if (conn == NULL) {
        virReportError(VIR_ERR_INVALID_ARG, "%s", _("invalid argument"));
        return -1;
    }

    priv = conn->privateData;

    if (priv->nbNodeCells < 0) {
        virReportError(VIR_ERR_XEN_CALL, "%s",
                       _("cannot determine actual number of cells"));
        return -1;
    }

    if ((maxCells < 1) || (startCell >= priv->nbNodeCells)) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid argument"));
        return -1;
    }

    /*
     * Support only hv_versions.sys_interface >= 4
     */
    if (hv_versions.sys_interface < SYS_IFACE_MIN_VERS_NUMA) {
        virReportError(VIR_ERR_XEN_CALL, "%s",
                       _("unsupported in sys interface < 4"));
        return -1;
    }

    if (priv->handle < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("priv->handle invalid"));
        return -1;
    }

    memset(&op_sys, 0, sizeof(op_sys));
    op_sys.cmd = XEN_V2_OP_GETAVAILHEAP;

    for (i = startCell, j = 0; (i < priv->nbNodeCells) && (j < maxCells); i++, j++) {
        if (hv_versions.sys_interface >= 5)
            op_sys.u.availheap5.node = i;
        else
            op_sys.u.availheap.node = i;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0) {
            return -1;
        }
        if (hv_versions.sys_interface >= 5)
            freeMems[j] = op_sys.u.availheap5.avail_bytes;
        else
            freeMems[j] = op_sys.u.availheap.avail_bytes;
    }
    return j;
}

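/*
 * Illustrative sketch, not part of the driver: as documented above, @freeMems
 * must provide room for @maxCells entries.  Querying the first two NUMA cells
 * of a hypothetical connection "conn" might look like:
 *
 *     unsigned long long freeMems[2];
 *     int n = xenHypervisorNodeGetCellsFreeMemory(conn, freeMems, 0, 2);
 *     if (n > 0)
 *         ... freeMems[0] .. freeMems[n - 1] hold the per-cell free memory ...
 */
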

/**
 * xenHypervisorPauseDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to pause the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorPauseDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return -1;

    ret = virXen_pausedomain(priv->handle, domain->id);
    if (ret < 0)
        return -1;
    return 0;
}

/**
 * xenHypervisorResumeDomain:
 * @domain: pointer to the domain block
 *
 * Do a hypervisor call to resume the given domain
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorResumeDomain(virDomainPtr domain)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return -1;

    ret = virXen_unpausedomain(priv->handle, domain->id);
    if (ret < 0)
        return -1;
    return 0;
}

/**
 * xenHypervisorDestroyDomainFlags:
 * @domain: pointer to the domain block
 * @flags: an OR'ed set of virDomainDestroyFlagsValues
 *
 * Do a hypervisor call to destroy the given domain
 *
 * Calling this function with no @flags set (equal to zero)
 * is equivalent to calling xenHypervisorDestroyDomain.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorDestroyDomainFlags(virDomainPtr domain,
                                unsigned int flags)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    virCheckFlags(0, -1);

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return -1;

    ret = virXen_destroydomain(priv->handle, domain->id);
    if (ret < 0)
        return -1;
    return 0;
}

/**
 * xenHypervisorSetMaxMemory:
 * @domain: pointer to the domain block
 * @memory: the max memory size in kilobytes.
 *
 * Do a hypervisor call to change the maximum amount of memory used
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0)
        return -1;

    ret = virXen_setmaxmem(priv->handle, domain->id, memory);
    if (ret < 0)
        return -1;
    return 0;
}

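/*
 * Illustrative sketch, not part of the driver: @memory is in kilobytes, so
 * raising a hypothetical domain's ceiling to 1 GiB would be:
 *
 *     if (xenHypervisorSetMaxMemory(dom, 1024 * 1024) < 0)
 *         ... hypercall failed ...
 */
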

/**
 * xenHypervisorSetVcpus:
 * @domain: pointer to domain object
 * @nvcpus: the new number of virtual CPUs for this domain
 *
 * Dynamically change the number of virtual CPUs used by the domain.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorSetVcpus(virDomainPtr domain, unsigned int nvcpus)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || domain->id < 0 || nvcpus < 1)
        return -1;

    ret = virXen_setmaxvcpus(priv->handle, domain->id, nvcpus);
    if (ret < 0)
        return -1;
    return 0;
}

/**
 * xenHypervisorPinVcpu:
 * @domain: pointer to domain object
 * @vcpu: virtual CPU number
 * @cpumap: pointer to a bit map of real CPUs (in 8-bit bytes)
 * @maplen: length of cpumap in bytes
 *
 * Dynamically change the real CPUs which can be allocated to a virtual CPU.
 *
 * Returns 0 in case of success, -1 in case of failure.
 */

int
xenHypervisorPinVcpu(virDomainPtr domain, unsigned int vcpu,
                     unsigned char *cpumap, int maplen)
{
    int ret;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (cpumap == NULL) || (maplen < 1))
        return -1;

    ret = virXen_setvcpumap(priv->handle, domain->id, vcpu,
                            cpumap, maplen);
    if (ret < 0)
        return -1;
    return 0;
}

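/*
 * Illustrative sketch, not part of the driver: each byte of @cpumap packs
 * eight physical CPUs, with CPU 0 in the lowest bit of byte 0 (the format
 * described for virDomainPinVcpu()).  Pinning vcpu 0 of a hypothetical
 * domain to physical CPUs 0-2 could be written as:
 *
 *     unsigned char map = (1 << 0) | (1 << 1) | (1 << 2);
 *     if (xenHypervisorPinVcpu(dom, 0, &map, 1) < 0)
 *         ... pinning failed ...
 */
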
/**
 * xenHypervisorGetVcpus:
 * @domain: pointer to domain object, or NULL for Domain0
 * @info: pointer to an array of virVcpuInfo structures (OUT)
 * @maxinfo: number of structures in info array
 * @cpumaps: pointer to a bit map of real CPUs for all vcpus of this domain (in 8-bit bytes) (OUT)
 *	If cpumaps is NULL, then no cpumap information is returned by the API.
 *	It's assumed there are <maxinfo> cpumaps in the cpumaps array.
 *	The memory allocated to cpumaps must be (maxinfo * maplen) bytes
 *	(ie: calloc(maxinfo, maplen)).
 *	One cpumap inside cpumaps has the format described in virDomainPinVcpu() API.
 * @maplen: number of bytes in one cpumap, from 1 up to size of CPU map in
 *	underlying virtualization system (Xen...).
 *
 * Extract information about virtual CPUs of domain, store it in info array
 * and also in cpumaps if this pointer isn't NULL.
 *
 * Returns the number of info filled in case of success, -1 in case of failure.
 */
int
xenHypervisorGetVcpus(virDomainPtr domain, virVcpuInfoPtr info, int maxinfo,
                      unsigned char *cpumaps, int maplen)
{
    xen_getdomaininfo dominfo;
    int ret;
    xenUnifiedPrivatePtr priv;
    virVcpuInfoPtr ipt;
    int nbinfo, i;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0 || (domain->id < 0) ||
        (info == NULL) || (maxinfo < 1) ||
        (sizeof(cpumap_t) & 7)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domain shut off or invalid"));
        return -1;
    }
    if ((cpumaps != NULL) && (maplen < 1)) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("invalid argument"));
        return -1;
    }
    /* first get the number of virtual CPUs in this domain */
    XEN_GETDOMAININFO_CLEAR(dominfo);
    ret = virXen_getdomaininfo(priv->handle, domain->id,
                               &dominfo);

    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("cannot get domain details"));
        return -1;
    }
    nbinfo = XEN_GETDOMAININFO_CPUCOUNT(dominfo) + 1;
    if (nbinfo > maxinfo) nbinfo = maxinfo;

    if (cpumaps != NULL)
        memset(cpumaps, 0, maxinfo * maplen);

    for (i = 0, ipt = info; i < nbinfo; i++, ipt++) {
        if ((cpumaps != NULL) && (i < maxinfo)) {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt,
                                      (unsigned char *)VIR_GET_CPUMAP(cpumaps, maplen, i),
                                      maplen);
            if (ret < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("cannot get VCPUs info"));
                return -1;
            }
        } else {
            ret = virXen_getvcpusinfo(priv->handle, domain->id, i,
                                      ipt, NULL, 0);
            if (ret < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("cannot get VCPUs info"));
                return -1;
            }
        }
    }
    return nbinfo;
}

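/*
 * Illustrative sketch, not part of the driver: following the allocation rule
 * documented above, cpumaps is (maxinfo * maplen) bytes:
 *
 *     int maxinfo = 4, maplen = 1, n;
 *     virVcpuInfo info[4];
 *     unsigned char *cpumaps = calloc(maxinfo, maplen);
 *     if (cpumaps) {
 *         n = xenHypervisorGetVcpus(dom, info, maxinfo, cpumaps, maplen);
 *         ... info[0..n-1] and the first n cpumaps are filled when n > 0 ...
 *         free(cpumaps);
 *     }
 */
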
/**
 * xenHypervisorGetVcpuMax:
 *
 *  Returns the maximum number of virtual CPUs supported for
 *  the guest VM. If the guest is inactive, this is the maximum
 *  number of CPUs defined by Xen. If the guest is running, this
 *  reflects the maximum number of virtual CPUs the guest was booted with.
 */
int
xenHypervisorGetVcpuMax(virDomainPtr domain)
{
    xen_getdomaininfo dominfo;
    int ret;
    int maxcpu;
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0)
        return -1;

    /* inactive domain */
    if (domain->id < 0) {
        maxcpu = MAX_VIRT_CPUS;
    } else {
        XEN_GETDOMAININFO_CLEAR(dominfo);
        ret = virXen_getdomaininfo(priv->handle, domain->id,
                                   &dominfo);

        if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id))
            return -1;
        maxcpu = XEN_GETDOMAININFO_MAXCPUID(dominfo) + 1;
    }

    return maxcpu;
}

/**
 * xenHavePrivilege()
 *
 * Return true if the current process should be able to connect to Xen.
 */
int
xenHavePrivilege(void)
{
#ifdef __sun
    return priv_ineffect(PRIV_XVM_CONTROL);
#else
    return access(XEN_HYPERVISOR_SOCKET, R_OK) == 0;
#endif
}