Commit 365d88ac authored by Oleksandr Andrushchenko, committed by Tom Rini

xen: Add essential and required interface headers

Add only the essential and required Xen interface headers, taken from
the stable Linux kernel branch stable/linux-5.7.y at commit
66dfe4522160 (Linux 5.7.5).

These are better suited for U-Boot than the original headers from
Xen, as they are stripped-down versions of the same.

At the same time, use the public protocol headers from Xen
RELEASE-4.13.1, at commit 6278553325a9 ("update Xen version to 4.13.1"),
as those have more comments in them.
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Anastasiia Lukianenko <anastasiia_lukianenko@epam.com>
Acked-by: Peng Fan <peng.fan@nxp.com>
Parent 75189730
/* SPDX-License-Identifier: GPL-2.0
*
* Guest OS interface to ARM Xen.
*
* Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
*/
#ifndef _ASM_ARM_XEN_INTERFACE_H
#define _ASM_ARM_XEN_INTERFACE_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#define uint64_aligned_t u64 __attribute__((aligned(8)))
#define __DEFINE_GUEST_HANDLE(name, type) \
typedef struct { union { type * p; uint64_aligned_t q; }; } \
__guest_handle_ ## name
#define DEFINE_GUEST_HANDLE_STRUCT(name) \
__DEFINE_GUEST_HANDLE(name, struct name)
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
#define GUEST_HANDLE(name) __guest_handle_ ## name
#define set_xen_guest_handle(hnd, val) \
do { \
if (sizeof(hnd) == 8) \
*(u64 *)&(hnd) = 0; \
(hnd).p = val; \
} while (0)
#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
#ifndef __ASSEMBLY__
/* Explicitly size integers that represent pfns in the interface with
* Xen so that we can have one ABI that works for 32 and 64 bit guests.
* Note that this means that the xen_pfn_t type may be capable of
* representing pfn's which the guest cannot represent in its own pfn
* type. However since pfn space is controlled by the guest this is
* fine since it simply wouldn't be able to create any such pfns in
* the first place.
*/
typedef u64 xen_pfn_t;
#define PRI_xen_pfn "llx"
typedef u64 xen_ulong_t;
#define PRI_xen_ulong "llx"
typedef s64 xen_long_t;
#define PRI_xen_long "llx"
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(u64);
DEFINE_GUEST_HANDLE(u32);
DEFINE_GUEST_HANDLE(xen_pfn_t);
DEFINE_GUEST_HANDLE(xen_ulong_t);
/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 1
struct arch_vcpu_info { };
struct arch_shared_info { };
/* TODO: Move pvclock definitions some place arch independent */
struct pvclock_vcpu_time_info {
u32 version;
u32 pad0;
u64 tsc_timestamp;
u64 system_time;
u32 tsc_to_system_mul;
s8 tsc_shift;
u8 flags;
u8 pad[2];
} __attribute__((__packed__)); /* 32 bytes */
/* It is OK to have a 16-byte struct with no padding because it is packed */
struct pvclock_wall_clock {
u32 version;
u32 sec;
u32 nsec;
u32 sec_hi;
} __attribute__((__packed__));
#endif
#endif /* _ASM_ARM_XEN_INTERFACE_H */
/* SPDX-License-Identifier: GPL-2.0
*
* event_channel.h
*
* Event channels between domains.
*
* Copyright (c) 2003-2004, K A Fraser.
*/
#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
#define __XEN_PUBLIC_EVENT_CHANNEL_H__
#include <xen/interface/xen.h>
typedef u32 evtchn_port_t;
DEFINE_GUEST_HANDLE(evtchn_port_t);
/*
* EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
* accepting interdomain bindings from domain <remote_dom>. A fresh port
* is allocated in <dom> and returned as <port>.
* NOTES:
* 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
* 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
#define EVTCHNOP_alloc_unbound 6
struct evtchn_alloc_unbound {
/* IN parameters */
domid_t dom, remote_dom;
/* OUT parameters */
evtchn_port_t port;
};
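/*
 * Example (editor's sketch, not part of the original header): allocating
 * an unbound port that <remote> may later bind to. Assumes a
 * HYPERVISOR_event_channel_op(cmd, arg) hypercall wrapper is provided by
 * the architecture's hypercall layer (the name is illustrative).
 */
static inline int example_alloc_unbound(domid_t remote, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc = {
		.dom = DOMID_SELF,
		.remote_dom = remote,
	};
	int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);

	if (!rc)
		*port = alloc.port;
	return rc;
}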
/*
* EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
* the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
* a port that is unbound and marked as accepting bindings from the calling
* domain. A fresh port is allocated in the calling domain and returned as
* <local_port>.
* NOTES:
* 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
#define EVTCHNOP_bind_interdomain 0
struct evtchn_bind_interdomain {
/* IN parameters. */
domid_t remote_dom;
evtchn_port_t remote_port;
/* OUT parameters. */
evtchn_port_t local_port;
};
/*
* EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
* vcpu.
* NOTES:
* 1. A virtual IRQ may be bound to at most one event channel per vcpu.
* 2. The allocated event channel is bound to the specified vcpu. The binding
* may not be changed.
*/
#define EVTCHNOP_bind_virq 1
struct evtchn_bind_virq {
/* IN parameters. */
u32 virq;
u32 vcpu;
/* OUT parameters. */
evtchn_port_t port;
};
/*
* EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
* NOTES:
* 1. A physical IRQ may be bound to at most one event channel per domain.
* 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
*/
#define EVTCHNOP_bind_pirq 2
struct evtchn_bind_pirq {
/* IN parameters. */
u32 pirq;
#define BIND_PIRQ__WILL_SHARE 1
u32 flags; /* BIND_PIRQ__* */
/* OUT parameters. */
evtchn_port_t port;
};
/*
* EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
* NOTES:
* 1. The allocated event channel is bound to the specified vcpu. The binding
* may not be changed.
*/
#define EVTCHNOP_bind_ipi 7
struct evtchn_bind_ipi {
u32 vcpu;
/* OUT parameters. */
evtchn_port_t port;
};
/*
* EVTCHNOP_close: Close a local event channel <port>. If the channel is
* interdomain then the remote end is placed in the unbound state
* (EVTCHNSTAT_unbound), awaiting a new connection.
*/
#define EVTCHNOP_close 3
struct evtchn_close {
/* IN parameters. */
evtchn_port_t port;
};
/*
* EVTCHNOP_send: Send an event to the remote end of the channel whose local
* endpoint is <port>.
*/
#define EVTCHNOP_send 4
struct evtchn_send {
/* IN parameters. */
evtchn_port_t port;
};
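/*
 * Example (editor's sketch): signalling the remote end of an interdomain
 * channel, using the same assumed hypercall wrapper as above.
 */
static inline int example_notify_remote(evtchn_port_t port)
{
	struct evtchn_send send = { .port = port };

	return HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}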
/*
* EVTCHNOP_status: Get the current status of the communication channel which
* has an endpoint at <dom, port>.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may obtain the status of an event
* channel for which <dom> is not DOMID_SELF.
*/
#define EVTCHNOP_status 5
struct evtchn_status {
/* IN parameters */
domid_t dom;
evtchn_port_t port;
/* OUT parameters */
#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
u32 status;
u32 vcpu; /* VCPU to which this channel is bound. */
union {
struct {
domid_t dom;
} unbound; /* EVTCHNSTAT_unbound */
struct {
domid_t dom;
evtchn_port_t port;
} interdomain; /* EVTCHNSTAT_interdomain */
u32 pirq; /* EVTCHNSTAT_pirq */
u32 virq; /* EVTCHNSTAT_virq */
} u;
};
/*
* EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
* event is pending.
* NOTES:
* 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
* the binding. This binding cannot be changed.
* 2. All other channels notify vcpu0 by default. This default is set when
* the channel is allocated (a port that is freed and subsequently reused
* has its binding reset to vcpu0).
*/
#define EVTCHNOP_bind_vcpu 8
struct evtchn_bind_vcpu {
/* IN parameters. */
evtchn_port_t port;
u32 vcpu;
};
/*
* EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
* a notification to the appropriate VCPU if an event is pending.
*/
#define EVTCHNOP_unmask 9
struct evtchn_unmask {
/* IN parameters. */
evtchn_port_t port;
};
/*
* EVTCHNOP_reset: Close all event channels associated with specified domain.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
*/
#define EVTCHNOP_reset 10
struct evtchn_reset {
/* IN parameters. */
domid_t dom;
};
/*
* EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
*/
#define EVTCHNOP_init_control 11
struct evtchn_init_control {
/* IN parameters. */
u64 control_gfn;
u32 offset;
u32 vcpu;
/* OUT parameters. */
u8 link_bits;
u8 _pad[7];
};
/*
* EVTCHNOP_expand_array: add an additional page to the event array.
*/
#define EVTCHNOP_expand_array 12
struct evtchn_expand_array {
/* IN parameters. */
u64 array_gfn;
};
/*
* EVTCHNOP_set_priority: set the priority for an event channel.
*/
#define EVTCHNOP_set_priority 13
struct evtchn_set_priority {
/* IN parameters. */
evtchn_port_t port;
u32 priority;
};
struct evtchn_op {
u32 cmd; /* EVTCHNOP_* */
union {
struct evtchn_alloc_unbound alloc_unbound;
struct evtchn_bind_interdomain bind_interdomain;
struct evtchn_bind_virq bind_virq;
struct evtchn_bind_pirq bind_pirq;
struct evtchn_bind_ipi bind_ipi;
struct evtchn_close close;
struct evtchn_send send;
struct evtchn_status status;
struct evtchn_bind_vcpu bind_vcpu;
struct evtchn_unmask unmask;
} u;
};
DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
/*
* 2-level ABI
*/
#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
/*
* FIFO ABI
*/
/* Events may have priorities from 0 (highest) to 15 (lowest). */
#define EVTCHN_FIFO_PRIORITY_MAX 0
#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
#define EVTCHN_FIFO_PRIORITY_MIN 15
#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
typedef u32 event_word_t;
#define EVTCHN_FIFO_PENDING 31
#define EVTCHN_FIFO_MASKED 30
#define EVTCHN_FIFO_LINKED 29
#define EVTCHN_FIFO_BUSY 28
#define EVTCHN_FIFO_LINK_BITS 17
#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
struct evtchn_fifo_control_block {
u32 ready;
u32 _rsvd;
event_word_t head[EVTCHN_FIFO_MAX_QUEUES];
};
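/*
 * Example (editor's sketch): in the FIFO ABI each event is a 32-bit word
 * whose top bits carry state and whose low EVTCHN_FIFO_LINK_BITS bits
 * link the event into its priority queue. A consumer would treat an
 * event as deliverable when it is PENDING and not MASKED:
 */
static inline int example_event_deliverable(const event_word_t *word)
{
	event_word_t w = *(const volatile event_word_t *)word;

	return (w & (1u << EVTCHN_FIFO_PENDING)) &&
	       !(w & (1u << EVTCHN_FIFO_MASKED));
}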
#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
/* SPDX-License-Identifier: MIT
*
* grant_table.h
*
* Interface for granting foreign access to page frames, and receiving
* page-ownership transfers.
*
* Copyright (c) 2004, K A Fraser
*/
#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
#define __XEN_PUBLIC_GRANT_TABLE_H__
#include <xen/interface/xen.h>
/***********************************
* GRANT TABLE REPRESENTATION
*/
/* Some rough guidelines on accessing and updating grant-table entries
* in a concurrency-safe manner. For more information, Linux contains a
* reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
*
* NB. WMB is a no-op on current-generation x86 processors. However, a
* compiler barrier will still be required.
*
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:
* GTF_permit_access: Frame to which access is permitted.
* GTF_accept_transfer: Pseudo-phys frame slot being filled by new
* frame, or zero if none.
* 3. Write memory barrier (WMB).
* 4. Write ent->flags, inc. valid type.
*
* Invalidating an unused GTF_permit_access entry:
* 1. flags = ent->flags.
* 2. Observe that !(flags & (GTF_reading|GTF_writing)).
* 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
* NB. No need for WMB as reuse of entry is control-dependent on success of
* step 3, and all architectures guarantee ordering of ctrl-dep writes.
*
* Invalidating an in-use GTF_permit_access entry:
* This cannot be done directly. Request assistance from the domain controller
* which can set a timeout on the use of a grant entry and take necessary
* action. (NB. This is not yet implemented!).
*
* Invalidating an unused GTF_accept_transfer entry:
* 1. flags = ent->flags.
* 2. Observe that !(flags & GTF_transfer_committed). [*]
* 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
* NB. No need for WMB as reuse of entry is control-dependent on success of
* step 3, and all architectures guarantee ordering of ctrl-dep writes.
* [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
* The guest must /not/ modify the grant entry until the address of the
* transferred frame is written. It is safe for the guest to spin waiting
* for this to occur (detect by observing GTF_transfer_completed in
* ent->flags).
*
* Invalidating a committed GTF_accept_transfer entry:
* 1. Wait for (ent->flags & GTF_transfer_completed).
*
* Changing a GTF_permit_access from writable to read-only:
* Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
*
* Changing a GTF_permit_access from read-only to writable:
* Use SMP-safe bit-setting instruction.
*/
/*
* Reference to a grant entry in a specified domain's grant table.
*/
typedef u32 grant_ref_t;
/*
* A grant table comprises a packed array of grant entries in one or more
* page frames shared between Xen and a guest.
* [XEN]: This field is written by Xen and read by the sharing guest.
* [GST]: This field is written by the guest and read by Xen.
*/
/*
* Version 1 of the grant table entry structure is maintained purely
* for backwards compatibility. New guests should use version 2.
*/
struct grant_entry_v1 {
/* GTF_xxx: various type and flag information. [XEN,GST] */
u16 flags;
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
* GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
* GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
*/
u32 frame;
};
/*
* Type of grant entry.
* GTF_invalid: This grant entry grants no privileges.
* GTF_permit_access: Allow @domid to map/access @frame.
* GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
* to this guest. Xen writes the page number to @frame.
* GTF_transitive: Allow @domid to transitively access a subrange of
* @trans_grant in @trans_domid. No mappings are allowed.
*/
#define GTF_invalid (0U << 0)
#define GTF_permit_access (1U << 0)
#define GTF_accept_transfer (2U << 0)
#define GTF_transitive (3U << 0)
#define GTF_type_mask (3U << 0)
/*
* Subflags for GTF_permit_access.
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
* GTF_sub_page: Grant access to only a subrange of the page. @domid
* will only be allowed to copy from the grant, and not
* map it. [GST]
*/
#define _GTF_readonly (2)
#define GTF_readonly (1U << _GTF_readonly)
#define _GTF_reading (3)
#define GTF_reading (1U << _GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U << _GTF_writing)
#define _GTF_sub_page (8)
#define GTF_sub_page (1U << _GTF_sub_page)
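/*
 * Example (editor's sketch): introducing a GTF_permit_access entry
 * following the ordering rules described above: write domid and frame
 * first, then a write barrier, then the flags that validate the entry.
 * wmb() is assumed to be the platform's write-barrier macro.
 */
static inline void example_grant_access(struct grant_entry_v1 *ent,
					domid_t domid, u32 frame,
					int readonly)
{
	ent->domid = domid;
	ent->frame = frame;
	wmb();	/* frame and domid must be visible before the flags */
	ent->flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
}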
/*
* Subflags for GTF_accept_transfer:
* GTF_transfer_committed: Xen sets this flag to indicate that it is committed
* to transferring ownership of a page frame. When a guest sees this flag
* it must /not/ modify the grant entry until GTF_transfer_completed is
* set by Xen.
* GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
* after reading GTF_transfer_committed. Xen will always write the frame
* address, followed by ORing this flag, in a timely manner.
*/
#define _GTF_transfer_committed (2)
#define GTF_transfer_committed (1U << _GTF_transfer_committed)
#define _GTF_transfer_completed (3)
#define GTF_transfer_completed (1U << _GTF_transfer_completed)
/*
* Version 2 grant table entries. These fulfil the same role as
* version 1 entries, but can represent more complicated operations.
* Any given domain will have either a version 1 or a version 2 table,
* and every entry in the table will be the same version.
*
* The interface by which domains use grant references does not depend
* on the grant table version in use by the other domain.
*/
/*
* Version 1 and version 2 grant entries share a common prefix. The
* fields of the prefix are documented as part of struct
* grant_entry_v1.
*/
struct grant_entry_header {
u16 flags;
domid_t domid;
};
/*
* Version 2 of the grant entry structure, here is a union because three
* different types are suppotted: full_page, sub_page and transitive.
*/
union grant_entry_v2 {
struct grant_entry_header hdr;
/*
* This member is used for V1-style full page grants, where either:
*
* -- hdr.type is GTF_accept_transfer, or
* -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
*
* In that case, the frame field has the same semantics as the
* field of the same name in the V1 entry structure.
*/
struct {
struct grant_entry_header hdr;
u32 pad0;
u64 frame;
} full_page;
/*
* If the grant type is GTF_permit_access and GTF_sub_page is set,
* @domid is allowed to access bytes [@page_off,@page_off+@length)
* in frame @frame.
*/
struct {
struct grant_entry_header hdr;
u16 page_off;
u16 length;
u64 frame;
} sub_page;
/*
* If the grant is GTF_transitive, @domid is allowed to use the
* grant @gref in domain @trans_domid, as if it was the local
* domain. Obviously, the transitive access must be compatible
* with the original grant.
*/
struct {
struct grant_entry_header hdr;
domid_t trans_domid;
u16 pad0;
grant_ref_t gref;
} transitive;
u32 __spacer[4]; /* Pad to a power of two */
};
typedef u16 grant_status_t;
/***********************************
* GRANT TABLE QUERIES AND USES
*/
/*
* Handle to track a mapping created via a grant reference.
*/
typedef u32 grant_handle_t;
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
* that must be presented later to destroy the mapping(s). On error, <handle>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
* via which I/O devices may access the granted frame.
* 2. If GNTMAP_host_map is specified then a mapping will be added at
* either a host virtual address in the current address space, or at
* a PTE at the specified machine address. The type of mapping to
* perform is selected through the GNTMAP_contains_pte flag, and the
* address is specified in <host_addr>.
* 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
* host mapping is destroyed by other means then it is *NOT* guaranteed
* to be accounted to the correct grant reference!
*/
#define GNTTABOP_map_grant_ref 0
struct gnttab_map_grant_ref {
/* IN parameters. */
u64 host_addr;
u32 flags; /* GNTMAP_* */
grant_ref_t ref;
domid_t dom;
/* OUT parameters. */
s16 status; /* GNTST_* */
grant_handle_t handle;
u64 dev_bus_addr;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
/*
* GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
* tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
* field is ignored. If non-zero, they must refer to a device/host mapping
* that is tracked by <handle>.
* NOTES:
* 1. The call may fail in an undefined manner if either mapping is not
* tracked by <handle>.
* 2. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
#define GNTTABOP_unmap_grant_ref 1
struct gnttab_unmap_grant_ref {
/* IN parameters. */
u64 host_addr;
u64 dev_bus_addr;
grant_handle_t handle;
/* OUT parameters. */
s16 status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
/*
* GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
* <nr_frames> pages. The frame addresses are written to the <frame_list>.
* Only <nr_frames> addresses are written, even if the table is larger.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
* 3. Xen may not support more than a single grant-table page per domain.
*/
#define GNTTABOP_setup_table 2
struct gnttab_setup_table {
/* IN parameters. */
domid_t dom;
u32 nr_frames;
/* OUT parameters. */
s16 status; /* GNTST_* */
GUEST_HANDLE(xen_pfn_t) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
/*
* GNTTABOP_dump_table: Dump the contents of the grant table to the
* xen console. Debugging use only.
*/
#define GNTTABOP_dump_table 3
struct gnttab_dump_table {
/* IN parameters. */
domid_t dom;
/* OUT parameters. */
s16 status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
/*
* GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
* foreign domain has previously registered its interest in the transfer via
* <domid, ref>.
*
* Note that, even if the transfer fails, the specified page no longer belongs
* to the calling domain *unless* the error is GNTST_bad_page.
*/
#define GNTTABOP_transfer 4
struct gnttab_transfer {
/* IN parameters. */
xen_pfn_t mfn;
domid_t domid;
grant_ref_t ref;
/* OUT parameters. */
s16 status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
/*
* GNTTABOP_copy: Hypervisor-based copy. The source and destination can
* each be either an MFN or, for foreign domains, a grant reference; the
* foreign domain has to grant read/write access in its grant table.
*
* The flags specify what type source and destinations are (either MFN
* or grant reference).
*
* Note that this can also be used to copy data between two domains
* via a third party if the source and destination domains have previously
* granted appropriate access to their pages to the third party.
*
* source_offset specifies an offset in the source frame, dest_offset
* the offset in the target frame and len specifies the number of
* bytes to be copied.
*/
#define _GNTCOPY_source_gref (0)
#define GNTCOPY_source_gref (1 << _GNTCOPY_source_gref)
#define _GNTCOPY_dest_gref (1)
#define GNTCOPY_dest_gref (1 << _GNTCOPY_dest_gref)
#define GNTTABOP_copy 5
struct gnttab_copy {
/* IN parameters. */
struct {
union {
grant_ref_t ref;
xen_pfn_t gmfn;
} u;
domid_t domid;
u16 offset;
} source, dest;
u16 len;
u16 flags; /* GNTCOPY_* */
/* OUT parameters. */
s16 status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
/*
* GNTTABOP_query_size: Query the current and maximum sizes of the shared
* grant table.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
#define GNTTABOP_query_size 6
struct gnttab_query_size {
/* IN parameters. */
domid_t dom;
/* OUT parameters. */
u32 nr_frames;
u32 max_nr_frames;
s16 status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
/*
* GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
* tracked by <handle> but atomically replace the page table entry with one
* pointing to the machine address under <new_addr>. <new_addr> will be
* redirected to the null entry.
* NOTES:
* 1. The call may fail in an undefined manner if either mapping is not
* tracked by <handle>.
* 2. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
#define GNTTABOP_unmap_and_replace 7
struct gnttab_unmap_and_replace {
/* IN parameters. */
u64 host_addr;
u64 new_addr;
grant_handle_t handle;
/* OUT parameters. */
s16 status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
/*
* GNTTABOP_set_version: Request a particular version of the grant
* table shared table structure. This operation can only be performed
* once in any given domain. It must be performed before any grants
* are activated; otherwise, the domain will be stuck with version 1.
* The only defined versions are 1 and 2.
*/
#define GNTTABOP_set_version 8
struct gnttab_set_version {
/* IN parameters */
u32 version;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
/*
* GNTTABOP_get_status_frames: Get the list of frames used to store grant
* status for <dom>. In grant format version 2, the status is separated
* from the other shared grant fields to allow more efficient synchronization
* using barriers instead of atomic cmpexch operations.
* <nr_frames> specifies the size of vector <frame_list>.
* The frame addresses are returned in the <frame_list>.
* Only <nr_frames> addresses are returned, even if the table is larger.
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
#define GNTTABOP_get_status_frames 9
struct gnttab_get_status_frames {
/* IN parameters. */
u32 nr_frames;
domid_t dom;
/* OUT parameters. */
s16 status; /* GNTST_* */
GUEST_HANDLE(u64) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
/*
* GNTTABOP_get_version: Get the grant table version which is in
* effect for domain <dom>.
*/
#define GNTTABOP_get_version 10
struct gnttab_get_version {
/* IN parameters */
domid_t dom;
u16 pad;
/* OUT parameters */
u32 version;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
/*
* Issue one or more cache maintenance operations on a portion of a
* page granted to the calling domain by a foreign domain.
*/
#define GNTTABOP_cache_flush 12
struct gnttab_cache_flush {
union {
u64 dev_bus_addr;
grant_ref_t ref;
} a;
u16 offset; /* offset from start of grant */
u16 length; /* size within the grant */
#define GNTTAB_CACHE_CLEAN (1 << 0)
#define GNTTAB_CACHE_INVAL (1 << 1)
#define GNTTAB_CACHE_SOURCE_GREF (1 << 31)
u32 op;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
/*
* Bitfield values for update_pin_status.flags.
*/
/* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map (0)
#define GNTMAP_device_map (1 << _GNTMAP_device_map)
/* Map the grant entry for access by host CPUs. */
#define _GNTMAP_host_map (1)
#define GNTMAP_host_map (1 << _GNTMAP_host_map)
/* Accesses to the granted frame will be restricted to read-only access. */
#define _GNTMAP_readonly (2)
#define GNTMAP_readonly (1 << _GNTMAP_readonly)
/*
* GNTMAP_host_map subflag:
* 0 => The host mapping is usable only by the guest OS.
* 1 => The host mapping is usable by guest OS + current application.
*/
#define _GNTMAP_application_map (3)
#define GNTMAP_application_map (1 << _GNTMAP_application_map)
/*
* GNTMAP_contains_pte subflag:
* 0 => This map request contains a host virtual address.
* 1 => This map request contains the machine address of the PTE to update.
*/
#define _GNTMAP_contains_pte (4)
#define GNTMAP_contains_pte (1 << _GNTMAP_contains_pte)
/*
* Bits to be placed in guest kernel available PTE bits (architecture
* dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
*/
#define _GNTMAP_guest_avail0 (16)
#define GNTMAP_guest_avail_mask ((u32)~0 << _GNTMAP_guest_avail0)
/*
* Values for error status returns. All errors are -ve.
*/
#define GNTST_okay (0) /* Normal return. */
#define GNTST_general_error (-1) /* General undefined error. */
#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
#define GNTST_address_too_big (-11) /* transfer page address too large. */
#define GNTST_eagain (-12) /* Operation not done; try again. */
#define GNTTABOP_error_msgs { \
"okay", \
"undefined error", \
"unrecognised domain id", \
"invalid grant reference", \
"invalid mapping handle", \
"invalid virtual address", \
"invalid device address", \
"no spare translation slot in the I/O MMU", \
"permission denied", \
"bad page", \
"copy arguments cross page boundary", \
"page address size too large", \
"operation not done; try again" \
}
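/*
 * Example (editor's sketch): mapping a foreign grant at a host virtual
 * address and unmapping it again. Assumes a
 * HYPERVISOR_grant_table_op(cmd, arg, count) hypercall wrapper (the
 * conventional prototype; the name is illustrative).
 */
static inline int example_map_grant(domid_t dom, grant_ref_t ref,
				    void *vaddr, grant_handle_t *handle)
{
	struct gnttab_map_grant_ref map = {
		.host_addr = (unsigned long)vaddr,
		.flags = GNTMAP_host_map,
		.ref = ref,
		.dom = dom,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1))
		return GNTST_general_error;
	if (map.status != GNTST_okay)
		return map.status;
	*handle = map.handle;
	return GNTST_okay;
}

static inline int example_unmap_grant(void *vaddr, grant_handle_t handle)
{
	struct gnttab_unmap_grant_ref unmap = {
		.host_addr = (unsigned long)vaddr,
		.handle = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmap, 1))
		return GNTST_general_error;
	return unmap.status;
}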
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
/* SPDX-License-Identifier: MIT
*
* hvm_op.h
*
* Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__
/* Get/set subcommands: the second argument of the hypercall is a
* pointer to a xen_hvm_param struct.
*/
#define HVMOP_set_param 0
#define HVMOP_get_param 1
struct xen_hvm_param {
domid_t domid; /* IN */
u32 index; /* IN */
u64 value; /* IN/OUT */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_param);
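/*
 * Example (editor's sketch): reading an HVM parameter for the current
 * domain, e.g. HVM_PARAM_CONSOLE_PFN or HVM_PARAM_CONSOLE_EVTCHN from
 * params.h. Assumes a HYPERVISOR_hvm_op(cmd, arg) hypercall wrapper
 * (the name is illustrative).
 */
static inline int example_hvm_get_param(u32 index, u64 *value)
{
	struct xen_hvm_param param = {
		.domid = DOMID_SELF,
		.index = index,
	};
	int rc = HYPERVISOR_hvm_op(HVMOP_get_param, &param);

	if (!rc)
		*value = param.value;
	return rc;
}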
/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying 9
struct xen_hvm_pagetable_dying {
/* Domain with a pagetable about to be destroyed. */
domid_t domid;
/* guest physical address of the toplevel pagetable dying */
aligned_u64 gpa;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_pagetable_dying);
enum hvmmem_type_t {
HVMMEM_ram_rw, /* Normal read/write guest RAM */
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and write go to the device model */
};
#define HVMOP_get_mem_type 15
/* Return hvmmem_type_t for the specified pfn. */
struct xen_hvm_get_mem_type {
/* Domain to be queried. */
domid_t domid;
/* OUT variable. */
u16 mem_type;
u16 pad[2]; /* align next field on 8-byte boundary */
/* IN variable. */
u64 pfn;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_get_mem_type);
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
/* SPDX-License-Identifier: MIT
*
* params.h
*
* HVM parameters. HVM (Hardware Virtual Machine) is the instance type
* that mimics a bare-metal server setup and provides better hardware
* isolation.
*/
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__
#include <xen/interface/hvm/hvm_op.h>
/*
* Parameter space for HVMOP_{set,get}_param.
*/
#define HVM_PARAM_CALLBACK_IRQ 0
/*
* How should CPU0 event-channel notifications be delivered?
*
* If val == 0 then CPU0 event-channel notifications are not delivered.
* If val != 0, val[63:56] encodes the type, as follows:
*/
#define HVM_PARAM_CALLBACK_TYPE_GSI 0
/*
* val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
* and disables all notifications.
*/
#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
/*
* val[55:0] is a delivery PCI INTx line:
* Domain = val[47:32], Bus = val[31:16], DevFn = val[15:8], IntX = val[1:0]
*/
#if defined(__i386__) || defined(__x86_64__)
#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
/*
* val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
* if this delivery method is available.
*/
#elif defined(__arm__) || defined(__aarch64__)
#define HVM_PARAM_CALLBACK_TYPE_PPI 2
/*
* val[55:16] needs to be zero.
* val[15:8] is interrupt flag of the PPI used by event-channel:
* bit 8: the PPI is edge(1) or level(0) triggered
* bit 9: the PPI is active low(1) or high(0)
* val[7:0] is a PPI number used by event-channel.
* This is only used by ARM/ARM64, and masking/EOI of the interrupt
* associated with the notification is handled by the interrupt controller.
*/
#endif
#define HVM_PARAM_STORE_PFN 1
#define HVM_PARAM_STORE_EVTCHN 2
#define HVM_PARAM_PAE_ENABLED 4
#define HVM_PARAM_IOREQ_PFN 5
#define HVM_PARAM_BUFIOREQ_PFN 6
/*
* Set mode for virtual timers (currently x86 only):
* delay_for_missed_ticks (default):
* Do not advance a vcpu's time beyond the correct delivery time for
* interrupts that have been missed due to preemption. Deliver missed
* interrupts when the vcpu is rescheduled and advance the vcpu's virtual
* time stepwise for each one.
* no_delay_for_missed_ticks:
* As above, missed interrupts are delivered, but guest time always tracks
* wallclock (i.e., real) time while doing so.
* no_missed_ticks_pending:
* No missed interrupts are held pending. Instead, to ensure ticks are
* delivered at some non-zero rate, if we detect missed ticks then the
* internal tick alarm is not disabled if the VCPU is preempted during the
* next tick period.
* one_missed_tick_pending:
* Missed interrupts are collapsed together and delivered as one 'late tick'.
* Guest time always tracks wallclock (i.e., real) time.
*/
#define HVM_PARAM_TIMER_MODE 10
#define HVMPTM_delay_for_missed_ticks 0
#define HVMPTM_no_delay_for_missed_ticks 1
#define HVMPTM_no_missed_ticks_pending 2
#define HVMPTM_one_missed_tick_pending 3
/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
#define HVM_PARAM_HPET_ENABLED 11
/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT 12
/* Device Model domain, defaults to 0. */
#define HVM_PARAM_DM_DOMAIN 13
/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14
/* TSS used on Intel when CR0.PE=0. */
#define HVM_PARAM_VM86_TSS 15
/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
#define HVM_PARAM_VPT_ALIGN 16
/* Console debug shared memory ring and event channel */
#define HVM_PARAM_CONSOLE_PFN 17
#define HVM_PARAM_CONSOLE_EVTCHN 18
#define HVM_NR_PARAMS 19
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
/* SPDX-License-Identifier: MIT
*
* console.h
*
* Console I/O interface for Xen guest OSes.
*
* Copyright (c) 2005, Keir Fraser
*/
#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
#define __XEN_PUBLIC_IO_CONSOLE_H__
typedef u32 XENCONS_RING_IDX;
#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring) - 1))
struct xencons_interface {
char in[1024];
char out[2048];
XENCONS_RING_IDX in_cons, in_prod;
XENCONS_RING_IDX out_cons, out_prod;
};
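/*
 * Example (editor's sketch): a producer-side write into the output ring.
 * The free space is sizeof(intf->out) minus the distance between
 * producer and consumer, and the data must be globally visible before
 * the producer index moves (mb()/wmb() are assumed platform barriers).
 * After updating out_prod the guest notifies the backend through the
 * console event channel.
 */
static inline int example_console_write(struct xencons_interface *intf,
					const char *data, int len)
{
	XENCONS_RING_IDX cons = intf->out_cons;
	XENCONS_RING_IDX prod = intf->out_prod;
	int sent = 0;

	mb();	/* read the indices before touching the ring contents */
	while (sent < len && (prod - cons) < sizeof(intf->out))
		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
	wmb();	/* ring contents must be visible before the new index */
	intf->out_prod = prod;
	return sent;
}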
#ifdef XEN_WANT_FLEX_CONSOLE_RING
#include "ring.h"
DEFINE_XEN_FLEX_RING(xencons);
#endif
#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
/* SPDX-License-Identifier: MIT
*
* protocols.h
*
* Copyright (c) 2008, Keir Fraser
*
* Xen protocols, which are used as ABI rules governing the format of all
* ring request and response structures.
*/
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi"
#if defined(__i386__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined(__arm__) || defined(__aarch64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
#else
# error arch fixup needed here
#endif
#endif
/* SPDX-License-Identifier: MIT
*
* xenbus.h
*
* Xenbus protocol details.
*
* Copyright (C) 2005 XenSource Ltd.
*/
#ifndef _XEN_PUBLIC_IO_XENBUS_H
#define _XEN_PUBLIC_IO_XENBUS_H
/*
* The state of either end of the Xenbus, i.e. the current communication
* status of initialisation across the bus. States here imply nothing about
* the state of the connection between the driver and the kernel's device
* layers.
*/
enum xenbus_state {
XenbusStateUnknown = 0,
XenbusStateInitialising = 1,
/*
* InitWait: Finished early initialisation but waiting for information
* from the peer or hotplug scripts.
*/
XenbusStateInitWait = 2,
/*
* Initialised: Waiting for a connection from the peer.
*/
XenbusStateInitialised = 3,
XenbusStateConnected = 4,
/*
* Closing: The device is being closed due to an error or an unplug event.
*/
XenbusStateClosing = 5,
XenbusStateClosed = 6,
/*
* Reconfiguring: The device is being reconfigured.
*/
XenbusStateReconfiguring = 7,
XenbusStateReconfigured = 8
};
typedef enum xenbus_state XenbusState;
#endif /* _XEN_PUBLIC_IO_XENBUS_H */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
/* SPDX-License-Identifier: MIT
*
* Details of the "wire" protocol between Xen Store Daemon and client
* library or guest kernel.
*
* Copyright (C) 2005 Rusty Russell IBM Corporation
*/
#ifndef _XS_WIRE_H
#define _XS_WIRE_H
enum xsd_sockmsg_type {
XS_CONTROL,
#define XS_DEBUG XS_CONTROL
XS_DIRECTORY,
XS_READ,
XS_GET_PERMS,
XS_WATCH,
XS_UNWATCH,
XS_TRANSACTION_START,
XS_TRANSACTION_END,
XS_INTRODUCE,
XS_RELEASE,
XS_GET_DOMAIN_PATH,
XS_WRITE,
XS_MKDIR,
XS_RM,
XS_SET_PERMS,
XS_WATCH_EVENT,
XS_ERROR,
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
/* XS_RESTRICT has been removed */
XS_RESET_WATCHES = XS_SET_TARGET + 2,
XS_DIRECTORY_PART,
XS_TYPE_COUNT, /* Number of valid types. */
XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
#define XS_WRITE_CREATE "CREATE"
#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
/* We hand errors as strings, for portability. */
struct xsd_errors {
int errnum;
const char *errstring;
};
#ifdef EINVAL
#define XSD_ERROR(x) { x, #x }
/* LINTED: static unused */
static struct xsd_errors xsd_errors[]
#if defined(__GNUC__)
__attribute__((unused))
#endif
= {
XSD_ERROR(EINVAL),
XSD_ERROR(EACCES),
XSD_ERROR(EEXIST),
XSD_ERROR(EISDIR),
XSD_ERROR(ENOENT),
XSD_ERROR(ENOMEM),
XSD_ERROR(ENOSPC),
XSD_ERROR(EIO),
XSD_ERROR(ENOTEMPTY),
XSD_ERROR(ENOSYS),
XSD_ERROR(EROFS),
XSD_ERROR(EBUSY),
XSD_ERROR(EAGAIN),
XSD_ERROR(EISCONN),
XSD_ERROR(E2BIG)
};
#endif
struct xsd_sockmsg {
u32 type; /* XS_??? */
u32 req_id;/* Request identifier, echoed in daemon's response. */
u32 tx_id; /* Transaction id (0 if not related to a transaction). */
u32 len; /* Length of data following this. */
/* Generally followed by nul-terminated string(s). */
};
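/*
 * Example (editor's sketch): every request is an xsd_sockmsg header
 * followed by <len> payload bytes; for XS_READ the payload is the
 * nul-terminated node path. Copying header plus payload into the
 * request ring and raising the event channel is the transport's job.
 * strlen() is assumed from the environment's C library.
 */
static inline void example_build_xs_read(struct xsd_sockmsg *hdr,
					 u32 req_id, u32 tx_id,
					 const char *path)
{
	hdr->type = XS_READ;
	hdr->req_id = req_id;	/* echoed back in the daemon's reply */
	hdr->tx_id = tx_id;	/* 0 if outside a transaction */
	hdr->len = strlen(path) + 1;	/* payload includes the nul */
}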
enum xs_watch_type {
XS_WATCH_PATH = 0,
XS_WATCH_TOKEN
};
/*
* `incontents 150 xenstore_struct XenStore wire protocol.
*
* Inter-domain shared memory communications.
*/
#define XENSTORE_RING_SIZE 1024
typedef u32 XENSTORE_RING_IDX;
#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE - 1))
struct xenstore_domain_interface {
char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
XENSTORE_RING_IDX req_cons, req_prod;
XENSTORE_RING_IDX rsp_cons, rsp_prod;
u32 server_features; /* Bitmap of features supported by the server */
u32 connection;
};
/* Violating this is very bad. See docs/misc/xenstore.txt. */
#define XENSTORE_PAYLOAD_MAX 4096
/* Violating these just gets you an error back */
#define XENSTORE_ABS_PATH_MAX 3072
#define XENSTORE_REL_PATH_MAX 2048
/* The ability to reconnect a ring */
#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
/* Valid values for the connection field */
#define XENSTORE_CONNECTED 0 /* the steady-state */
#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */
#endif /* _XS_WIRE_H */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*/
/* SPDX-License-Identifier: GPL-2.0
*
* memory.h
*
* Memory reservation and information.
*
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__
/*
* Increase or decrease the specified domain's memory reservation. Returns a
* -ve errcode on failure, or the # extents successfully allocated or freed.
* arg == addr of struct xen_memory_reservation.
*/
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
#define XENMEM_populate_physmap 6
struct xen_memory_reservation {
/*
* XENMEM_increase_reservation:
* OUT: MFN (*not* GMFN) bases of extents that were allocated
* XENMEM_decrease_reservation:
* IN: GMFN bases of extents to free
* XENMEM_populate_physmap:
* IN: GPFN bases of extents to populate with memory
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
*/
GUEST_HANDLE(xen_pfn_t) extent_start;
/* Number of extents, and size/alignment of each (2^extent_order pages). */
xen_ulong_t nr_extents;
unsigned int extent_order;
/*
* Maximum # bits addressable by the user of the allocated region (e.g.,
* I/O devices often have a 32-bit limitation even in 64-bit systems). If
* zero then the user has no addressing restriction.
* This field is not used by XENMEM_decrease_reservation.
*/
unsigned int address_bits;
/*
* Domain whose reservation is being changed.
* Unprivileged domains can specify only DOMID_SELF.
*/
domid_t domid;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
/*
* An atomic exchange of memory pages. If return code is zero then
* @out.extent_list provides GMFNs of the newly-allocated memory.
* Returns zero on complete success, otherwise a negative error code.
* On complete success then always @nr_exchanged == @in.nr_extents.
* On partial success @nr_exchanged indicates how much work was done.
*/
#define XENMEM_exchange 11
struct xen_memory_exchange {
/*
* [IN] Details of memory extents to be exchanged (GMFN bases).
* Note that @in.address_bits is ignored and unused.
*/
struct xen_memory_reservation in;
/*
* [IN/OUT] Details of new memory extents.
* We require that:
* 1. @in.domid == @out.domid
* 2. @in.nr_extents << @in.extent_order ==
* @out.nr_extents << @out.extent_order
* 3. @in.extent_start and @out.extent_start lists must not overlap
* 4. @out.extent_start lists GPFN bases to be populated
* 5. @out.extent_start is overwritten with allocated GMFN bases
*/
struct xen_memory_reservation out;
/*
* [OUT] Number of input extents that were successfully exchanged:
* 1. The first @nr_exchanged input extents were successfully
* deallocated.
* 2. The corresponding first entries in the output extent list correctly
* indicate the GMFNs that were successfully exchanged.
* 3. All other input and output extents are untouched.
* 4. If not all input extents are exchanged then the return code of this
* command will be non-zero.
* 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
*/
xen_ulong_t nr_exchanged;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_memory_exchange);
/*
* Returns the maximum machine frame number of mapped RAM in this system.
* This command always succeeds (it never returns an error code).
* arg == NULL.
*/
#define XENMEM_maximum_ram_page 2
/*
* Returns the current or maximum memory reservation, in pages, of the
* specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
* arg == addr of domid_t.
*/
#define XENMEM_current_reservation 3
#define XENMEM_maximum_reservation 4
/*
* Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
* mapping table. Architectures which do not have a m2p table do not implement
* this command.
* arg == addr of xen_machphys_mfn_list_t.
*/
#define XENMEM_machphys_mfn_list 5
struct xen_machphys_mfn_list {
/*
* Size of the 'extent_start' array. Fewer entries will be filled if the
* machphys table is smaller than max_extents * 2MB.
*/
unsigned int max_extents;
/*
* Pointer to buffer to fill with list of extent starts. If there are
* any large discontiguities in the machine address space, 2MB gaps in
* the machphys table will be represented by an MFN base of zero.
*/
GUEST_HANDLE(xen_pfn_t) extent_start;
/*
* Number of extents written to the above array. This will be smaller
* than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
*/
unsigned int nr_extents;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
/*
* Returns the location in virtual address space of the machine_to_phys
* mapping table. Architectures which do not have a m2p table, or which do not
* map it by default into guest address space, do not implement this command.
* arg == addr of xen_machphys_mapping_t.
*/
#define XENMEM_machphys_mapping 12
struct xen_machphys_mapping {
xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
};
DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t);
#define XENMAPSPACE_shared_info 0 /* shared info page */
#define XENMAPSPACE_grant_table 1 /* grant table page */
#define XENMAPSPACE_gmfn 2 /* GMFN */
#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
* XENMEM_add_to_physmap_range only.
*/
#define XENMAPSPACE_dev_mmio 5 /* device mmio region */
/*
* Sets the GPFN at which a particular page appears in the specified guest's
* pseudophysical address space.
* arg == addr of xen_add_to_physmap_t.
*/
#define XENMEM_add_to_physmap 7
struct xen_add_to_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
/* Number of pages to go through for gmfn_range */
u16 size;
/* Source mapping space. */
unsigned int space;
/* Index into source mapping space. */
xen_ulong_t idx;
/* GPFN where the source mapping page should appear. */
xen_pfn_t gpfn;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
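/*
 * Example (editor's sketch): the canonical first use of
 * XENMEM_add_to_physmap is placing the shared info page at a guest pfn
 * of the guest's choosing. Assumes a HYPERVISOR_memory_op(cmd, arg)
 * hypercall wrapper (the name is illustrative).
 */
static inline int example_map_shared_info(xen_pfn_t gpfn)
{
	struct xen_add_to_physmap xatp = {
		.domid = DOMID_SELF,
		.space = XENMAPSPACE_shared_info,
		.idx = 0,
		.gpfn = gpfn,
	};

	return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
}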
/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/
#define XENMEM_add_to_physmap_range 23
struct xen_add_to_physmap_range {
/* IN */
/* Which domain to change the mapping for. */
domid_t domid;
u16 space; /* => enum phys_map_space */
/* Number of pages to go through */
u16 size;
domid_t foreign_domid; /* IFF gmfn_foreign */
/* Indexes into space being mapped. */
GUEST_HANDLE(xen_ulong_t) idxs;
/* GPFN in domid where the source mapping page should appear. */
GUEST_HANDLE(xen_pfn_t) gpfns;
/* OUT */
/* Per index error code. */
GUEST_HANDLE(int) errs;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
/*
* Returns the pseudo-physical memory map as it was when the domain
* was started (specified by XENMEM_set_memory_map).
* arg == addr of struct xen_memory_map.
*/
#define XENMEM_memory_map 9
struct xen_memory_map {
/*
* On call the number of entries which can be stored in buffer. On
* return the number of entries which have been stored in
* buffer.
*/
unsigned int nr_entries;
/*
* Entries in the buffer are in the same format as returned by the
* BIOS INT 0x15 EAX=0xE820 call.
*/
GUEST_HANDLE(void) buffer;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
/*
* Returns the real physical memory map. Passes the same structure as
* XENMEM_memory_map.
* arg == addr of struct xen_memory_map.
*/
#define XENMEM_machine_memory_map 10
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
*/
#define XENMEM_remove_from_physmap 15
struct xen_remove_from_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
/* GPFN of the current mapping of the page. */
xen_pfn_t gpfn;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap);
/*
* Get the pages for a particular guest resource, so that they can be
* mapped directly by a tools domain.
*/
#define XENMEM_acquire_resource 28
struct xen_mem_acquire_resource {
/* IN - The domain whose resource is to be mapped */
domid_t domid;
/* IN - the type of resource */
u16 type;
#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_grant_table 1
/*
* IN - a type-specific resource identifier, which must be zero
* unless stated otherwise.
*
* type == XENMEM_resource_ioreq_server -> id == ioreq server id
* type == XENMEM_resource_grant_table -> id defined below
*/
u32 id;
#define XENMEM_resource_grant_table_id_shared 0
#define XENMEM_resource_grant_table_id_status 1
/* IN/OUT - As an IN parameter number of frames of the resource
* to be mapped. However, if the specified value is 0 and
* frame_list is NULL then this field will be set to the
* maximum value supported by the implementation on return.
*/
u32 nr_frames;
/*
* OUT - Must be zero on entry. On return this may contain a bitwise
* OR of the following values.
*/
u32 flags;
/* The resource pages have been assigned to the calling domain */
#define _XENMEM_rsrc_acq_caller_owned 0
#define XENMEM_rsrc_acq_caller_owned (1u << _XENMEM_rsrc_acq_caller_owned)
/*
* IN - the index of the initial frame to be mapped. This parameter
* is ignored if nr_frames is 0.
*/
u64 frame;
#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
/*
* IN/OUT - If the tools domain is PV then, upon return, frame_list
* will be populated with the MFNs of the resource.
* If the tools domain is HVM then it is expected that, on
* entry, frame_list will be populated with a list of GFNs
* that will be mapped to the MFNs of the resource.
* If -EIO is returned then the frame_list has only been
* partially mapped and it is up to the caller to unmap all
* the GFNs.
* This parameter may be NULL if nr_frames is 0.
*/
GUEST_HANDLE(xen_pfn_t) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mem_acquire_resource);
#endif /* __XEN_PUBLIC_MEMORY_H__ */
/* SPDX-License-Identifier: MIT
*
* sched.h
*
* Scheduler state interactions
*
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
#ifndef __XEN_PUBLIC_SCHED_H__
#define __XEN_PUBLIC_SCHED_H__
#include <xen/interface/event_channel.h>
/*
* Guest Scheduler Operations
*
* The SCHEDOP interface provides mechanisms for a guest to interact
* with the scheduler, including yield, blocking and shutting itself
* down.
*/
/*
* The prototype for this hypercall is:
* long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
*
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == Operation-specific extra argument(s), as described below.
* ... == Additional Operation-specific extra arguments, described below.
*
* Versions of Xen prior to 3.0.2 provided only the following legacy version
* of this hypercall, supporting only the commands yield, block and shutdown:
* long sched_op(int cmd, unsigned long arg)
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
* == SHUTDOWN_* code (SCHEDOP_shutdown)
*
* This legacy version is available to new guests as:
* long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
*/
/*
* Voluntarily yield the CPU.
* @arg == NULL.
*/
#define SCHEDOP_yield 0
/*
* Block execution of this VCPU until an event is received for processing.
* If called with event upcalls masked, this operation will atomically
* reenable event delivery and check for pending events before blocking the
* VCPU. This avoids a "wakeup waiting" race.
* @arg == NULL.
*/
#define SCHEDOP_block 1
/*
* Halt execution of this domain (all VCPUs) and notify the system controller.
* @arg == pointer to sched_shutdown structure.
*
* If the sched_shutdown_t reason is SHUTDOWN_suspend then
* x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
* of the guest's start info page. RDX/EDX is the third hypercall
* argument.
*
* In addition, when the reason is SHUTDOWN_suspend, this hypercall
* returns 1 if the suspend was cancelled or the domain was merely
* checkpointed, and 0 if it is resuming in a new domain.
*/
#define SCHEDOP_shutdown 2
/*
* Poll a set of event-channel ports. Return when one or more are pending. An
* optional timeout may be specified.
* @arg == pointer to sched_poll structure.
*/
#define SCHEDOP_poll 3
/*
* Declare a shutdown for another domain. The main use of this function is
* in interpreting shutdown requests and reasons for fully-virtualized
* domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
* @arg == pointer to sched_remote_shutdown structure.
*/
#define SCHEDOP_remote_shutdown 4
/*
* Latch a shutdown code, so that when the domain later shuts down it
* reports this code to the control tools.
* @arg == sched_shutdown, as for SCHEDOP_shutdown.
*/
#define SCHEDOP_shutdown_code 5
/*
* Setup, poke and destroy a domain watchdog timer.
* @arg == pointer to sched_watchdog structure.
* With id == 0, setup a domain watchdog timer to cause domain shutdown
* after timeout, returns watchdog id.
* With id != 0 and timeout == 0, destroy domain watchdog timer.
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
/*
* Override the current vcpu affinity by pinning it to one physical cpu or
* undo this override restoring the previous affinity.
* @arg == pointer to sched_pin_override structure.
*
* A negative pcpu value will undo a previous pin override and restore the
* previous cpu affinity.
* This call is allowed for the hardware domain only and requires the cpu
* to be part of the domain's cpupool.
*/
#define SCHEDOP_pin_override 7
struct sched_shutdown {
unsigned int reason; /* SHUTDOWN_* => shutdown reason */
};
DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
struct sched_poll {
GUEST_HANDLE(evtchn_port_t) ports;
unsigned int nr_ports;
u64 timeout;
};
DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
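/*
 * Example (editor's sketch): polling a single event-channel port,
 * optionally with a timeout (a system time value in nanoseconds; 0
 * polls without one). Assumes a HYPERVISOR_sched_op(cmd, arg) hypercall
 * wrapper (the name is illustrative).
 */
static inline int example_poll_port(evtchn_port_t port, u64 timeout)
{
	struct sched_poll poll = {
		.nr_ports = 1,
		.timeout = timeout,
	};

	set_xen_guest_handle(poll.ports, &port);
	return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}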
struct sched_remote_shutdown {
domid_t domain_id; /* Remote domain ID */
unsigned int reason; /* SHUTDOWN_* => shutdown reason */
};
DEFINE_GUEST_HANDLE_STRUCT(sched_remote_shutdown);
struct sched_watchdog {
u32 id; /* watchdog ID */
u32 timeout; /* timeout */
};
DEFINE_GUEST_HANDLE_STRUCT(sched_watchdog);
struct sched_pin_override {
s32 pcpu;
};
DEFINE_GUEST_HANDLE_STRUCT(sched_pin_override);
/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
*/
#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
/*
* The domain asked to perform a 'soft reset'. The expected behavior is to
* reset internal Xen state for the domain returning it to the point where it
* was created but leaving the domain's memory contents and vCPU contexts
* intact. This will allow the domain to start over and set up all Xen specific
* interfaces again.
*/
#define SHUTDOWN_soft_reset 5
#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */
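/*
 * Example (editor's sketch): a clean guest-initiated power-off using the
 * shutdown reasons above, with the same assumed HYPERVISOR_sched_op()
 * wrapper.
 */
static inline void example_poweroff(void)
{
	struct sched_shutdown shutdown = { .reason = SHUTDOWN_poweroff };

	HYPERVISOR_sched_op(SCHEDOP_shutdown, &shutdown);
}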
#endif /* __XEN_PUBLIC_SCHED_H__ */
/* SPDX-License-Identifier: MIT
*
* xen.h
*
* Guest OS interface to Xen.
*
* Copyright (c) 2004, K A Fraser
*/
#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__
#include <xen/arm/interface.h>
/*
* XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
*/
/*
* x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
* EAX = return value
* (argument registers may be clobbered on return)
* x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
* RAX = return value
* (argument registers not clobbered on return; RCX, R11 are)
*/
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
#define __HYPERVISOR_sched_op_compat 6
#define __HYPERVISOR_platform_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_update_descriptor 10
#define __HYPERVISOR_memory_op 12
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_event_channel_op_compat 16
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op_compat 19
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret 23 /* x86 only */
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
#define __HYPERVISOR_xenpmu_op 40
#define __HYPERVISOR_dm_op 41
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55
#ifndef __ASSEMBLY__
typedef u16 domid_t;
/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)
/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)
/*
* DOMID_IO is used to restrict page-table updates to mapping I/O memory.
* Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
* is useful to ensure that no mappings to the OS's own heap are accidentally
* installed. (e.g., in Linux this could cause havoc as reference counts
* aren't adjusted on the I/O-mapping code path).
* This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
* be specified by any calling domain.
*/
#define DOMID_IO (0x7FF1U)
/*
* DOMID_XEN is used to allow privileged domains to map restricted parts of
* Xen's heap space (e.g., the machine_to_phys table).
* This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
* the caller is privileged.
*/
#define DOMID_XEN (0x7FF2U)
/* DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW (0x7FF3U)
/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)
/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)
struct vcpu_info {
/*
* 'evtchn_upcall_pending' is written non-zero by Xen to indicate
* a pending notification for a particular VCPU. It is then cleared
* by the guest OS /before/ checking for pending work, thus avoiding
* a set-and-check race. Note that the mask is only accessed by Xen
* on the CPU that is currently hosting the VCPU. This means that the
* pending and mask flags can be updated by the guest without special
* synchronisation (i.e., no need for the x86 LOCK prefix).
* This may seem suboptimal because if the pending flag is set by
* a different CPU then an IPI may be scheduled even when the mask
* is set. However, note:
* 1. The task of 'interrupt holdoff' is covered by the per-event-
* channel mask bits. A 'noisy' event that is continually being
* triggered can be masked at source at this very precise
* granularity.
* 2. The main purpose of the per-VCPU mask is therefore to restrict
* reentrant execution: whether for concurrency control, or to
* prevent unbounded stack usage. Whatever the purpose, we expect
* that the mask will be asserted only for short periods at a time,
* and so the likelihood of a 'spurious' IPI is suitably small.
* The mask is read before making an event upcall to the guest: a
* non-zero mask therefore guarantees that the VCPU will not receive
* an upcall activation. The mask is cleared when the VCPU requests
* to block: this avoids wakeup-waiting races.
*/
u8 evtchn_upcall_pending;
u8 evtchn_upcall_mask;
xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
/*
* Xen/kernel shared data -- pointer provided in start_info.
* NB. We expect that this struct is smaller than a page.
*/
struct shared_info {
struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
/*
* A domain can create "event channels" on which it can send and receive
* asynchronous event notifications. There are three classes of event that
* are delivered by this mechanism:
* 1. Bi-directional inter- and intra-domain connections. Domains must
* arrange out-of-band to set up a connection (usually by allocating
* an unbound 'listener' port and advertising that via a storage service
* such as xenstore).
* 2. Physical interrupts. A domain with suitable hardware-access
* privileges can bind an event-channel port to a physical interrupt
* source.
* 3. Virtual interrupts ('events'). A domain can bind an event-channel
* port to a virtual interrupt source, such as the virtual-timer
* device or the emergency console.
*
* Event channels are addressed by a "port index". Each channel is
* associated with two bits of information:
* 1. PENDING -- notifies the domain that there is a pending notification
* to be processed. This bit is cleared by the guest.
* 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
* will cause an asynchronous upcall to be scheduled. This bit is only
* updated by the guest. It is read-only within Xen. If a channel
* becomes pending while the channel is masked then the 'edge' is lost
* (i.e., when the channel is unmasked, the guest must manually handle
* pending notifications as no upcall will be scheduled by Xen).
*
* To expedite scanning of pending notifications, any 0->1 pending
* transition on an unmasked channel causes a corresponding bit in a
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array.
*/
xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
* their gettimeofday() syscall on this wallclock-base value.
*/
struct pvclock_wall_clock wc;
struct arch_shared_info arch;
};
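/*
 * Example (editor's sketch): a simplified 2-level scan as described in
 * the comment above. The per-vcpu selector narrows the search to one
 * word of the PENDING array; each set-and-unmasked bit is a pending
 * port. A real implementation clears selector and pending bits with
 * atomic operations; plain accesses are used here for brevity.
 */
static inline void example_scan_events(struct shared_info *s,
				       struct vcpu_info *v,
				       void (*handle)(unsigned int port))
{
	xen_ulong_t sel = v->evtchn_pending_sel;

	v->evtchn_pending_sel = 0;	/* atomically (xchg) in real code */
	while (sel) {
		unsigned int i = __builtin_ctzll(sel);
		xen_ulong_t pending = s->evtchn_pending[i] &
				      ~s->evtchn_mask[i];

		sel &= sel - 1;	/* clear the lowest set selector bit */
		while (pending) {
			unsigned int j = __builtin_ctzll(pending);

			pending &= pending - 1;
			handle(i * sizeof(xen_ulong_t) * 8 + j);
		}
	}
}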
#else /* __ASSEMBLY__ */
/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x
#endif /* !__ASSEMBLY__ */
#endif /* __XEN_PUBLIC_XEN_H__ */