#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)

#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long  xen_p2m_size;
extern unsigned long  xen_max_p2m_pfn;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
						    unsigned long pfn_e);

extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	return __put_user(val, (unsigned long __user *)addr);
}

static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
{
	return __get_user(*val, (unsigned long __user *)addr);
}
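
/*
 * A minimal usage sketch (mfn assumed valid): probing an m2p slot that
 * may not be mapped, without crashing on the fault:
 *
 *	unsigned long pfn;
 *
 *	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
 *		pfn = ~0;
 *
 * On a fault the helper returns a negative error, so pfn is treated as
 * invalid; mfn_to_pfn_no_overrides() below does exactly this.
 */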

/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the found entry of the p2m table. Any identity
 *   or foreign indicator that was set will still be set. __pfn_to_mfn()
 *   encapsulates get_phys_to_machine(), which is called in special cases
 *   only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in the
 *   special cases that need extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}
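
/*
 * Illustrative contrast (pfn and mfn assumed valid): after
 *
 *	set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 *
 * __pfn_to_mfn(pfn) returns FOREIGN_FRAME(mfn) with the indicator bit
 * intact, while pfn_to_mfn(pfn) returns the bare mfn with both
 * FOREIGN_FRAME_BIT and IDENTITY_FRAME_BIT masked off.
 */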

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		pfn = ~0;

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or the
	 * entry doesn't map back to the mfn.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
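
/*
 * A minimal sketch (addresses made up): both converters translate the
 * frame number but preserve the sub-page offset:
 *
 *	xmaddr_t ma = phys_to_machine(XPADDR(0x1000123));
 *
 * Here ma.maddr == PFN_PHYS(pfn_to_mfn(0x1000)) | 0x123 for 4 KiB pages.
 */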

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}
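
/*
 * Illustrative use (mfn assumed): as the comment above notes, a special
 * mapping has no struct page, so the forced out-of-range pfn makes
 * pfn_valid() fail:
 *
 *	unsigned long pfn = mfn_to_local_pfn(mfn);
 *
 *	if (pfn_valid(pfn))
 *		page = pfn_to_page(pfn);
 *
 * where page is only dereferenced for pages in our own reservation.
 */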

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
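
/*
 * Illustrative use (buffer hypothetical): handing the machine frame of a
 * page-aligned kernel object to the hypervisor:
 *
 *	static char buf[PAGE_SIZE] __aligned(PAGE_SIZE);
 *	unsigned long mfn = virt_to_mfn(buf);
 */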

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}
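
/*
 * Illustrative use (protection flags assumed): building a PTE that
 * refers to a machine frame directly, e.g. for a foreign mapping:
 *
 *	pte_t pte = mfn_pte(mfn, PAGE_KERNEL);
 *
 * Note the mfn is used as-is; no p2m translation is applied.
 */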

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#define pgd_val_ma(x)	((x).pgd)

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))
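
/*
 * A minimal sketch (mfn illustrative): mapping a machine frame provided
 * by the toolstack, e.g. a shared ring page, and tearing it down again:
 *
 *	void *ring = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
 *	...
 *	xen_unmap(ring);
 */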

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 unsigned long pfn,
					 unsigned long mfn)
{
	return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */