#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 *
 * Note: We only support user read/write permissions. The supervisor
 * always has full read/write access to pages above PAGE_OFFSET (pages
 * below that always use the user access permissions).
 *
 * We could create a separate kernel read-only permission by using the
 * 3-bit PP combinations that newer processors provide, but we currently
 * don't.
 */
#define H_PAGE_BUSY		0x00800 /* software: PTE & hash are busy */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	57
#define H_PAGE_F_GIX		(7ul << 57)	/* HPTE index within HPTEG */
#define H_PAGE_F_SECOND		(1ul << 60)	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_HASHPTE		(1ul << 61)	/* PTE has associated HPTE */
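
/*
 * Reading aid (added, not part of the original header): the hash fault
 * path records in these bits where a page's HPTE was inserted so the
 * entry can be located and invalidated later.  The global HPTE slot is
 * typically recovered along the lines of:
 *
 *	hidx = (pte_val(pte) & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
 *	hash = hpt_hash(vpn, shift, ssize);
 *	if (pte_val(pte) & H_PAGE_F_SECOND)
 *		hash = ~hash;			(secondary hash bucket)
 *	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP + hidx;
 *
 * hpt_hash(), htab_hash_mask and HPTES_PER_GROUP live in the hash MMU
 * code; this is only a sketch of how the bits are consumed.
 */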

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
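
/*
 * Reading aid with made-up numbers: if each of the four levels indexed
 * 9 bits and PAGE_SHIFT were 12, the page tables would cover
 * 4 * 9 + 12 = 48 bits of effective address, i.e. a 256TB
 * H_PGTABLE_RANGE.  The real H_*_INDEX_SIZE values come from the
 * hash-4k.h / hash-64k.h header included above.
 */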

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Only with hash do we need to use the second half of the PMD page
 * table to store the pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area
 */
#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
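
/*
 * Reading aid: REGION_ID() simply extracts the top nibble of an
 * effective address.  With KERN_VIRT_START as defined above,
 * REGION_ID(VMALLOC_START) is 0xD and VMEMMAP_REGION_ID is the 0xF
 * nibble; user addresses fall in region 0.  PAGE_OFFSET itself is
 * defined elsewhere.
 */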

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
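
/*
 * Added note: a valid PMD/PUD entry holds the address of the
 * next-level table, which is naturally aligned to that table's size,
 * so any of these low bits being set marks the entry as bad.
 */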

#ifndef __ASSEMBLY__
#define	pmd_bad(pmd)		(pmd_val(pmd) & PMD_BAD_BITS)

#define	pud_bad(pud)		(pud_val(pud) & PUD_BAD_BITS)

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set,
				       int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
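
/*
 * Illustrative usage (added note, not part of this header): the
 * higher-level helpers in the 64-bit pgtable headers are built on
 * pte_update().  A get-and-clear, for instance, is roughly
 *
 *	old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
 *	return __pte(old);
 *
 * and "test and clear young" passes _PAGE_ACCESSED as the clear mask
 * and tests the returned old value.
 */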

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
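
/*
 * Added note: this helper only ever sets additional bits (dirty,
 * accessed, permissions) in the linux PTE, so the existing hash page
 * table entry can be left alone; the more permissive linux PTE is
 * simply re-hashed on the next hash fault.
 */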

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}
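
/*
 * Added note: _PAGE_HPTEFLAGS masks out the software hash-tracking
 * bits (BUSY, HASHPTE, F_SECOND, F_GIX, per the page-size headers), so
 * two PTEs mapping the same page with the same permissions compare
 * equal even if only one of them has been hashed into the HPT.
 */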

/* Generic accessors to PTE bits */
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0; }

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Just store the PTE normally; that covers all the 64-bit cases
	 * handled by this header.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */