page_vma_mapped.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry, nonetheless it still does
			 * count as a valid regular mapping for the page (and
			 * is accounted as such in page maps count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}
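
/*
 * Worked example (illustrative): if @page is the head page of a THP spanning
 * 512 subpages and page_to_pfn(page) == 0x1000, the range check above accepts
 * any pfn in [0x1000, 0x1200), i.e. every subpage of the compound page.
 */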

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 * entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * pvmw->page or any subpage in case of THP.
 *
 * Otherwise, return false.
 *
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	if (pvmw->pte)
		goto next_pte;
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		pmde = *pvmw->pmd;
		if (likely(pmd_trans_huge(pmde))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(pmde) != page)
				return not_found(pvmw);
			return true;
		}
		if (!pmd_present(pmde)) {
			swp_entry_t entry;

			if (!thp_migration_supported() ||
			    !(pvmw->flags & PVMW_MIGRATION))
				return not_found(pvmw);
			entry = pmd_to_swp_entry(pmde);
			if (!is_migration_entry(entry) ||
			    migration_entry_to_page(entry) != page)
				return not_found(pvmw);
			return true;
		}
		/* THP pmd was split under us: handle on pte level */
		spin_unlock(pvmw->ptl);
		pvmw->ptl = NULL;
	} else if (!pmd_present(pmde)) {
		/*
		 * If PVMW_SYNC, take and drop THP pmd lock so that we
		 * cannot return prematurely, while zap_huge_pmd() has
		 * cleared *pmd but not decremented compound_mapcount().
		 */
		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

			spin_unlock(ptl);
		}
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		unsigned long end;

		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(page))
			return not_found(pvmw);
		end = vma_address_end(page, pvmw->vma);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
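
/*
 * Example usage (a minimal sketch; the function name and the branch bodies
 * below are illustrative, not taken from an existing caller): an rmap-style
 * caller initialises a page_vma_mapped_walk on the stack and iterates until
 * the walk returns false, handling both PTE-mapped and PMD-mapped cases.
 */
#if 0
static void example_visit_mappings(struct page *page, struct vm_area_struct *vma,
				   unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = 0,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* PTE mapping: pvmw.pte is mapped and pvmw.ptl is held */
		} else {
			/* PMD-mapped THP: pvmw.pmd is set, pvmw.pte is NULL */
		}
		/*
		 * To stop before the walk is exhausted, call
		 * page_vma_mapped_walk_done(&pvmw) and break out of the loop.
		 */
	}
}
#endif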

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
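
/*
 * Example usage (a minimal sketch; the function name and message are
 * illustrative): because page_mapped_in_vma() walks with PVMW_SYNC and
 * returns a plain 0/1 answer, callers can use it as a simple predicate.
 */
#if 0
static void example_report_mapping(struct page *page, struct vm_area_struct *vma)
{
	if (page_mapped_in_vma(page, vma))
		pr_info("pfn %#lx is mapped in this vma\n", page_to_pfn(page));
}
#endif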