diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index f4a09ed223acdc4b4459718bff8ff2514ec9d4d7..866a0fa104c42a167ee542599cea49ac93671cf9 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -37,16 +37,17 @@
  *    will fail. Must be combined with HMM_PFN_REQ_FAULT.
  */
 enum hmm_pfn_flags {
-	/* Output flags */
+	/* Output fields and flags */
 	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
 	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
 	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+	HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
 
 	/* Input flags */
 	HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
 	HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
 
-	HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
+	HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
 };
 
 /*
@@ -61,6 +62,25 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
 	return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
 }
 
+/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within a high order page refers to the
+ * same page table entry and works with the same CPU permissions.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
+
 /*
  * struct hmm_range - track invalidation lock on virtual address range
  *
diff --git a/mm/hmm.c b/mm/hmm.c
index e9a545751108cd165dd61a4c67589967bf9e8602..0809baee49d0d88de7d0988ab26a273cab966ce0 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -165,12 +165,19 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
 	return hmm_pfns_fill(addr, end, range, 0);
 }
 
+static inline unsigned long hmm_pfn_flags_order(unsigned long order)
+{
+	return order << HMM_PFN_ORDER_SHIFT;
+}
+
 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
 						 pmd_t pmd)
 {
 	if (pmd_protnone(pmd))
 		return 0;
-	return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+				 HMM_PFN_VALID) |
+	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -389,7 +396,9 @@ static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
 {
 	if (!pud_present(pud))
 		return 0;
-	return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+				 HMM_PFN_VALID) |
+	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
 }
 
 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -474,7 +483,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 
 	i = (start - range->start) >> PAGE_SHIFT;
 	pfn_req_flags = range->hmm_pfns[i];
-	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
 	required_fault = hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags,
 					    cpu_flags);
 	if (required_fault) {
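
A minimal, hypothetical sketch of a consumer (not part of this patch): after hmm_range_fault() fills range->hmm_pfns, a driver can read the mapping size with hmm_pfn_to_map_order() and walk the result array in order-sized steps instead of one pfn at a time. The names consume_hmm_pfns() and consume_mapping() are illustrative only, and the notifier locking and retry loop that real callers need around hmm_range_fault() are omitted.

static void consume_hmm_pfns(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i = 0;

	while (i < npages) {
		unsigned long hmm_pfn = range->hmm_pfns[i];
		unsigned long va = range->start + (i << PAGE_SHIFT);
		unsigned int order = 0;
		unsigned long span;

		if (hmm_pfn & HMM_PFN_VALID) {
			/*
			 * Every pfn inside the 1 << order block is covered by
			 * the same page table entry and permissions, so one
			 * call per block is enough for this sketch.
			 */
			order = hmm_pfn_to_map_order(hmm_pfn);
			consume_mapping(hmm_pfn_to_page(hmm_pfn), order,
					hmm_pfn & HMM_PFN_WRITE);
		}

		/*
		 * The mapping starts at the order aligned VA, so only the
		 * remainder of the block lies at or after this entry.
		 */
		span = (1UL << order) -
		       ((va >> PAGE_SHIFT) & ((1UL << order) - 1));
		i += span;
	}
}

The skip accounts for range->start landing in the middle of a huge mapping: the order describes the order-aligned block, not the offset into the result array.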