提交 f6719cc4 编写于 作者: M Mike Kravetz 提交者: Yang Yingliang

hugetlbfs: hugetlb_fault_mutex_hash() cleanup

stable inclusion
from linux-4.19.193
commit a92212ef6326c8dc09003c7af4e1ba7da0b77e44

--------------------------------

commit 55254636 upstream.

A new clang diagnostic (-Wsizeof-array-div) warns about the calculation
to determine the number of u32's in an array of unsigned longs.
Suppress warning by adding parentheses.

While looking at the above issue, noticed that the 'address' parameter
to hugetlb_fault_mutex_hash is no longer used.  So, remove it from the
definition and all callers.

No functional change.

Link: http://lkml.kernel.org/r/20190919011847.18400-1-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Nathan Chancellor <natechancellor@gmail.com>
Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Ilie Halip <ilie.halip@gmail.com>
Cc: David Bolvansky <david.bolvansky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 d1fd69d9
@@ -491,7 +491,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 		u32 hash;
 		index = page->index;
-		hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
+		hash = hugetlb_fault_mutex_hash(h, mapping, index);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		/*
@@ -756,7 +756,7 @@ static int hugetlbfs_fallocate_chunk(pgoff_t start, pgoff_t end,
 		addr = index * hpage_size;
 		/* mutex taken here, fault path and hole punch */
-		hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
+		hash = hugetlb_fault_mutex_hash(h, mapping, index);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		/* See if already present in mapping to avoid alloc/free */
...
@@ -124,7 +124,7 @@ void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-				pgoff_t idx, unsigned long address);
+				pgoff_t idx);
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
...
@@ -4003,7 +4003,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			 * handling userfault. Reacquire after handling
			 * fault to make calling code simpler.
			 */
-			hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+			hash = hugetlb_fault_mutex_hash(h, mapping, idx);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -4131,7 +4131,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 #ifdef CONFIG_SMP
 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-				pgoff_t idx, unsigned long address)
+				pgoff_t idx)
 {
	unsigned long key[2];
	u32 hash;
@@ -4139,7 +4139,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
	key[0] = (unsigned long) mapping;
	key[1] = idx;
-	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
+	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
	return hash & (num_fault_mutexes - 1);
 }
@@ -4149,7 +4149,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
  * return 0 and avoid the hashing overhead.
  */
 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-				pgoff_t idx, unsigned long address)
+				pgoff_t idx)
 {
	return 0;
 }
@@ -4193,7 +4193,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
-	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	entry = huge_ptep_get(ptep);
...
@@ -297,7 +297,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
	 */
	idx = linear_page_index(dst_vma, dst_addr);
	mapping = dst_vma->vm_file->f_mapping;
-	hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	err = -ENOMEM;
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册