Commit f83a275d authored by Mel Gorman, committed by Linus Torvalds

mm: account for MAP_SHARED mappings using VM_MAYSHARE and not VM_SHARED in hugetlbfs

Addresses http://bugzilla.kernel.org/show_bug.cgi?id=13302

hugetlbfs reserves huge pages but does not fault them at mmap() time to
ensure that future faults succeed.  The reservation behaviour differs
depending on whether the mapping was mapped MAP_SHARED or MAP_PRIVATE.
For MAP_SHARED mappings, hugepages are reserved when mmap() is first
called and are tracked based on information associated with the inode.
Other processes mapping MAP_SHARED use the same reservation.  MAP_PRIVATE
mappings track their reservations on the VMA created as part of the mmap()
operation.  Each process mapping MAP_PRIVATE must make its own
reservation.
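
To make the distinction concrete, here is a minimal userspace sketch (not
part of the patch; it assumes a hypothetical hugetlbfs mount at /mnt/huge
and a 2MB huge page size):

/* Sketch only: /mnt/huge is an assumed hugetlbfs mount point. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define LEN (2UL * 1024 * 1024)	/* one 2MB huge page (assumed size) */

int main(void)
{
	int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0644);

	/* MAP_SHARED: the reservation is made at mmap() time and tracked
	 * on the inode, so other MAP_SHARED mappers reuse it. */
	void *shared = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);

	/* MAP_PRIVATE: the reservation is tracked on this process's VMA;
	 * every private mapper must reserve its own huge pages. */
	void *priv = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE, fd, 0);

	if (shared == MAP_FAILED || priv == MAP_FAILED)
		perror("mmap");

	close(fd);
	return 0;
}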

hugetlbfs currently checks if a VMA is MAP_SHARED with the VM_SHARED flag
and not VM_MAYSHARE.  For file-backed mappings, such as hugetlbfs,
VM_SHARED is set only if the mapping is MAP_SHARED and the file was opened
read-write.  If a shared memory mapping was mapped shared-read-write by one
process to populate data and shared-read-only by other processes, then
hugetlbfs would account for the read-only mappings as if they were
MAP_PRIVATE.  This causes those processes to fail to map the file
MAP_SHARED even though the mapping should succeed, as the reservation is
already there.
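
The failure can be seen from a reader process along these lines (a
reproduction sketch for the scenario above, again assuming a hugetlbfs file
at /mnt/huge/demo that a writer has already mapped shared-read-write):

/* Reader side: the file is opened read-only, so pre-patch kernels leave
 * VM_SHARED clear on the VMA and account the mapping as if it were
 * MAP_PRIVATE.  The mmap() can then fail even though the writer's
 * MAP_SHARED reservation already exists. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)	/* assumed 2MB huge page size */

int main(void)
{
	int fd = open("/mnt/huge/demo", O_RDONLY);
	void *p = mmap(NULL, LEN, PROT_READ, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		perror("mmap");	/* fails on unpatched kernels */
	return 0;
}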

This patch alters mm/hugetlb.c, replacing VM_SHARED with VM_MAYSHARE
wherever the intent of the code is to check whether the VMA was mapped
MAP_SHARED or MAP_PRIVATE.
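
For context on why VM_MAYSHARE is the reliable test: when mmap() sets up a
MAP_SHARED mapping of a file that was not opened for writing, the core mmap
path keeps VM_MAYSHARE but drops VM_SHARED.  A paraphrased excerpt of that
flag-setting logic (from do_mmap_pgoff() in mm/mmap.c of this era, not part
of this patch):

/* Paraphrased sketch, not part of this patch.  MAP_SHARED always sets
 * VM_MAYSHARE, but VM_SHARED is cleared again for a read-only fd. */
case MAP_SHARED:
	vm_flags |= VM_SHARED | VM_MAYSHARE;
	if (!(file->f_mode & FMODE_WRITE))
		vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
	break;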
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: <starlight@binnacle.cx>
Cc: Eric B Munson <ebmunson@us.ibm.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 32b154c0
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		return (struct resv_map *)(get_vma_private_data(vma) &
 					~HPAGE_RESV_MASK);
 	return NULL;
@@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 	set_vma_private_data(vma, (get_vma_private_data(vma) &
 				HPAGE_RESV_MASK) | (unsigned long)map);
@@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 	if (vma->vm_flags & VM_NORESERVE)
 		return;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		/* Shared mappings always use reserves */
 		h->resv_huge_pages--;
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_SHARED)
+	if (vma->vm_flags & VM_MAYSHARE)
 		return 1;
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		return 1;
@@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		return region_chg(&inode->i_mapping->private_list,
 							idx, idx + 1);
@@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
@@ -1893,7 +1893,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * at the time of fork() could consume its reserves on COW instead
 	 * of the full address range.
 	 */
-	if (!(vma->vm_flags & VM_SHARED) &&
+	if (!(vma->vm_flags & VM_MAYSHARE) &&
 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 			old_page != pagecache_page)
 		outside_reserve = 1;
@@ -2000,7 +2000,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		clear_huge_page(page, address, huge_page_size(h));
 		__SetPageUptodate(page);
 
-		if (vma->vm_flags & VM_SHARED) {
+		if (vma->vm_flags & VM_MAYSHARE) {
 			int err;
 			struct inode *inode = mapping->host;
@@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_mutex;
 		}
 
-		if (!(vma->vm_flags & VM_SHARED))
+		if (!(vma->vm_flags & VM_MAYSHARE))
 			pagecache_page = hugetlbfs_pagecache_page(h,
 								vma, address);
 	}
@@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * to reserve the full area even if read-only as mprotect() may be
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
 	else {
 		struct resv_map *resv_map = resv_map_alloc();
@@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * consumed reservations are stored in the map. Hence, nothing
 	 * else has to be done for private mappings here
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
 }