diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index c175f9f25210a61896543e5eb371c90d534f8f3d..59e1c5585748a231b7063165d0a5663e986077b3 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -256,8 +256,20 @@ static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		__xip_unmap(mapping, vmf->pgoff);
 
 found:
+		/*
+		 * We must recheck i_size under i_mmap_rwsem to prevent races
+		 * with truncation
+		 */
+		i_mmap_lock_read(mapping);
+		size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+							PAGE_CACHE_SHIFT;
+		if (unlikely(vmf->pgoff >= size)) {
+			i_mmap_unlock_read(mapping);
+			return VM_FAULT_SIGBUS;
+		}
 		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
 							xip_pfn);
+		i_mmap_unlock_read(mapping);
 		if (err == -ENOMEM)
 			return VM_FAULT_OOM;
 		/*
@@ -281,16 +293,30 @@ static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 		if (error != -ENODATA)
 			goto out;
+
+		/*
+		 * We must recheck i_size under i_mmap_rwsem to prevent races
+		 * with truncation
+		 */
+		i_mmap_lock_read(mapping);
+		size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+							PAGE_CACHE_SHIFT;
+		if (unlikely(vmf->pgoff >= size)) {
+			ret = VM_FAULT_SIGBUS;
+			goto unlock;
+		}
 		/* not shared and writable, use xip_sparse_page() */
 		page = xip_sparse_page();
 		if (!page)
-			goto out;
+			goto unlock;
 		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
 							page);
 		if (err == -ENOMEM)
-			goto out;
+			goto unlock;
 
 		ret = VM_FAULT_NOPAGE;
+unlock:
+		i_mmap_unlock_read(mapping);
 out:
 		write_seqcount_end(&xip_sparse_seq);
 		mutex_unlock(&xip_sparse_mutex);
diff --git a/mm/memory.c b/mm/memory.c
index 99275325f303681230f88372d4e4ef99aa576dcc..1b04e13b99930c4a0b9f06499c69808a47623dc7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2329,6 +2329,7 @@ void unmap_mapping_range(struct address_space *mapping,
 		details.last_index = ULONG_MAX;
 
 
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
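
For reviewers unfamiliar with the locking scheme: the patch closes a
check-then-use window between reading i_size and inserting the page into
the page tables. Truncate can shrink the file after the size check but
before the insert, leaving a mapping past the new EOF. Rechecking i_size
while holding i_mmap_rwsem for reading excludes unmap_mapping_range(),
which runs with the lock held for writing, while still allowing faults
to proceed concurrently with each other. Below is a minimal userspace
sketch of the same recheck-under-lock discipline, not kernel code: the
pthread rwlock stands in for i_mmap_rwsem, and all names (file_state,
fault_one_page, truncate_file) are illustrative.

/* cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct file_state {
	pthread_rwlock_t i_mmap_lock;	/* stands in for i_mmap_rwsem */
	unsigned long size_pages;	/* stands in for i_size, in pages */
};

/*
 * Fault path: recheck the size *after* taking the lock for reading, so
 * a concurrent truncate cannot slip in between the size check and the
 * mapping insertion.
 */
static int fault_one_page(struct file_state *f, unsigned long pgoff)
{
	pthread_rwlock_rdlock(&f->i_mmap_lock);
	if (pgoff >= f->size_pages) {		/* the "recheck i_size" */
		pthread_rwlock_unlock(&f->i_mmap_lock);
		return -1;			/* SIGBUS in the kernel */
	}
	/* ... insert the page into the page tables here ... */
	pthread_rwlock_unlock(&f->i_mmap_lock);
	return 0;
}

/*
 * Truncate path: shrink the size and unmap while holding the lock for
 * writing, excluding all faults for the duration.
 */
static void truncate_file(struct file_state *f, unsigned long new_pages)
{
	pthread_rwlock_wrlock(&f->i_mmap_lock);
	f->size_pages = new_pages;
	/* ... unmap_mapping_range() equivalent would run here ... */
	pthread_rwlock_unlock(&f->i_mmap_lock);
}

int main(void)
{
	static struct file_state f = { PTHREAD_RWLOCK_INITIALIZER, 4 };

	printf("fault pgoff 2: %d\n", fault_one_page(&f, 2));	/* 0: in range */
	truncate_file(&f, 1);
	printf("fault pgoff 2: %d\n", fault_one_page(&f, 2));	/* -1: past EOF */
	return 0;
}

Using a read lock in the fault path rather than the exclusive
xip_sparse_mutex alone is what keeps faults against distinct pages
scalable; only truncate pays the cost of full exclusion.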