Commit 68402ddc authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] mm: remove VM_LOCKED before remap_pfn_range and drop VM_SHM

Remove VM_LOCKED before remap_pfn_range() from device drivers and get rid of
VM_SHM.

remap_pfn_range() already sets VM_IO.  There is no need to set VM_SHM since
it does nothing.  VM_LOCKED is of no use since remap_pfn_range() does not
place pages on the LRU; the pages are therefore never subject to swap
anyway.  Remove all the vm_flags settings before calling remap_pfn_range().

After removing all the vm_flags settings, no use of VM_SHM is left.  Drop it.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 9f1a3cfc
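
To illustrate the pattern this leaves behind, here is a minimal sketch of a driver mmap handler after the cleanup (foo_mmap() and the -EAGAIN error value are hypothetical and not taken from this diff; remap_pfn_range() and pgprot_noncached() are the kernel interfaces already used by the call sites below):

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * No vm_flags manipulation here: remap_pfn_range() sets VM_IO
	 * itself, and the remapped pages never reach the LRU, so
	 * VM_LOCKED (and the no-op VM_SHM) would change nothing.
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
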
@@ -702,7 +702,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	/*
 	 * Mark this as IO
 	 */
-	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	if (remap_pfn_range(vma, vma->vm_start, phys,
...
@@ -27,8 +27,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	/* Leave vm_pgoff as-is, the PCI space address is the physical
 	 * address on this platform.
 	 */
-	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
 	prot = pgprot_val(vma->vm_page_prot);
 	vma->vm_page_prot = __pgprot(prot);
...
@@ -285,8 +285,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	/* Leave vm_pgoff as-is, the PCI space address is the physical
 	 * address on this platform.
 	 */
-	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
 	prot = pgprot_val(vma->vm_page_prot);
 	if (boot_cpu_data.x86 > 3)
 		prot |= _PAGE_PCD | _PAGE_PWT;
...
@@ -602,8 +602,6 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
 	 * Leave vm_pgoff as-is, the PCI space address is the physical
 	 * address on this platform.
 	 */
-	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
 	if (write_combine && efi_range_is_wc(vma->vm_start,
 				vma->vm_end - vma->vm_start))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -666,7 +664,6 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
 	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
 	vma->vm_page_prot = prot;
-	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			    size, vma->vm_page_prot))
...
@@ -1654,7 +1654,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
 	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
 						  vma->vm_page_prot,
 						  mmap_state, write_combine);
...
@@ -877,7 +877,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
 	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
 						  vma->vm_page_prot,
 						  mmap_state, write_combine);
...
@@ -115,8 +115,6 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
 {
 	struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
-	vma->vm_flags |= VM_SHM | VM_LOCKED;
 	if ((vma->vm_end - vma->vm_start) > dp->size)
 		return -EINVAL;
...
@@ -1032,7 +1032,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
 	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
 						  vma->vm_page_prot,
 						  mmap_state, write_combine);
...
@@ -349,17 +349,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 }
-/*
- * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
- * mapping.
- */
-static __inline__ void
-__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
-		     enum pci_mmap_state mmap_state)
-{
-	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
-}
 /*
  * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
  * device mapping.
@@ -399,7 +388,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	if (ret < 0)
 		return ret;
-	__pci_mmap_set_flags(dev, vma, mmap_state);
 	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
 	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
...
@@ -329,7 +329,6 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
 	if (PAGE_SIZE > (1 << 16))
 		return -ENOSYS;
-	vma->vm_flags |= (VM_IO | VM_SHM | VM_LOCKED );
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
...
@@ -71,7 +71,6 @@ flash_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
 		size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
-	vma->vm_flags |= (VM_SHM | VM_LOCKED);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
...
@@ -623,7 +623,7 @@ static int vfc_mmap(struct file *file, struct vm_area_struct *vma)
 	map_size = sizeof(struct vfc_regs);
 	vma->vm_flags |=
-		(VM_SHM | VM_LOCKED | VM_IO | VM_MAYREAD | VM_MAYWRITE | VM_MAYSHARE);
+		(VM_MAYREAD | VM_MAYWRITE | VM_MAYSHARE);
 	map_offset = (unsigned int) (long)dev->phys_regs;
 	ret = io_remap_pfn_range(vma, vma->vm_start,
 				 MK_IOSPACE_PFN(dev->which_io,
...
@@ -232,9 +232,6 @@ static int igafb_mmap(struct fb_info *info,
 	size = vma->vm_end - vma->vm_start;
-	/* To stop the swapper from even considering these pages. */
-	vma->vm_flags |= (VM_SHM | VM_LOCKED);
 	/* Each page, see which map applies */
 	for (page = 0; page < size; ) {
 		map_size = 0;
...
@@ -145,7 +145,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 #define VM_GROWSUP	0x00000200
-#define VM_SHM		0x00000000	/* Means nothing: delete it later */
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
...