Commit 889e4dd9 authored by Bjorn Helgaas

Merge branch 'pci/resource-mmap' into next

* pci/resource-mmap:
  ia64: Use generic pci_mmap_resource_range()
  ia64: Remove redundant checks for WC in pci_mmap_page_range()
  ia64: Remove redundant valid_mmap_phys_addr_range() from pci_mmap_page_range()
  PCI: Add I/O BAR support to generic pci_mmap_resource_range()
  x86/PCI: Use generic pci_mmap_resource_range()
  unicore32/PCI: Use generic pci_mmap_resource_range()
  sh/PCI: Use generic pci_mmap_resource_range()
  parisc: Use generic pci_mmap_resource_range()
  mn10300/PCI: Use generic pci_mmap_resource_range()
  MIPS: PCI: Use generic pci_mmap_resource_range()
  cris/PCI: Use generic pci_mmap_resource_range()
  ARM/PCI: Use generic pci_mmap_resource_range()
  PCI: Add pci_mmap_resource_range() and use it for ARM64
  PCI: Add BAR index argument to pci_mmap_page_range()
  PCI: Use BAR index in sysfs attr->private instead of resource pointer
  PCI: Add arch_can_pci_mmap_io() on architectures which can mmap() I/O space
  PCI: Move multiple declarations of pci_mmap_page_range() to <linux/pci.h>
  PCI: Add arch_can_pci_mmap_wc() macro
  xtensa/PCI: Do not mmap PCI BARs to userspace as write-through
  PCI: Only allow WC mmap on prefetchable resources
  PCI: Fix another sanity check bug in /proc/pci mmap
  PCI: Fix pci_mmap_fits() for HAVE_PCI_RESOURCE_TO_USER platforms
@@ -113,9 +113,18 @@ Supporting PCI access on new platforms
 --------------------------------------

 In order to support PCI resource mapping as described above, Linux platform
-code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function.
-Platforms are free to only support subsets of the mmap functionality, but
-useful return codes should be provided.
+code should ideally define ARCH_GENERIC_PCI_MMAP_RESOURCE and use the generic
+implementation of that functionality. To support the historical interface of
+mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
+
+Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
+implementation of pci_mmap_page_range() instead of defining
+ARCH_GENERIC_PCI_MMAP_RESOURCE.
+
+Platforms which support write-combining maps of PCI resources must define
+arch_can_pci_mmap_wc() which shall evaluate to non-zero at runtime when
+write-combining is permitted. Platforms which support maps of I/O resources
+define arch_can_pci_mmap_io() similarly.

 Legacy resources are protected by the HAVE_PCI_LEGACY define. Platforms
 wishing to support legacy functionality should define it and provide
......
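As a concrete illustration of the documentation change above, a platform opting into the generic path now needs only a few lines in its <asm/pci.h>. This is a minimal sketch, not taken from any single architecture; the two arch_can_pci_mmap_*() lines apply only where the platform can actually honor those mapping types:

        /* Sketch of an <asm/pci.h> fragment under the new scheme. */
        #define HAVE_PCI_MMAP                   /* keep /proc/bus/pci mmap() working */
        #define ARCH_GENERIC_PCI_MMAP_RESOURCE  /* use drivers/pci/mmap.c */

        /* Optional; these default to 0 in <linux/pci.h> when left undefined. */
        #define arch_can_pci_mmap_wc()  1       /* write-combining mappings OK */
        #define arch_can_pci_mmap_io()  1       /* I/O-space BARs can be mmap()ed */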
@@ -29,8 +29,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 #define PCI_DMA_BUS_IS_PHYS     (1)

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
......
@@ -597,25 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
        return start;
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine)
-{
-       if (mmap_state == pci_mmap_io)
-               return -EINVAL;
-
-       /*
-        * Mark this as IO
-        */
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                            vma->vm_end - vma->vm_start,
-                            vma->vm_page_prot))
-               return -EAGAIN;
-
-       return 0;
-}
-
 void __init pci_map_io_early(unsigned long pfn)
 {
        struct map_desc pci_io_desc = {
......
@@ -22,6 +22,8 @@
  */
 #define PCI_DMA_BUS_IS_PHYS    (0)

+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+
 extern int isa_dma_bridge_buggy;

 #ifdef CONFIG_PCI
......
@@ -14,28 +14,6 @@ void pcibios_set_master(struct pci_dev *dev)
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long prot;
-
-       /* Leave vm_pgoff as-is, the PCI space address is the physical
-        * address on this platform.
-        */
-       prot = pgprot_val(vma->vm_page_prot);
-       vma->vm_page_prot = __pgprot(prot);
-
-       /* Write-combine setting is ignored, it is changed via the mtrr
-        * interfaces on this platform.
-        */
-       if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                            vma->vm_end - vma->vm_start,
-                            vma->vm_page_prot))
-               return -EAGAIN;
-
-       return 0;
-}
-
 resource_size_t
 pcibios_align_resource(void *data, const struct resource *res,
                        resource_size_t size, resource_size_t align)
......
@@ -42,9 +42,7 @@ struct pci_dev;
 #define PCI_DMA_BUS_IS_PHYS    (1)

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 #endif /* __KERNEL__ */
......
@@ -51,8 +51,9 @@ extern unsigned long ia64_max_iommu_merge_mask;
 #define PCI_DMA_BUS_IS_PHYS    (ia64_max_iommu_merge_mask == ~0UL)

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+#define arch_can_pci_mmap_wc() 1
+
 #define HAVE_PCI_LEGACY
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
                                      struct vm_area_struct *vma,
......
@@ -418,52 +418,6 @@ pcibios_align_resource (void *data, const struct resource *res,
        return res->start;
 }

-int
-pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
-                    enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long size = vma->vm_end - vma->vm_start;
-       pgprot_t prot;
-
-       /*
-        * I/O space cannot be accessed via normal processor loads and
-        * stores on this platform.
-        */
-       if (mmap_state == pci_mmap_io)
-               /*
-                * XXX we could relax this for I/O spaces for which ACPI
-                * indicates that the space is 1-to-1 mapped.  But at the
-                * moment, we don't support multiple PCI address spaces and
-                * the legacy I/O space is not 1-to-1 mapped, so this is moot.
-                */
-               return -EINVAL;
-
-       if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
-               return -EINVAL;
-
-       prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
-                                   vma->vm_page_prot);
-
-       /*
-        * If the user requested WC, the kernel uses UC or WC for this region,
-        * and the chipset supports WC, we can use WC. Otherwise, we have to
-        * use the same attribute the kernel uses.
-        */
-       if (write_combine &&
-           ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
-            (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
-           efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       else
-               vma->vm_page_prot = prot;
-
-       if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                           vma->vm_end - vma->vm_start, vma->vm_page_prot))
-               return -EAGAIN;
-
-       return 0;
-}
-
 /**
  * ia64_pci_get_legacy_mem - generic legacy mem routine
  * @bus: bus to get legacy memory base address for
......
@@ -46,12 +46,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
 extern int pci_proc_domain(struct pci_bus *bus);

 struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine);

 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP          1
+#define arch_can_pci_mmap_io() 1

 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
                           size_t count);
......
@@ -278,7 +278,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
 {
        resource_size_t offset =
......
@@ -110,10 +110,7 @@ extern unsigned long PCIBIOS_MIN_MEM;
 extern void pcibios_set_master(struct pci_dev *dev);

 #define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 #define HAVE_ARCH_PCI_RESOURCE_TO_USER

 /*
......
@@ -57,27 +57,3 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
        *start = fixup_bigphys_addr(rsrc->start, size);
        *end = rsrc->start + size;
 }
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-       enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long prot;
-
-       /*
-        * I/O space can be accessed via normal processor loads and stores on
-        * this platform but for now we elect not to do this and portable
-        * drivers should not do this anyway.
-        */
-       if (mmap_state == pci_mmap_io)
-               return -EINVAL;
-
-       /*
-        * Ignore write-combine; for now only return uncached mappings.
-        */
-       prot = pgprot_val(vma->vm_page_prot);
-       prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
-       vma->vm_page_prot = __pgprot(prot);
-
-       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-               vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
@@ -74,9 +74,7 @@ static inline int pci_controller_num(struct pci_dev *dev)
 }

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state,
-                              int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 #endif /* __KERNEL__ */
......
@@ -210,26 +210,3 @@ void __init pcibios_resource_survey(void)
        pcibios_allocate_resources(0);
        pcibios_allocate_resources(1);
 }
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long prot;
-
-       /* Leave vm_pgoff as-is, the PCI space address is the physical
-        * address on this platform.
-        */
-       vma->vm_flags |= VM_LOCKED;
-
-       prot = pgprot_val(vma->vm_page_prot);
-       prot &= ~_PAGE_CACHE;
-       vma->vm_page_prot = __pgprot(prot);
-
-       /* Write-combine setting is ignored */
-       if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot))
-               return -EAGAIN;
-
-       return 0;
-}
@@ -200,8 +200,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }

 #define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 #endif /* __ASM_PARISC_PCI_H */
@@ -227,34 +227,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
        return start;
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long prot;
-
-       /*
-        * I/O space can be accessed via normal processor loads and stores on
-        * this platform but for now we elect not to do this and portable
-        * drivers should not do this anyway.
-        */
-       if (mmap_state == pci_mmap_io)
-               return -EINVAL;
-
-       if (write_combine)
-               return -EINVAL;
-
-       /*
-        * Ignore write-combine; for now only return uncached mappings.
-        */
-       prot = pgprot_val(vma->vm_page_prot);
-       prot |= _PAGE_NO_CACHE;
-       vma->vm_page_prot = __pgprot(prot);
-
-       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
-
 /*
  * A driver is enabling the device.  We make sure that all the appropriate
  * bits are set to allow the device to operate as the driver is expecting.
......
@@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus);
 extern int pci_proc_domain(struct pci_bus *bus);

 struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine);

-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */
 #define HAVE_PCI_MMAP          1
+#define arch_can_pci_mmap_io() 1
+#define arch_can_pci_mmap_wc() 1

 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
                           size_t count);
......
@@ -521,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+                       struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
 {
        resource_size_t offset =
......
@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn)
        }
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine)
-{
-       /*
-        * I/O space can be accessed via normal processor loads and stores on
-        * this platform but for now we elect not to do this and portable
-        * drivers should not do this anyway.
-        */
-       if (mmap_state == pci_mmap_io)
-               return -EINVAL;
-
-       /*
-        * Ignore write-combine; for now only return uncached mappings.
-        */
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot);
-}
-
 #ifndef CONFIG_GENERIC_IOMAP

 void __iomem *__pci_ioport_map(struct pci_dev *dev,
......
@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
 struct pci_dev;

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-       enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 extern void pcibios_set_master(struct pci_dev *dev);

 /* Dynamic DMA mapping stuff.
......
@@ -42,13 +42,10 @@ static inline int pci_proc_domain(struct pci_bus *bus)

 /* Platform support for /proc/bus/pci/X/Y mmap()s. */
 #define HAVE_PCI_MMAP
+#define arch_can_pci_mmap_io() 1
 #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
 #define get_pci_unmapped_area get_fb_unmapped_area

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state,
-                       int write_combine);
-
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
        return PCI_IRQ_NONE;
......
@@ -862,9 +862,9 @@ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vm
  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state,
-                       int write_combine)
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+                       struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state, int write_combine)
 {
        int ret;
......
@@ -17,8 +17,7 @@
 #include <mach/hardware.h> /* for PCIBIOS_MIN_* */

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 #endif /* __KERNEL__ */
 #endif
@@ -356,26 +356,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
        }
        return 0;
 }
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-               enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long phys;
-
-       if (mmap_state == pci_mmap_io)
-               return -EINVAL;
-
-       phys = vma->vm_pgoff;
-
-       /*
-        * Mark this as IO
-        */
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (remap_pfn_range(vma, vma->vm_start, phys,
-                            vma->vm_end - vma->vm_start,
-                            vma->vm_page_prot))
-               return -EAGAIN;
-
-       return 0;
-}
@@ -7,6 +7,7 @@
 #include <linux/string.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
+#include <asm/pat.h>
 #include <asm/x86_init.h>

 #ifdef __KERNEL__
@@ -102,10 +103,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);

 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state,
-                              int write_combine);
+#define arch_can_pci_mmap_wc() pat_enabled()
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE

 #ifdef CONFIG_PCI
 extern void early_quirks(void);
......
@@ -406,50 +406,3 @@ void __init pcibios_resource_survey(void)
         */
        ioapic_insert_resources();
 }
-
-static const struct vm_operations_struct pci_mmap_ops = {
-       .access = generic_access_phys,
-};
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine)
-{
-       unsigned long prot;
-
-       /* I/O space cannot be accessed via normal processor loads and
-        * stores on this platform.
-        */
-       if (mmap_state == pci_mmap_io)
-               return -EINVAL;
-
-       prot = pgprot_val(vma->vm_page_prot);
-
-       /*
-        * Return error if pat is not enabled and write_combine is requested.
-        * Caller can followup with UC MINUS request and add a WC mtrr if there
-        * is a free mtrr slot.
-        */
-       if (!pat_enabled() && write_combine)
-               return -EINVAL;
-
-       if (pat_enabled() && write_combine)
-               prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
-       else if (pat_enabled() || boot_cpu_data.x86 > 3)
-               /*
-                * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
-                * To avoid attribute conflicts, request UC MINUS here
-                * as well.
-                */
-               prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
-
-       vma->vm_page_prot = __pgprot(prot);
-
-       if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot))
-               return -EAGAIN;
-
-       vma->vm_ops = &pci_mmap_ops;
-
-       return 0;
-}
@@ -46,12 +46,9 @@ struct pci_dev;
 #define PCI_DMA_BUS_IS_PHYS    (1)

-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state, int write_combine);
-
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP          1
+#define arch_can_pci_mmap_io() 1

 #endif /* __KERNEL__ */
......
@@ -333,25 +333,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
        return -EINVAL;
 }

-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static __inline__ void
-__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
-                     enum pci_mmap_state mmap_state, int write_combine)
-{
-       int prot = pgprot_val(vma->vm_page_prot);
-
-       /* Set to write-through */
-       prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
-#if 0
-       if (!write_combine)
-               prot |= _PAGE_WRITETHRU;
-#endif
-       vma->vm_page_prot = __pgprot(prot);
-}
-
 /*
  * Perform the actual remap of the pages for a PCI device mapping, as
  * appropriate for this architecture.  The region in the process to map
@@ -362,7 +343,8 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+                       struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state,
                        int write_combine)
 {
@@ -372,7 +354,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
        if (ret < 0)
                return ret;

-       __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+       vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

        ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                 vma->vm_end - vma->vm_start,vma->vm_page_prot);
......
@@ -4,7 +4,7 @@
 obj-y          += access.o bus.o probe.o host-bridge.o remove.o pci.o \
                        pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
-                       irq.o vpd.o setup-bus.o vc.o
+                       irq.o vpd.o setup-bus.o vc.o mmap.o
 obj-$(CONFIG_PROC_FS)  += proc.o
 obj-$(CONFIG_SYSFS)    += slot.o
......
/*
* mmap.c — generic PCI resource mmap helper
*
* Copyright © 2017 Amazon.com, Inc. or its affiliates.
*
* Author: David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
/*
* Modern setup: generic pci_mmap_resource_range(), and implement the legacy
* pci_mmap_page_range() (if needed) as a wrapper round it.
*/
#ifdef HAVE_PCI_MMAP
int pci_mmap_page_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t start, end;
pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
/* Adjust vm_pgoff to be the offset within the resource */
vma->vm_pgoff -= start >> PAGE_SHIFT;
return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
write_combine);
}
#endif
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
#endif
};
int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long size;
int ret;
size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
if (vma->vm_pgoff + vma_pages(vma) > size)
return -EINVAL;
if (write_combine)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
if (mmap_state == pci_mmap_io) {
ret = pci_iobar_pfn(pdev, bar, vma);
if (ret)
return ret;
} else
vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
vma->vm_ops = &pci_phys_vm_ops;
return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
/*
 * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
* the architecture's pci_mmap_page_range(), converting to "user visible"
* addresses as necessary.
*/
int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t start, end;
/*
* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
* different from the resource itself, arch will do necessary fixup.
*/
pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
vma->vm_pgoff += start >> PAGE_SHIFT;
return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
}
#endif
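Seen from userspace, the sysfs side of this now has simple offset semantics: with the generic pci_mmap_resource_range(), vma->vm_pgoff is an offset within the BAR, so mapping starts at file offset 0. A hedged sketch follows; the device path, BDF, and mapping length are illustrative, and prefetchable BARs additionally gain a resourceN_wc file where arch_can_pci_mmap_wc() is true:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                /* Placeholder BDF; pick a real device from lspci */
                const char *path = "/sys/bus/pci/devices/0000:00:02.0/resource0";
                int fd = open(path, O_RDWR | O_SYNC);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* Offset 0 = start of the BAR.  The length must fit within
                 * the BAR, or pci_mmap_fits() makes the mmap() fail.
                 */
                void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, fd, 0);
                if (bar == MAP_FAILED) {
                        perror("mmap");
                        close(fd);
                        return 1;
                }

                printf("BAR0 mapped at %p\n", bar);
                munmap(bar, 4096);
                close(fd);
                return 0;
        }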
@@ -980,20 +980,24 @@ void pci_remove_legacy_files(struct pci_bus *b)
 }
 #endif /* HAVE_PCI_LEGACY */

-#ifdef HAVE_PCI_MMAP
+#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
                  enum pci_mmap_api mmap_api)
 {
-       unsigned long nr, start, size, pci_start;
+       unsigned long nr, start, size;
+       resource_size_t pci_start = 0, pci_end;

        if (pci_resource_len(pdev, resno) == 0)
                return 0;
        nr = vma_pages(vma);
        start = vma->vm_pgoff;
        size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
-       pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
-                       pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+       if (mmap_api == PCI_MMAP_PROCFS) {
+               pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+                                    &pci_start, &pci_end);
+               pci_start >>= PAGE_SHIFT;
+       }
        if (start >= pci_start && start < pci_start + size &&
            start + nr <= pci_start + size)
                return 1;
@@ -1013,37 +1017,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
                             struct vm_area_struct *vma, int write_combine)
 {
        struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-       struct resource *res = attr->private;
+       int bar = (unsigned long)attr->private;
        enum pci_mmap_state mmap_type;
-       resource_size_t start, end;
-       int i;
-
-       for (i = 0; i < PCI_ROM_RESOURCE; i++)
-               if (res == &pdev->resource[i])
-                       break;
-       if (i >= PCI_ROM_RESOURCE)
-               return -ENODEV;
+       struct resource *res = &pdev->resource[bar];

        if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
                return -EINVAL;

-       if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+       if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) {
                WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
                        current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
-                       pci_name(pdev), i,
-                       (u64)pci_resource_start(pdev, i),
-                       (u64)pci_resource_len(pdev, i));
+                       pci_name(pdev), bar,
+                       (u64)pci_resource_start(pdev, bar),
+                       (u64)pci_resource_len(pdev, bar));
                return -EINVAL;
        }

-       /* pci_mmap_page_range() expects the same kind of entry as coming
-        * from /proc/bus/pci/ which is a "user visible" value. If this is
-        * different from the resource itself, arch will do necessary fixup.
-        */
-       pci_resource_to_user(pdev, i, res, &start, &end);
-       vma->vm_pgoff += start >> PAGE_SHIFT;
        mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
-       return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
+
+       return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
 }

 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
@@ -1065,22 +1056,18 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
                               loff_t off, size_t count, bool write)
 {
        struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-       struct resource *res = attr->private;
+       int bar = (unsigned long)attr->private;
+       struct resource *res;
        unsigned long port = off;
-       int i;

-       for (i = 0; i < PCI_ROM_RESOURCE; i++)
-               if (res == &pdev->resource[i])
-                       break;
-       if (i >= PCI_ROM_RESOURCE)
-               return -ENODEV;
+       res = &pdev->resource[bar];

-       port += pci_resource_start(pdev, i);
-       if (port > pci_resource_end(pdev, i))
+       port += pci_resource_start(pdev, bar);
+       if (port > pci_resource_end(pdev, bar))
                return 0;
-       if (port + count - 1 > pci_resource_end(pdev, i))
+       if (port + count - 1 > pci_resource_end(pdev, bar))
                return -EINVAL;

        switch (count) {
@@ -1170,16 +1157,19 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
        } else {
                pdev->res_attr[num] = res_attr;
                sprintf(res_attr_name, "resource%d", num);
-               res_attr->mmap = pci_mmap_resource_uc;
-       }
-       if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
-               res_attr->read = pci_read_resource_io;
-               res_attr->write = pci_write_resource_io;
+               if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+                       res_attr->read = pci_read_resource_io;
+                       res_attr->write = pci_write_resource_io;
+                       if (arch_can_pci_mmap_io())
+                               res_attr->mmap = pci_mmap_resource_uc;
+               } else {
+                       res_attr->mmap = pci_mmap_resource_uc;
+               }
        }
        res_attr->attr.name = res_attr_name;
        res_attr->attr.mode = S_IRUSR | S_IWUSR;
        res_attr->size = pci_resource_len(pdev, num);
-       res_attr->private = &pdev->resource[num];
+       res_attr->private = (void *)(unsigned long)num;
        retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
        if (retval)
                kfree(res_attr);
@@ -1207,9 +1197,9 @@ static int pci_create_resource_files(struct pci_dev *pdev)
                retval = pci_create_attr(pdev, i, 0);
                /* for prefetchable resources, create a WC mappable file */
-               if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
+               if (!retval && arch_can_pci_mmap_wc() &&
+                   pdev->resource[i].flags & IORESOURCE_PREFETCH)
                        retval = pci_create_attr(pdev, i, 1);
-
                if (retval) {
                        pci_remove_resource_files(pdev);
                        return retval;
......
@@ -21,14 +21,14 @@ void pci_create_firmware_label_files(struct pci_dev *pdev);
 void pci_remove_firmware_label_files(struct pci_dev *pdev);
 #endif
 void pci_cleanup_rom(struct pci_dev *dev);
-#ifdef HAVE_PCI_MMAP
+
 enum pci_mmap_api {
        PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
        PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
 };
 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
                  enum pci_mmap_api mmap_api);
-#endif
+
 int pci_probe_reset_function(struct pci_dev *dev);

 /**
......
@@ -202,6 +202,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,

 #ifdef HAVE_PCI_MMAP
        case PCIIOC_MMAP_IS_IO:
+               if (!arch_can_pci_mmap_io())
+                       return -EINVAL;
                fpriv->mmap_state = pci_mmap_io;
                break;
@@ -210,14 +212,15 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
                break;

        case PCIIOC_WRITE_COMBINE:
-               if (arg)
-                       fpriv->write_combine = 1;
-               else
-                       fpriv->write_combine = 0;
-               break;
+               if (arch_can_pci_mmap_wc()) {
+                       if (arg)
+                               fpriv->write_combine = 1;
+                       else
+                               fpriv->write_combine = 0;
+                       break;
+               }
+               /* If arch decided it can't, fall through... */
 #endif /* HAVE_PCI_MMAP */
        default:
                ret = -EINVAL;
                break;
@@ -231,25 +234,35 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct pci_dev *dev = PDE_DATA(file_inode(file));
        struct pci_filp_private *fpriv = file->private_data;
-       int i, ret, write_combine;
+       int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

+       if (fpriv->mmap_state == pci_mmap_io) {
+               if (!arch_can_pci_mmap_io())
+                       return -EINVAL;
+               res_bit = IORESOURCE_IO;
+       }
+
        /* Make sure the caller is mapping a real resource for this device */
        for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-               if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
+               if (dev->resource[i].flags & res_bit &&
+                   pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
                        break;
        }

        if (i >= PCI_ROM_RESOURCE)
                return -ENODEV;

-       if (fpriv->mmap_state == pci_mmap_mem)
-               write_combine = fpriv->write_combine;
-       else
-               write_combine = 0;
-       ret = pci_mmap_page_range(dev, vma,
+       if (fpriv->mmap_state == pci_mmap_mem &&
+           fpriv->write_combine) {
+               if (dev->resource[i].flags & IORESOURCE_PREFETCH)
+                       write_combine = 1;
+               else
+                       return -EINVAL;
+       }
+
+       ret = pci_mmap_page_range(dev, i, vma,
                                  fpriv->mmap_state, write_combine);
        if (ret < 0)
                return ret;
......
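For contrast, the /proc/bus/pci path keeps its historical semantics: the mmap() offset is the "user visible" resource address from pci_resource_to_user(), and after this series the ioctls fail cleanly on architectures whose arch_can_pci_mmap_*() macros say no. A userspace sketch, assuming the PCIIOC_* constants from the uapi <linux/pci.h>; the BDF and BAR address are placeholders:

        #include <fcntl.h>
        #include <linux/pci.h>  /* PCIIOC_MMAP_IS_MEM, PCIIOC_WRITE_COMBINE */
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/proc/bus/pci/00/02.0", O_RDWR); /* placeholder BDF */
                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* Request a write-combining memory mapping; the ioctl now
                 * returns -EINVAL where arch_can_pci_mmap_wc() is 0, and the
                 * mmap() itself fails unless the BAR is prefetchable.
                 */
                if (ioctl(fd, PCIIOC_MMAP_IS_MEM) < 0 ||
                    ioctl(fd, PCIIOC_WRITE_COMBINE, 1) < 0) {
                        perror("ioctl");
                        close(fd);
                        return 1;
                }

                /* The offset is the user-visible BAR address; hypothetical
                 * value here, normally read from the device's resource file.
                 */
                off_t bar_addr = 0xf0000000;
                void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                               fd, bar_addr);
                if (p == MAP_FAILED)
                        perror("mmap");
                else
                        munmap(p, 4096);
                close(fd);
                return 0;
        }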
@@ -1617,6 +1617,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }

 #include <asm/pci.h>

+/* These two functions provide almost identical functionality. Depending
+ * on the architecture, one will be implemented as a wrapper around the
+ * other (in drivers/pci/mmap.c).
+ *
+ * pci_mmap_resource_range() maps a specific BAR, and vma->vm_pgoff
+ * is expected to be an offset within that region.
+ *
+ * pci_mmap_page_range() is the legacy architecture-specific interface,
+ * which accepts a "user visible" resource address converted by
+ * pci_resource_to_user(), as used in the legacy mmap() interface in
+ * /proc/bus/pci/.
+ */
+int pci_mmap_resource_range(struct pci_dev *dev, int bar,
+                           struct vm_area_struct *vma,
+                           enum pci_mmap_state mmap_state, int write_combine);
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+                       struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state, int write_combine);
+
+#ifndef arch_can_pci_mmap_wc
+#define arch_can_pci_mmap_wc()         0
+#endif
+
+#ifndef arch_can_pci_mmap_io
+#define arch_can_pci_mmap_io()         0
+#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
+#else
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+#endif
+
 #ifndef pci_root_bus_fwnode
 #define pci_root_bus_fwnode(bus)       NULL
 #endif
......