Commit e24c2d96 authored by David S. Miller, committed by Greg Kroah-Hartman

[PATCH] PCI: DMA bursting advice

After seeing, at best, "guesses" at the following kind of
information in several drivers, I decided that we really need a way
for platforms to give specific advice about what works best with
their PCI controller implementation.

Basically, this new interface gives DMA bursting advice on
PCI.  There are three forms of the advice:

1) Burst as much as possible; it is not necessary to end bursts
   on any particular boundary for best performance.

2) Burst on some byte count multiple.  A DMA burst may span any
   number of bytes, but it is important to end the burst on an exact
   multiple of that byte count for best performance.

   The best example of this I am aware of is the PPC64 PCI
   controllers: if you end a burst mid-cacheline, the chip has to
   refetch the data and the IOMMU translations, which hurts
   performance a lot.

3) Burst on a single byte count multiple.  Bursts shall end
   exactly on the next multiple boundary for best performance.

   Sparc64 and Alpha's PCI controllers operate this way.  They
   disconnect any device which tries to burst across a cacheline
   boundary.

   Actually, newer sparc64 PCI controllers do not have this behavior.
   That is why the "pdev" is passed into the interface, so I can
   add code later to check which PCI controller the system is using
   and give advice accordingly.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent 2311b1f2
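For illustration only (this is not part of the patch), a driver for hardware with a programmable burst boundary might consume the advice roughly as sketched below. Only pci_dma_burst_advice() and enum pci_dma_burst_strategy come from this patch; foo_set_burst_boundary() and foo_tune_dma_burst() are hypothetical device-specific helpers invented for the example:

#include <linux/pci.h>

/* Hypothetical device-specific hook: tell the hardware where to break its
 * DMA bursts; 0 means "no boundary constraint".  A real driver would poke
 * a device register here. */
static void foo_set_burst_boundary(struct pci_dev *pdev, unsigned long boundary)
{
}

static void foo_tune_dma_burst(struct pci_dev *pdev)
{
	enum pci_dma_burst_strategy strat;
	unsigned long param;

	pci_dma_burst_advice(pdev, &strat, &param);

	switch (strat) {
	case PCI_DMA_BURST_INFINITY:
		/* Burst as much as possible; no boundary to respect. */
		foo_set_burst_boundary(pdev, 0);
		break;
	case PCI_DMA_BURST_BOUNDARY:
	case PCI_DMA_BURST_MULTIPLE:
		/* End bursts on (a multiple of) 'param' bytes, typically
		 * derived from the PCI cacheline size. */
		foo_set_burst_boundary(pdev, param);
		break;
	}
}

Drivers for hardware that has no such knob can simply ignore the advice.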
@@ -223,6 +223,23 @@ pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr,
	/* Nothing to do. */
}

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_BOUNDARY;
	*strategy_parameter = cacheline_size;
}

/* TODO: integrate with include/asm-generic/pci.h ? */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
@@ -42,6 +42,14 @@ static inline void pcibios_penalize_isa_irq(int irq)
#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state, int write_combine);
@@ -57,6 +57,14 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
 */
#define PCI_DMA_BUS_IS_PHYS (1)

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

/*
 * These are pretty much arbitary with the CoMEM implementation.
 * We have the whole address space to ourselves.
@@ -99,6 +99,14 @@ static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

#endif /* __KERNEL__ */

/* implement the pci_ DMA API in terms of the generic device dma_ one */
@@ -82,6 +82,23 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define sg_dma_len(sg) ((sg)->dma_length)
#define sg_dma_address(sg) ((sg)->dma_address)

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
@@ -130,6 +130,14 @@ extern void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
extern void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction);

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

extern void pcibios_resource_to_bus(struct pci_dev *dev,
	struct pci_bus_region *region, struct resource *res);
@@ -230,6 +230,23 @@ extern inline void pcibios_register_hba(struct pci_hba_data *x)
/* export the pci_ DMA API in terms of the dma_ one */
#include <asm-generic/pci-dma-compat.h>

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}

extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res);
@@ -69,6 +69,14 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

/*
 * At present there are very few 32-bit PPC machines that can have
 * memory above the 4GB point, and we don't support that.
@@ -78,6 +78,23 @@ static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
	return 0;
}

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}

extern int pci_domain_nr(struct pci_bus *bus);

/* Decide whether to display the domain number in /proc */
@@ -96,6 +96,14 @@ static inline void pcibios_penalize_isa_irq(int irq)
#define sg_dma_address(sg) (virt_to_bus((sg)->dma_address))
#define sg_dma_len(sg) ((sg)->length)

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

/* Board-specific fixup routines. */
extern void pcibios_fixup(void);
extern void pcibios_fixup_irqs(void);
@@ -86,6 +86,14 @@ static inline void pcibios_penalize_isa_irq(int irq)
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

/* Board-specific fixup routines. */
extern void pcibios_fixup(void);
extern void pcibios_fixup_irqs(void);
@@ -144,6 +144,14 @@ extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
#define pci_dac_dma_supported(dev, mask) (0)

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}
@@ -220,6 +220,23 @@ static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
	return (dma_addr == PCI_DMA_ERROR_CODE);
}

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_BOUNDARY;
	*strategy_parameter = cacheline_size;
}

/* Return the index of the PCI controller for device PDEV. */
extern int pci_domain_nr(struct pci_bus *bus);
@@ -81,6 +81,14 @@ extern void
pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr,
		     dma_addr_t dma_addr);

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}
@@ -123,6 +123,14 @@ pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr,
	flush_write_buffers();
}

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state, int write_combine);
@@ -874,6 +874,15 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)

enum pci_dma_burst_strategy {
	PCI_DMA_BURST_INFINITY,	/* make bursts as large as possible,
				   strategy_parameter is N/A */
	PCI_DMA_BURST_BOUNDARY,	/* disconnect at every strategy_parameter
				   byte boundaries */
	PCI_DMA_BURST_MULTIPLE,	/* disconnect at some multiple of
				   strategy_parameter byte boundaries */
};

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
extern struct pci_dev *isa_bridge;
#endif