Commit 604c307b authored by Joerg Roedel


Merge branches 'dma-debug/next', 'amd-iommu/command-cleanups', 'amd-iommu/ats' and 'amd-iommu/extended-features' into iommu/2.6.40

Conflicts:
	arch/x86/include/asm/amd_iommu_types.h
	arch/x86/kernel/amd_iommu.c
	arch/x86/kernel/amd_iommu_init.c
@@ -690,6 +690,7 @@ config AMD_IOMMU
 	bool "AMD IOMMU support"
 	select SWIOTLB
 	select PCI_MSI
+	select PCI_IOV
 	depends on X86_64 && PCI && ACPI
 	---help---
 	  With this option you can enable support for AMD IOMMU hardware in
......
@@ -19,13 +19,11 @@
 #ifndef _ASM_X86_AMD_IOMMU_PROTO_H
 #define _ASM_X86_AMD_IOMMU_PROTO_H
 
-struct amd_iommu;
+#include <asm/amd_iommu_types.h>
 
 extern int amd_iommu_init_dma_ops(void);
 extern int amd_iommu_init_passthrough(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
 extern void amd_iommu_apply_erratum_63(u16 devid);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
@@ -44,4 +42,12 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
 	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+		return false;
+
+	return !!(iommu->features & f);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
@@ -68,12 +68,25 @@
 #define MMIO_CONTROL_OFFSET	0x0018
 #define MMIO_EXCL_BASE_OFFSET	0x0020
 #define MMIO_EXCL_LIMIT_OFFSET	0x0028
+#define MMIO_EXT_FEATURES	0x0030
 #define MMIO_CMD_HEAD_OFFSET	0x2000
 #define MMIO_CMD_TAIL_OFFSET	0x2008
 #define MMIO_EVT_HEAD_OFFSET	0x2010
 #define MMIO_EVT_TAIL_OFFSET	0x2018
 #define MMIO_STATUS_OFFSET	0x2020
 
+/* Extended Feature Bits */
+#define FEATURE_PREFETCH	(1ULL<<0)
+#define FEATURE_PPR		(1ULL<<1)
+#define FEATURE_X2APIC		(1ULL<<2)
+#define FEATURE_NX		(1ULL<<3)
+#define FEATURE_GT		(1ULL<<4)
+#define FEATURE_IA		(1ULL<<6)
+#define FEATURE_GA		(1ULL<<7)
+#define FEATURE_HE		(1ULL<<8)
+#define FEATURE_PC		(1ULL<<9)
+
 /* MMIO status bits */
 #define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
@@ -113,7 +126,9 @@
 /* command specific defines */
 #define CMD_COMPL_WAIT		0x01
 #define CMD_INV_DEV_ENTRY	0x02
 #define CMD_INV_IOMMU_PAGES	0x03
+#define CMD_INV_IOTLB_PAGES	0x04
+#define CMD_INV_ALL		0x08
 
 #define CMD_COMPL_WAIT_STORE_MASK	0x01
 #define CMD_COMPL_WAIT_INT_MASK		0x02
@@ -215,6 +230,8 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
+#define DTE_FLAG_IOTLB	0x01
+
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
 #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -227,6 +244,7 @@
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
+#define IOMMU_CAP_EFR     27
 
 #define MAX_DOMAIN_ID 65536
@@ -249,6 +267,8 @@ extern bool amd_iommu_dump;
 /* global flag if IOMMUs cache non-present entries */
 extern bool amd_iommu_np_cache;
 
+/* Only true if all IOMMUs support device IOTLBs */
+extern bool amd_iommu_iotlb_sup;
+
 /*
  * Make iterating over all IOMMUs easier
@@ -371,6 +391,9 @@ struct amd_iommu {
 	/* flags read from acpi table */
 	u8 acpi_flags;
 
+	/* Extended features */
+	u64 features;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -409,9 +432,6 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
-	/* becomes true if a command buffer reset is running */
-	bool reset_in_progress;
-
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
......
This diff is collapsed.
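Editor's note: judging by the conflict list, the collapsed diff most likely covers arch/x86/kernel/amd_iommu.c, where the iommu_flush_all_caches() helper declared in amd_iommu_init.c below is defined. The following is a minimal sketch of what such a helper can look like, built only from the primitives this merge introduces (iommu_feature(), FEATURE_IA, CMD_INV_ALL); it is not the collapsed code itself, and the amd_iommu_flush_* helpers it calls are hypothetical stand-ins:

    /*
     * Sketch only. If the hardware implements the INVALIDATE_ALL command
     * (extended feature bit FEATURE_IA), a single CMD_INV_ALL flushes
     * every internal cache of the IOMMU; otherwise device table entries
     * and TLB entries have to be flushed one class at a time.
     */
    void iommu_flush_all_caches(struct amd_iommu *iommu)
    {
    	if (iommu_feature(iommu, FEATURE_IA)) {
    		amd_iommu_flush_all(iommu);     /* hypothetical: issues CMD_INV_ALL */
    	} else {
    		amd_iommu_flush_dte_all(iommu); /* hypothetical: CMD_INV_DEV_ENTRY per devid */
    		amd_iommu_flush_tlb_all(iommu); /* hypothetical: CMD_INV_IOMMU_PAGES per domain */
    	}
    }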
@@ -137,6 +137,7 @@ int amd_iommus_present;
 /* IOMMUs have a non-present cache? */
 bool amd_iommu_np_cache __read_mostly;
+bool amd_iommu_iotlb_sup __read_mostly = true;
 
 /*
  * The ACPI table parsing functions set this variable on an error
@@ -180,6 +181,12 @@ static u32 dev_table_size;	/* size of the device table */
 static u32 alias_table_size;	/* size of the alias table */
 static u32 rlookup_table_size;	/* size if the rlookup table */
 
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
@@ -293,9 +300,23 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
-	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
+	static const char * const feat_str[] = {
+		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+		"IA", "GA", "HE", "PC", NULL
+	};
+	int i;
+
+	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
 	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
+
+	if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+		printk(KERN_CONT " extended features: ");
+		for (i = 0; feat_str[i]; ++i)
+			if (iommu_feature(iommu, (1ULL << i)))
+				printk(KERN_CONT " %s", feat_str[i]);
+	}
+	printk(KERN_CONT "\n");
+
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
@@ -651,7 +672,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
-	u32 range, misc;
+	u32 range, misc, low, high;
 	int i, j;
 
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
@@ -667,6 +688,15 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 				    MMIO_GET_LD(range));
 	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
 
+	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+		amd_iommu_iotlb_sup = false;
+
+	/* read extended feature bits */
+	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
+	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
+
+	iommu->features = ((u64)high << 32) | low;
+
 	if (!is_rd890_iommu(iommu->dev))
 		return;
@@ -1244,6 +1274,7 @@ static void enable_iommus(void)
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable(iommu);
+		iommu_flush_all_caches(iommu);
 	}
 }
@@ -1274,8 +1305,8 @@ static void amd_iommu_resume(void)
 	 * we have to flush after the IOMMUs are enabled because a
 	 * disabled IOMMU will never execute the commands we send
 	 */
-	amd_iommu_flush_all_devices();
-	amd_iommu_flush_all_domains();
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
 }
 
 static int amd_iommu_suspend(void)
......
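Editor's note: with the iommu_enable() change above, the boot log line gains a feature listing. A hypothetical example of the resulting dmesg output — the PCI address, capability offset, and feature set are invented for illustration; the shape (including the double space after the colon) follows the printk format strings in the hunk:

    AMD-Vi: Enabling IOMMU at 0000:00:00.2 cap 0x40 extended features:  PreF PPR NX GT IA GA HE PC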
@@ -39,6 +39,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
 #include <linux/dmi.h>
+#include <linux/pci-ats.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
......
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/pci-ats.h>
 #include "pci.h"
 
 #define VIRTFN_ID_LEN	16
......
@@ -250,15 +250,6 @@ struct pci_sriov {
 	u8 __iomem *mstate;	/* VF Migration State Array */
 };
 
-/* Address Translation Service */
-struct pci_ats {
-	int pos;	/* capability position */
-	int stu;	/* Smallest Translation Unit */
-	int qdep;	/* Invalidate Queue Depth */
-	int ref_cnt;	/* Physical Function reference count */
-	unsigned int is_enabled:1;	/* Enable bit is set */
-};
-
 #ifdef CONFIG_PCI_IOV
 extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
@@ -269,19 +260,6 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
-extern int pci_enable_ats(struct pci_dev *dev, int ps);
-extern void pci_disable_ats(struct pci_dev *dev);
-extern int pci_ats_queue_depth(struct pci_dev *dev);
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-	return dev->ats && dev->ats->is_enabled;
-}
 #else
 static inline int pci_iov_init(struct pci_dev *dev)
 {
@@ -304,21 +282,6 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
 	return 0;
 }
 
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
-	return -ENODEV;
-}
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
-	return -ENODEV;
-}
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-	return 0;
-}
 #endif /* CONFIG_PCI_IOV */
 
 static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
......
#ifndef LINUX_PCI_ATS_H
#define LINUX_PCI_ATS_H

/* Address Translation Service */
struct pci_ats {
	int pos;	/* capability position */
	int stu;	/* Smallest Translation Unit */
	int qdep;	/* Invalidate Queue Depth */
	int ref_cnt;	/* Physical Function reference count */
	unsigned int is_enabled:1;	/* Enable bit is set */
};

#ifdef CONFIG_PCI_IOV

extern int pci_enable_ats(struct pci_dev *dev, int ps);
extern void pci_disable_ats(struct pci_dev *dev);
extern int pci_ats_queue_depth(struct pci_dev *dev);
/**
 * pci_ats_enabled - query the ATS status
 * @dev: the PCI device
 *
 * Returns 1 if ATS capability is enabled, or 0 if not.
 */
static inline int pci_ats_enabled(struct pci_dev *dev)
{
	return dev->ats && dev->ats->is_enabled;
}

#else /* CONFIG_PCI_IOV */

static inline int pci_enable_ats(struct pci_dev *dev, int ps)
{
	return -ENODEV;
}

static inline void pci_disable_ats(struct pci_dev *dev)
{
}

static inline int pci_ats_queue_depth(struct pci_dev *dev)
{
	return -ENODEV;
}

static inline int pci_ats_enabled(struct pci_dev *dev)
{
	return 0;
}

#endif /* CONFIG_PCI_IOV */

#endif /* LINUX_PCI_ATS_H*/
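Editor's note: now that the ATS interface lives in a public header, any code built with CONFIG_PCI_IOV can reach it — which is why the AMD IOMMU Kconfig entry above gains "select PCI_IOV". A minimal usage sketch; only pci_enable_ats(), pci_ats_queue_depth(), and pci_disable_ats() come from the header above, while the function name and the choice of PAGE_SHIFT as the smallest translation unit are illustrative assumptions:

    #include <linux/pci.h>
    #include <linux/pci-ats.h>

    /* Sketch: enable the device IOTLB on a PCIe endpoint and return the
     * invalidate queue depth a caller would need for later IOTLB flushes. */
    static int example_enable_device_iotlb(struct pci_dev *pdev)
    {
    	int qdep, ret;

    	/* 'ps' is the smallest translation unit, given as a page shift */
    	ret = pci_enable_ats(pdev, PAGE_SHIFT);
    	if (ret)
    		return ret;	/* no ATS capability, or already enabled */

    	qdep = pci_ats_queue_depth(pdev);
    	if (qdep < 0) {
    		pci_disable_ats(pdev);
    		return qdep;
    	}

    	return qdep;
    }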
@@ -649,7 +649,7 @@ static int dma_debug_fs_init(void)
 	return -ENOMEM;
 }
 
-static int device_dma_allocations(struct device *dev)
+static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
 {
 	struct dma_debug_entry *entry;
 	unsigned long flags;
@@ -660,8 +660,10 @@
 	for (i = 0; i < HASH_SIZE; ++i) {
 		spin_lock(&dma_entry_hash[i].lock);
 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
-			if (entry->dev == dev)
+			if (entry->dev == dev) {
 				count += 1;
+				*out_entry = entry;
+			}
 		}
 		spin_unlock(&dma_entry_hash[i].lock);
 	}
@@ -674,6 +676,7 @@
 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct device *dev = data;
+	struct dma_debug_entry *uninitialized_var(entry);
 	int count;
 
 	if (global_disable)
@@ -681,12 +684,17 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 	switch (action) {
 	case BUS_NOTIFY_UNBOUND_DRIVER:
-		count = device_dma_allocations(dev);
+		count = device_dma_allocations(dev, &entry);
 		if (count == 0)
 			break;
-		err_printk(dev, NULL, "DMA-API: device driver has pending "
+		err_printk(dev, entry, "DMA-API: device driver has pending "
 			   "DMA allocations while released from device "
-			   "[count=%d]\n", count);
+			   "[count=%d]\n"
+			   "One of leaked entries details: "
+			   "[device address=0x%016llx] [size=%llu bytes] "
+			   "[mapped with %s] [mapped as %s]\n",
+			   count, entry->dev_addr, entry->size,
+			   dir2name[entry->direction], type2name[entry->type]);
 		break;
 	default:
 		break;
......
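Editor's note: this dma-debug change makes the unbind-time warning name one concrete leaked mapping instead of a bare count. A hypothetical report — all values are invented, and the dir2name/type2name strings ("DMA_FROM_DEVICE", "single") are assumed from dma-debug's usual tables; the wording otherwise follows the err_printk format string in the hunk:

    DMA-API: device driver has pending DMA allocations while released from device [count=3]
    One of leaked entries details: [device address=0x00000000bfd40000] [size=4096 bytes] [mapped with DMA_FROM_DEVICE] [mapped as single]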