Commit 7329cf02 authored by Ingo Molnar

Merge branch 'amd-iommu/2.6.36' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
#endif /* !CONFIG_AMD_IOMMU_STATS */

+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
@@ -368,6 +368,9 @@ struct amd_iommu {
	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

+	/* flags read from acpi table */
+	u8 acpi_flags;
+
	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {
	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;

+	/*
+	 * This array is required to work around a potential BIOS bug.
+	 * The BIOS may miss to restore parts of the PCI configuration
+	 * space when the system resumes from S3. The result is that the
+	 * IOMMU does not execute commands anymore which leads to system
+	 * failure.
+	 */
+	u32 cache_cfg[4];
};

/*
......
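The comment added above documents the motivation for cache_cfg[]: on affected systems the BIOS can fail to restore parts of the RD890 IOMMU's PCI configuration space when resuming from S3, after which the IOMMU stops executing commands. The hunks further down in this commit read the four dwords at config offsets 0xf0, 0xf4, 0xf8 and 0xfc into cache_cfg[] at init time (init_iommu_from_pci) and write them back before the IOMMU is enabled (iommu_apply_quirks). Purely as an illustration, the save/restore pattern boils down to the sketch below; the two helper names are hypothetical and do not appear in the commit:

	/* Illustration only (not part of this commit): cache and restore the
	 * RD890 config dwords at 0xf0..0xfc that the BIOS may fail to bring
	 * back after S3. */
	static void rd890_cfg_save(struct amd_iommu *iommu)
	{
		int i;

		for (i = 0; i < 4; i++)
			pci_read_config_dword(iommu->dev, 0xf0 + 4 * i,
					      &iommu->cache_cfg[i]);
	}

	static void rd890_cfg_restore(struct amd_iommu *iommu)
	{
		int i;

		for (i = 0; i < 4; i++)
			pci_write_config_dword(iommu->dev, 0xf0 + 4 * i,
					       iommu->cache_cfg[i]);
	}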
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
			   size_t size,
			   int dir)
{
+	dma_addr_t flush_addr;
	dma_addr_t i, start;
	unsigned int pages;

@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
	    (dma_addr + size > dma_dom->aperture_size))
		return;

+	flush_addr = dma_addr;
	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+		iommu_flush_pages(&dma_dom->domain, flush_addr, size);
		dma_dom->need_flush = false;
	}
}
......
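The new flush_addr keeps a copy of the caller's address before dma_addr is rounded down with PAGE_MASK, and that copy is what gets passed to iommu_flush_pages(). The reasoning: the number of pages a flush of size bytes has to cover depends on the offset of the start address within its first page, so flushing at the already-masked address with the original size can come up one page short whenever the mapping did not start on a page boundary. A standalone arithmetic sketch of the effect (plain user-space C with an assumed 4 KiB page size, not kernel code):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/* Same arithmetic as iommu_num_pages(addr, len, PAGE_SIZE):
	 * pages spanned = round_up(offset-in-first-page + len) / PAGE_SIZE. */
	static unsigned long num_pages(unsigned long addr, unsigned long len)
	{
		return ((addr & ~PAGE_MASK) + len + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	int main(void)
	{
		unsigned long dma_addr = 0x1234, size = 0x2000;

		/* un-masked address: 3 pages (0x1000..0x3fff), the full mapping */
		printf("flush at 0x%lx -> %lu pages\n",
		       dma_addr, num_pages(dma_addr, size));

		/* masked address: only 2 pages, the last mapped page is missed */
		printf("flush at 0x%lx -> %lu pages\n",
		       dma_addr & PAGE_MASK, num_pages(dma_addr & PAGE_MASK, size));

		return 0;
	}

With the preserved flush_addr the flush covers all three pages of the example mapping; with the masked address the last mapped page would stay stale in the IOTLB.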
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+	if (is_rd890_iommu(iommu->dev)) {
+		pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
+		pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
+		pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
+		pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
+	}
}

/*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
	struct ivhd_entry *e;

	/*
-	 * First set the recommended feature enable bits from ACPI
-	 * into the IOMMU control registers
+	 * First save the recommended feature enable bits from ACPI
	 */
-	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
-		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
-		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-	/*
-	 * make IOMMU memory accesses cache coherent
-	 */
-	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
	}
}
+
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+	/*
+	 * make IOMMU memory accesses cache coherent
+	 */
+	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_quirks(struct amd_iommu *iommu)
+{
+	if (is_rd890_iommu(iommu->dev)) {
+		pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
+		pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
+		pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
+		pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
+	}
+}
+
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
	for_each_iommu(iommu) {
		iommu_disable(iommu);
+		iommu_apply_quirks(iommu);
+		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
......
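Calling iommu_apply_quirks() and iommu_init_flags() from enable_iommus() means the cached RD890 config dwords and the ACPI-recommended control bits are reprogrammed every time the IOMMUs are (re)enabled, not just once while parsing the ACPI tables; since the same enable path runs when the IOMMUs are brought back up after suspend, this is what lets the settings survive a BIOS that forgets to restore them on S3 resume. Purely as an illustration, the four ternaries in iommu_init_flags() could also be written as a table walk; the restatement below is not part of the commit and assumes only the IVHD_FLAG_*_MASK and CONTROL_* identifiers already used above:

	/* Illustration only: same flag-to-control-bit programming as
	 * iommu_init_flags(), expressed as a table walk. */
	static const struct {
		u8 flag_mask;	/* bit in iommu->acpi_flags (IVHD flags byte) */
		u8 ctrl_bit;	/* corresponding IOMMU control-register bit */
	} acpi_flag_map[] = {
		{ IVHD_FLAG_HT_TUN_EN_MASK,    CONTROL_HT_TUN_EN    },
		{ IVHD_FLAG_PASSPW_EN_MASK,    CONTROL_PASSPW_EN    },
		{ IVHD_FLAG_RESPASSPW_EN_MASK, CONTROL_RESPASSPW_EN },
		{ IVHD_FLAG_ISOC_EN_MASK,      CONTROL_ISOC_EN      },
	};

	static void iommu_init_flags_tablewalk(struct amd_iommu *iommu)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(acpi_flag_map); i++) {
			if (iommu->acpi_flags & acpi_flag_map[i].flag_mask)
				iommu_feature_enable(iommu, acpi_flag_map[i].ctrl_bit);
			else
				iommu_feature_disable(iommu, acpi_flag_map[i].ctrl_bit);
		}
	}

The commit keeps the explicit ternaries, which is the smaller change since that code is only being moved from init_iommu_from_acpi(), not rewritten.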
@@ -393,6 +393,9 @@
#define PCI_DEVICE_ID_VLSI_82C147	0x0105
#define PCI_DEVICE_ID_VLSI_VAS96011	0x0702

+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU	0x5a23
+
#define PCI_VENDOR_ID_ADL		0x1005
#define PCI_DEVICE_ID_ADL_2301		0x2301
......