Commit ceee0e95, authored by Linus Torvalds

Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core/iommu changes from Ingo Molnar.

* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  iommu/dmar: Use pr_format() instead of PREFIX to tidy up pr_*() calls
  iommu/dmar: Reserve mmio space used by the IOMMU, if the BIOS forgets to
  iommu/dmar: Replace printks with appropriate pr_*()
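
The heart of the printk cleanup is the pr_fmt() hook: rather than pasting a hand-rolled PREFIX macro into every message, dmar.c now defines pr_fmt() before the header that provides the pr_*() macros, and every pr_info()/pr_warn()/pr_err() call in the file picks up the "dmar: " prefix automatically. Below is a minimal userspace sketch of the mechanism; printf stands in for printk, and the simplified pr_* macros are illustrative, not the kernel's real definitions in include/linux/printk.h.

/* Userspace mock of the kernel's pr_fmt()/pr_*() machinery. */
#include <stdio.h>

/* Must be visible before the pr_*() macros are expanded (in the kernel:
 * before printk.h is included). "dmar" stands in for KBUILD_MODNAME. */
#define pr_fmt(fmt) "dmar" ": " fmt

/* Simplified stand-ins for the kernel's pr_*() macros. */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
    /* Prints "dmar: Host address width 39" -- the prefix is pasted in by
     * the preprocessor, so call sites no longer need a PREFIX macro. */
    pr_info("Host address width %d\n", 39);
    pr_warn("Unsupported device scope\n");
    return 0;
}

Because pr_fmt() is consumed when the pr_*() macros are expanded, the #define really does have to precede the printk.h include, which is why the new line sits above the first #include in the hunk below.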
drivers/iommu/dmar.c

@@ -26,6 +26,8 @@
  * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
+#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/iova.h>
@@ -39,8 +41,6 @@
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
-#define PREFIX "DMAR: "
-
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
  * these units are not supported by the architecture.
@@ -83,16 +83,12 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
          * ignore it
          */
         if (!bus) {
-            printk(KERN_WARNING
-            PREFIX "Device scope bus [%d] not found\n",
-            scope->bus);
+            pr_warn("Device scope bus [%d] not found\n", scope->bus);
             break;
         }
         pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
         if (!pdev) {
-            printk(KERN_WARNING PREFIX
-            "Device scope device [%04x:%02x:%02x.%02x] not found\n",
-            segment, bus->number, path->dev, path->fn);
+            /* warning will be printed below */
             break;
         }
         path ++;
@@ -100,9 +96,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
         bus = pdev->subordinate;
     }
     if (!pdev) {
-        printk(KERN_WARNING PREFIX
-        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
-        segment, scope->bus, path->dev, path->fn);
+        pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
+            segment, scope->bus, path->dev, path->fn);
         *dev = NULL;
         return 0;
     }
@@ -110,9 +105,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
         pdev->subordinate) || (scope->entry_type == \
         ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
         pci_dev_put(pdev);
-        printk(KERN_WARNING PREFIX
-            "Device scope type does not match for %s\n",
-            pci_name(pdev));
+        pr_warn("Device scope type does not match for %s\n",
+            pci_name(pdev));
         return -EINVAL;
     }
     *dev = pdev;
@@ -134,8 +128,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
             scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
             (*cnt)++;
         else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-            printk(KERN_WARNING PREFIX
-                "Unsupported device scope\n");
+            pr_warn("Unsupported device scope\n");
         }
         start += scope->length;
     }
@@ -261,25 +254,23 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
     case ACPI_DMAR_TYPE_HARDWARE_UNIT:
         drhd = container_of(header, struct acpi_dmar_hardware_unit,
                     header);
-        printk (KERN_INFO PREFIX
-            "DRHD base: %#016Lx flags: %#x\n",
+        pr_info("DRHD base: %#016Lx flags: %#x\n",
             (unsigned long long)drhd->address, drhd->flags);
         break;
     case ACPI_DMAR_TYPE_RESERVED_MEMORY:
         rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                     header);
-        printk (KERN_INFO PREFIX
-            "RMRR base: %#016Lx end: %#016Lx\n",
+        pr_info("RMRR base: %#016Lx end: %#016Lx\n",
             (unsigned long long)rmrr->base_address,
             (unsigned long long)rmrr->end_address);
         break;
     case ACPI_DMAR_TYPE_ATSR:
         atsr = container_of(header, struct acpi_dmar_atsr, header);
-        printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+        pr_info("ATSR flags: %#x\n", atsr->flags);
         break;
     case ACPI_DMAR_HARDWARE_AFFINITY:
         rhsa = container_of(header, struct acpi_dmar_rhsa, header);
-        printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+        pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
             (unsigned long long)rhsa->base_address,
             rhsa->proximity_domain);
         break;
@@ -299,7 +290,7 @@ static int __init dmar_table_detect(void)
                 &dmar_tbl_size);
 
     if (ACPI_SUCCESS(status) && !dmar_tbl) {
-        printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+        pr_warn("Unable to map DMAR\n");
         status = AE_NOT_FOUND;
     }
 
@@ -333,20 +324,18 @@ parse_dmar_table(void)
         return -ENODEV;
 
     if (dmar->width < PAGE_SHIFT - 1) {
-        printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+        pr_warn("Invalid DMAR haw\n");
         return -EINVAL;
     }
 
-    printk (KERN_INFO PREFIX "Host address width %d\n",
-        dmar->width + 1);
+    pr_info("Host address width %d\n", dmar->width + 1);
 
     entry_header = (struct acpi_dmar_header *)(dmar + 1);
     while (((unsigned long)entry_header) <
            (((unsigned long)dmar) + dmar_tbl->length)) {
         /* Avoid looping forever on bad ACPI tables */
         if (entry_header->length == 0) {
-            printk(KERN_WARNING PREFIX
-                "Invalid 0-length structure\n");
+            pr_warn("Invalid 0-length structure\n");
             ret = -EINVAL;
             break;
         }
@@ -369,8 +358,7 @@ parse_dmar_table(void)
 #endif
             break;
         default:
-            printk(KERN_WARNING PREFIX
-                "Unknown DMAR structure type %d\n",
+            pr_warn("Unknown DMAR structure type %d\n",
                 entry_header->type);
             ret = 0; /* for forward compatibility */
             break;
@@ -469,12 +457,12 @@ int __init dmar_table_init(void)
     ret = parse_dmar_table();
     if (ret) {
         if (ret != -ENODEV)
-            printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+            pr_info("parse DMAR table failure.\n");
         return ret;
     }
 
     if (list_empty(&dmar_drhd_units)) {
-        printk(KERN_INFO PREFIX "No DMAR devices found\n");
+        pr_info("No DMAR devices found\n");
         return -ENODEV;
     }
@@ -506,8 +494,7 @@ int __init check_zero_address(void)
            (((unsigned long)dmar) + dmar_tbl->length)) {
         /* Avoid looping forever on bad ACPI tables */
         if (entry_header->length == 0) {
-            printk(KERN_WARNING PREFIX
-                "Invalid 0-length structure\n");
+            pr_warn("Invalid 0-length structure\n");
             return 0;
         }
@@ -558,8 +545,7 @@ int __init detect_intel_iommu(void)
         if (ret && irq_remapping_enabled && cpu_has_x2apic &&
             dmar->flags & 0x1)
-            printk(KERN_INFO
-                "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+            pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
 
         if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
             iommu_detected = 1;
@@ -579,14 +565,89 @@ int __init detect_intel_iommu(void)
     }
 
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+    iounmap(iommu->reg);
+    release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base resgister
+ *
+ * Memory map the iommu's registers. Start w/ a single page, and
+ * possibly expand if that turns out to be insufficent.
+ */
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+    int map_size, err=0;
+
+    iommu->reg_phys = phys_addr;
+    iommu->reg_size = VTD_PAGE_SIZE;
+
+    if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+        pr_err("IOMMU: can't reserve memory\n");
+        err = -EBUSY;
+        goto out;
+    }
+
+    iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+    if (!iommu->reg) {
+        pr_err("IOMMU: can't map the region\n");
+        err = -ENOMEM;
+        goto release;
+    }
+
+    iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+    iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+    if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+        err = -EINVAL;
+        warn_invalid_dmar(phys_addr, " returns all ones");
+        goto unmap;
+    }
+
+    /* the registers might be more than one page */
+    map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+             cap_max_fault_reg_offset(iommu->cap));
+    map_size = VTD_PAGE_ALIGN(map_size);
+    if (map_size > iommu->reg_size) {
+        iounmap(iommu->reg);
+        release_mem_region(iommu->reg_phys, iommu->reg_size);
+        iommu->reg_size = map_size;
+        if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+                    iommu->name)) {
+            pr_err("IOMMU: can't reserve memory\n");
+            err = -EBUSY;
+            goto out;
+        }
+        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+        if (!iommu->reg) {
+            pr_err("IOMMU: can't map the region\n");
+            err = -ENOMEM;
+            goto release;
+        }
+    }
+    err = 0;
+    goto out;
+
+unmap:
+    iounmap(iommu->reg);
+
+release:
+    release_mem_region(iommu->reg_phys, iommu->reg_size);
+
+out:
+    return err;
+}
+
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
     struct intel_iommu *iommu;
-    int map_size;
     u32 ver;
     static int iommu_allocated = 0;
     int agaw = 0;
     int msagaw = 0;
+    int err;
 
     if (!drhd->reg_base_addr) {
         warn_invalid_dmar(0, "");
@@ -600,30 +661,22 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
     iommu->seq_id = iommu_allocated++;
     sprintf (iommu->name, "dmar%d", iommu->seq_id);
 
-    iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
-    if (!iommu->reg) {
-        printk(KERN_ERR "IOMMU: can't map the region\n");
+    err = map_iommu(iommu, drhd->reg_base_addr);
+    if (err) {
+        pr_err("IOMMU: failed to map %s\n", iommu->name);
         goto error;
     }
-    iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-    iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-    if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
-        warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
-        goto err_unmap;
-    }
 
+    err = -EINVAL;
     agaw = iommu_calculate_agaw(iommu);
     if (agaw < 0) {
-        printk(KERN_ERR
-            "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+        pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
             iommu->seq_id);
         goto err_unmap;
     }
     msagaw = iommu_calculate_max_sagaw(iommu);
     if (msagaw < 0) {
-        printk(KERN_ERR
-            "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+        pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
             iommu->seq_id);
         goto err_unmap;
     }
@@ -632,19 +685,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 
     iommu->node = -1;
 
-    /* the registers might be more than one page */
-    map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-        cap_max_fault_reg_offset(iommu->cap));
-    map_size = VTD_PAGE_ALIGN(map_size);
-    if (map_size > VTD_PAGE_SIZE) {
-        iounmap(iommu->reg);
-        iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-        if (!iommu->reg) {
-            printk(KERN_ERR "IOMMU: can't map the region\n");
-            goto error;
-        }
-    }
-
     ver = readl(iommu->reg + DMAR_VER_REG);
     pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
         iommu->seq_id,
@@ -659,10 +699,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
     return 0;
 
  err_unmap:
-    iounmap(iommu->reg);
+    unmap_iommu(iommu);
 error:
     kfree(iommu);
-    return -1;
+    return err;
 }
@@ -673,7 +713,8 @@ void free_iommu(struct intel_iommu *iommu)
     free_dmar_iommu(iommu);
 
     if (iommu->reg)
-        iounmap(iommu->reg);
+        unmap_iommu(iommu);
+
     kfree(iommu);
 }
@@ -710,7 +751,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
     if (fault & DMA_FSTS_IQE) {
         head = readl(iommu->reg + DMAR_IQH_REG);
         if ((head >> DMAR_IQ_SHIFT) == index) {
-            printk(KERN_ERR "VT-d detected invalid descriptor: "
+            pr_err("VT-d detected invalid descriptor: "
                 "low=%llx, high=%llx\n",
                 (unsigned long long)qi->desc[index].low,
                 (unsigned long long)qi->desc[index].high);
@@ -1129,15 +1170,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
     reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
     if (fault_type == INTR_REMAP)
-        printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+        pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
             "fault index %llx\n"
             "INTR-REMAP:[fault reason %02d] %s\n",
             (source_id >> 8), PCI_SLOT(source_id & 0xFF),
             PCI_FUNC(source_id & 0xFF), addr >> 48,
             fault_reason, reason);
     else
-        printk(KERN_ERR
-            "DMAR:[%s] Request device [%02x:%02x.%d] "
+        pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
             "fault addr %llx \n"
             "DMAR:[fault reason %02d] %s\n",
             (type ? "DMA Read" : "DMA Write"),
@@ -1157,8 +1197,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
     raw_spin_lock_irqsave(&iommu->register_lock, flag);
     fault_status = readl(iommu->reg + DMAR_FSTS_REG);
     if (fault_status)
-        printk(KERN_ERR "DRHD: handling fault status reg %x\n",
-            fault_status);
+        pr_err("DRHD: handling fault status reg %x\n", fault_status);
 
     /* TBD: ignore advanced fault log currently */
     if (!(fault_status & DMA_FSTS_PPF))
@@ -1224,7 +1263,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
     irq = create_irq();
     if (!irq) {
-        printk(KERN_ERR "IOMMU: no free vectors\n");
+        pr_err("IOMMU: no free vectors\n");
         return -EINVAL;
     }
@@ -1241,7 +1280,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
     ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
     if (ret)
-        printk(KERN_ERR "IOMMU: can't request irq\n");
+        pr_err("IOMMU: can't request irq\n");
     return ret;
 }
@@ -1258,8 +1297,7 @@ int __init enable_drhd_fault_handling(void)
         ret = dmar_set_interrupt(iommu);
 
         if (ret) {
-            printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
-                " interrupt, ret %d\n",
+            pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
                 (unsigned long long)drhd->reg_base_addr, ret);
             return -1;
         }
...
include/linux/intel-iommu.h

@@ -308,6 +308,8 @@ enum {
 
 struct intel_iommu {
     void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
+    u64             reg_phys; /* physical address of hw register set */
+    u64             reg_size; /* size of hw register set */
     u64             cap;
     u64             ecap;
     u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
...
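
The "reserve mmio space" bullet is what the new map_iommu()/unmap_iommu() pair above implements: claim the register range with request_mem_region() so it shows up in /proc/iomem even when the BIOS forgot to reserve it, ioremap a single page, learn the real size of the register file from the capability registers, and redo both steps at the larger size if needed. The following is a condensed, hypothetical sketch of that flow; my_map_regs(), struct my_regs, and the size-discovery read are illustrative stand-ins, while request_mem_region()/release_mem_region()/ioremap()/iounmap()/readl() are the real kernel APIs.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/types.h>

struct my_regs {
    void __iomem *base;
    u64 phys;
    u64 size;
};

static int my_map_regs(struct my_regs *r, u64 phys, const char *name)
{
    u64 needed;

    r->phys = phys;
    r->size = PAGE_SIZE;

    /* Claim the range so it is visible in /proc/iomem even if the BIOS
     * did not reserve it, then map a single page to start with. */
    if (!request_mem_region(r->phys, r->size, name))
        return -EBUSY;
    r->base = ioremap(r->phys, r->size);
    if (!r->base) {
        release_mem_region(r->phys, r->size);
        return -ENOMEM;
    }

    /* Pretend the first register reports how large the register file
     * really is (for VT-d this comes from the CAP/ECAP registers). */
    needed = PAGE_ALIGN(readl(r->base));
    if (needed > r->size) {
        /* Drop the one-page mapping and reservation, then redo both
         * at the full size, much as map_iommu() does above. */
        iounmap(r->base);
        release_mem_region(r->phys, r->size);
        r->size = needed;
        if (!request_mem_region(r->phys, r->size, name))
            return -EBUSY;
        r->base = ioremap(r->phys, r->size);
        if (!r->base) {
            release_mem_region(r->phys, r->size);
            return -ENOMEM;
        }
    }
    return 0;
}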