Commit 87e547fe authored by Pekka Paalanen, committed by Thomas Gleixner

x86 mmiotrace: fix page-unaligned ioremaps

mmiotrace_ioremap() expects to receive the original unaligned map phys address
and size. Also fix {un,}register_kmmio_probe() to deal properly with
unaligned size.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 970e6fa0
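The core of the fix is page-range arithmetic. The sketch below is a stand-alone user-space illustration, not kernel code: PAGE_SIZE and PAGE_MASK are re-created locally and the addresses are hypothetical. It shows why looping up to p->len alone under-counts pages when p->addr is not page-aligned, and how the new bound size_lim = p->len + (p->addr & ~PAGE_MASK) compensates.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xfed00800UL;	/* hypothetical unaligned probe start */
	unsigned long len  = 0x1000UL;		/* region straddles two pages */
	unsigned long pages_old = 0, pages_new = 0, size;

	/* Old bound: p->len alone; misses the tail page here. */
	for (size = 0; size < len; size += PAGE_SIZE)
		pages_old++;

	/* Fixed bound: account for the offset into the first page. */
	const unsigned long size_lim = len + (addr & ~PAGE_MASK);

	for (size = 0; size < size_lim; size += PAGE_SIZE)
		pages_new++;

	printf("old bound: %lu page(s), fixed bound: %lu page(s)\n",
	       pages_old, pages_new);
	return 0;
}

With these values the region [0xfed00800, 0xfed01800) touches two pages, but the old bound arms only one; the fixed bound arms both.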
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -123,6 +123,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 {
 	unsigned long pfn, offset, vaddr;
 	resource_size_t last_addr;
+	const resource_size_t unaligned_phys_addr = phys_addr;
+	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
 	unsigned long new_prot_val;
 	pgprot_t prot;
@@ -236,7 +238,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	}
 
 	ret_addr = (void __iomem *) (vaddr + offset);
-	mmiotrace_ioremap(phys_addr, size, ret_addr);
+	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
 	return ret_addr;
 }
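Why the copies are taken at the top of __ioremap_caller(): farther down, the function page-aligns phys_addr and size in place, so the values reaching the old mmiotrace_ioremap() call were already rounded. The sketch below reproduces that alignment pattern in user space under stated assumptions (locally defined PAGE_* helpers, hypothetical values); it illustrates the pattern rather than copying the kernel function verbatim.

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0xfed00800UL;	/* caller's unaligned address */
	unsigned long size      = 0x0400UL;	/* caller's unaligned size */

	/* Save copies first, as the patch does; the originals are
	 * rewritten by the alignment below. */
	const unsigned long unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;

	unsigned long last_addr = phys_addr + size - 1;

	/* Map whole pages: round the start down and the end up. */
	unsigned long offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	printf("mapped: %#lx + %#lx (offset %#lx)\n", phys_addr, size, offset);
	printf("traced: %#lx + %#lx\n", unaligned_phys_addr, unaligned_size);
	return 0;
}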
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -351,11 +351,19 @@ static void release_kmmio_fault_page(unsigned long page,
 	}
 }
 
+/*
+ * With page-unaligned ioremaps, one or two armed pages may contain
+ * addresses from outside the intended mapping. Events for these addresses
+ * are currently silently dropped. The events may result only from programming
+ * mistakes by accessing addresses before the beginning or past the end of a
+ * mapping.
+ */
 int register_kmmio_probe(struct kmmio_probe *p)
 {
 	unsigned long flags;
 	int ret = 0;
 	unsigned long size = 0;
+	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
@@ -364,7 +372,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
 	}
 	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
-	while (size < p->len) {
+	while (size < size_lim) {
 		if (add_kmmio_fault_page(p->addr + size))
 			pr_err("kmmio: Unable to set page fault.\n");
 		size += PAGE_SIZE;
@@ -436,11 +444,12 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 {
 	unsigned long flags;
 	unsigned long size = 0;
+	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
-	while (size < p->len) {
+	while (size < size_lim) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
 		size += PAGE_SIZE;
 	}
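The comment added to register_kmmio_probe() notes that whole-page arming can cover addresses outside the probed region. A short user-space sketch of those windows, with hypothetical values and locally defined PAGE_* macros:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xfed00800UL;	/* unaligned probe start */
	unsigned long len  = 0x1000UL;

	unsigned long size_lim = len + (addr & ~PAGE_MASK);
	unsigned long first_page = addr & PAGE_MASK;
	unsigned long armed_end = first_page +
		((size_lim + PAGE_SIZE - 1) & PAGE_MASK);

	printf("probed: [%#lx, %#lx)\n", addr, addr + len);
	printf("armed:  [%#lx, %#lx)\n", first_page, armed_end);
	/* Accesses in [first_page, addr) or [addr + len, armed_end)
	 * hit armed pages but are silently dropped. */
	return 0;
}

Here both the head of the first page and the tail of the last page are armed but fall outside the mapping, matching the "one or two" cases in the comment.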
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -280,6 +280,7 @@ static void ioremap_trace_core(unsigned long offset, unsigned long size,
 {
 	static atomic_t next_id;
 	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+	/* These are page-unaligned. */
 	struct mmiotrace_map map = {
 		.phys = offset,
 		.virt = (unsigned long)addr,