Commit 0712dc7e authored by Ian Munsie, committed by Michael Ellerman

cxl: Fix issues when unmapping contexts

An issue was introduced with "cxl: Unmap MMIO regions when detaching a
context" (b123429e) where closing a
context normally could also unmap the problem state area of other
contexts currently using the AFU.

It was also discovered that after a context's MMIO space had been
unmapped it would read 0s when accessing it, whereas the expected
behaviour was for the access to fail altogether.

In order to address these issues, this patch does two things:

- Forced mmap unmapping is only done when we are forcefully detaching
  all contexts, and not in the normal detach path. Since the normal
  context close path is tied to the file release, any mmaps must have
  already been released, so we don't need to worry in that case.

- The mmap path now uses a vm_operations_struct with a fault handler.
  The fault handler ensures that the context is in started state,
  otherwise it fails the access attempt with a SIGBUS.

Fixes: b123429e ("cxl: Unmap MMIO regions when detaching a context")
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent bfe5fda8
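As background for the diff below, the new SIGBUS behaviour is easiest to see from user space. The following sketch is illustrative only and is not part of the patch: the device path /dev/cxl/afu0.0s, the page-sized mapping, and the assumption that the AFU is already enabled are all hypothetical, and a real workload would follow the ioctl sequence described in Documentation/powerpc/cxl.txt.

/* Minimal user-space sketch (assumptions noted above). */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_sigbus(int sig)
{
        (void)sig;
        /* Post-patch: access to a not-started context is refused. */
        printf("SIGBUS: context not in started state\n");
        exit(0);
}

int main(void)
{
        volatile unsigned int *psa;
        int fd;

        signal(SIGBUS, on_sigbus);

        /* Hypothetical slave context device node. */
        fd = open("/dev/cxl/afu0.0s", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* mmap() itself still succeeds; post-patch it only installs
         * cxl_mmap_vmops instead of mapping the area eagerly. */
        psa = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, 0);
        if (psa == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* No CXL_IOCTL_START_WORK has been issued, so the first touch
         * faults into cxl_mmap_fault() and raises SIGBUS. */
        (void)*psa;

        printf("unexpected: problem state access succeeded\n");
        return 0;
}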
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	return 0;
 }
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct cxl_context *ctx = vma->vm_file->private_data;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	u64 area, offset;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+
+	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+			__func__, ctx->pe, address, offset);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		area = ctx->afu->psn_phys;
+		if (offset > ctx->afu->adapter->ps_size)
+			return VM_FAULT_SIGBUS;
+	} else {
+		area = ctx->psn_phys;
+		if (offset > ctx->psn_size)
+			return VM_FAULT_SIGBUS;
+	}
+
+	mutex_lock(&ctx->status_mutex);
+
+	if (ctx->status != STARTED) {
+		mutex_unlock(&ctx->status_mutex);
+		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+	mutex_unlock(&ctx->status_mutex);
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+	.fault = cxl_mmap_fault,
+};
 /*
  * Map a per-context mmio space into the given vma.
  */
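Two details of the handler above are worth noting: vm_insert_pfn() writes the PTE for the faulting address directly, which is why the handler returns VM_FAULT_NOPAGE rather than a page, and both the STARTED check and the PFN insertion happen under status_mutex, so a mapping cannot be established while a context is being torn down. As the next hunk shows, cxl_context_iomap() correspondingly stops mapping the area eagerly and defers all population to this fault handler, one page at a time.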
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	u64 len = vma->vm_end - vma->vm_start;
 	len = min(len, ctx->psn_size);
 
-	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-	}
+	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+		/* make sure there is a valid per process space for this AFU */
+		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+			pr_devel("AFU doesn't support mmio space\n");
+			return -EINVAL;
+		}
 
-	/* make sure there is a valid per process space for this AFU */
-	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-		pr_devel("AFU doesn't support mmio space\n");
-		return -EINVAL;
+		/* Can't mmap until the AFU is enabled */
+		if (!ctx->afu->enabled)
+			return -EBUSY;
 	}
 
-	/* Can't mmap until the AFU is enabled */
-	if (!ctx->afu->enabled)
-		return -EBUSY;
-
 	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 		 ctx->psn_phys, ctx->pe , ctx->master);
 
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	return vm_iomap_memory(vma, ctx->psn_phys, len);
+	vma->vm_ops = &cxl_mmap_vmops;
+	return 0;
 }
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
 	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	wake_up_all(&ctx->wq);
-
-	/* Release Problem State Area mapping */
-	mutex_lock(&ctx->mapping_lock);
-	if (ctx->mapping)
-		unmap_mapping_range(ctx->mapping, 0, 0, 1);
-	mutex_unlock(&ctx->mapping_lock);
 }
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * created and torn down after the IDR removed
 		 */
 		__detach_context(ctx);
+
+		/*
+		 * We are force detaching - remove any active PSA mappings so
+		 * userspace cannot interfere with the card if it comes back.
+		 * Easiest way to exercise this is to unbind and rebind the
+		 * driver via sysfs while it is in use.
+		 */
+		mutex_lock(&ctx->mapping_lock);
+		if (ctx->mapping)
+			unmap_mapping_range(ctx->mapping, 0, 0, 1);
+		mutex_unlock(&ctx->mapping_lock);
 	}
 	mutex_unlock(&afu->contexts_lock);
 }
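unmap_mapping_range() with a hole length of 0 unmaps from the given offset through the end of the mapping (and even_cows = 1 also drops any private COW copies), so this zaps every PTE covering the context's problem state area. Any later userspace access then re-enters cxl_mmap_fault() and, because the context is no longer started, is refused with SIGBUS instead of reading 0s.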
......
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe); pr_devel("%s: pe: %i\n", __func__, ctx->pe);
mutex_lock(&ctx->status_mutex); /* Do this outside the status_mutex to avoid a circular dependency with
if (ctx->status != OPENED) { * the locking in cxl_mmap_fault() */
rc = -EIO;
goto out;
}
if (copy_from_user(&work, uwork, if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) { sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT; rc = -EFAULT;
goto out; goto out;
} }
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
rc = -EIO;
goto out;
}
/* /*
* if any of the reserved fields are set or any of the unused * if any of the reserved fields are set or any of the unused
* flags are set it's invalid * flags are set it's invalid
......
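The reordering in this last hunk is subtle: copy_from_user() can fault on the user buffer, and that fault is resolved with mmap_sem held; cxl_mmap_fault() likewise runs with mmap_sem held and then takes status_mutex. Performing the copy while holding status_mutex would therefore acquire the two locks in the opposite order to the fault path, which is the circular dependency the new comment refers to, so the copy is moved in front of the lock.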