Commit a9ceae14 · Author: Xie Yongji · Committer: Pengyuan Zhao

vdpa: factor out vhost_vdpa_pa_map() and vhost_vdpa_pa_unmap()

mainline inclusion
from mainline-v5.15-rc1
commit 22af48cf
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5WXCZ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=22af48cf91aae5f2fd32fe811d9be1c52d7a801b

----------------------------------------------------------------------

The upcoming patch is going to add support for VA mapping/unmapping,
so let's first factor out the PA mapping/unmapping logic to make the
code more readable.
Suggested-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20210831103634.33-10-xieyongji@bytedance.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Pengyuan Zhao <zhaopengyuan@hisilicon.com>
Parent: b2a3683e
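
To make the refactoring easier to follow, here is a condensed sketch of the structure this patch introduces, distilled from the diff below (function bodies are elided, so this is not a compilable unit): the existing IOTLB entry points keep their names and their validation logic, while the PA-specific pin/unpin work moves into dedicated helpers.

/* Condensed sketch of the structure after this patch (bodies elided). */

/* PA-specific logic, factored out of the old vhost_vdpa_iotlb_unmap(). */
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	/* ... unpin the pages backing [start, last] and free the IOTLB entries ... */
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	return vhost_vdpa_pa_unmap(v, start, last);
}

/* PA-specific logic, factored out of the old vhost_vdpa_process_iotlb_update(). */
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	/* ... pin user pages and map them chunk by chunk via vhost_vdpa_map() ... */
	return 0;
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	/* Range validation and overlap checks stay here ... */
	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

Keeping the msg-level checks in vhost_vdpa_process_iotlb_update() means any mapping backend reached from there gets the same validation for free.
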
drivers/vhost/vdpa.c
@@ -537,7 +537,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	return r;
 }
 
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 {
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb *iotlb = dev->iotlb;
@@ -559,6 +559,11 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+	return vhost_vdpa_pa_unmap(v, start, last);
+}
+
 static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -639,38 +644,28 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
 	}
 }
 
-static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
-					   struct vhost_iotlb_msg *msg)
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct page **page_list;
 	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
 	unsigned int gup_flags = FOLL_LONGTERM;
 	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
 	unsigned long lock_limit, sz2pin, nchunks, i;
-	u64 iova = msg->iova;
+	u64 start = iova;
 	long pinned;
 	int ret = 0;
 
-	if (msg->iova < v->range.first || !msg->size ||
-	    msg->iova > U64_MAX - msg->size + 1 ||
-	    msg->iova + msg->size - 1 > v->range.last)
-		return -EINVAL;
-
-	if (vhost_iotlb_itree_first(iotlb, msg->iova,
-				    msg->iova + msg->size - 1))
-		return -EEXIST;
-
 	/* Limit the use of memory for bookkeeping */
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list)
 		return -ENOMEM;
 
-	if (msg->perm & VHOST_ACCESS_WO)
+	if (perm & VHOST_ACCESS_WO)
 		gup_flags |= FOLL_WRITE;
 
-	npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
+	npages = PFN_UP(size + (iova & ~PAGE_MASK));
 	if (!npages) {
 		ret = -EINVAL;
 		goto free;
@@ -684,7 +679,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 		goto unlock;
 	}
 
-	cur_base = msg->uaddr & PAGE_MASK;
+	cur_base = uaddr & PAGE_MASK;
 	iova &= PAGE_MASK;
 	nchunks = 0;
 
@@ -715,7 +710,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 			csize = PFN_PHYS(last_pfn - map_pfn + 1);
 			ret = vhost_vdpa_map(v, iova, csize,
 					     PFN_PHYS(map_pfn),
-					     msg->perm);
+					     perm);
 			if (ret) {
 				/*
 				 * Unpin the pages that are left unmapped
@@ -744,7 +739,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
 	/* Pin the rest chunk */
 	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
-			     PFN_PHYS(map_pfn), msg->perm);
+			     PFN_PHYS(map_pfn), perm);
 out:
 	if (ret) {
 		if (nchunks) {
@@ -763,13 +758,33 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
 				unpin_user_page(pfn_to_page(pfn));
 		}
-		vhost_vdpa_unmap(v, msg->iova, msg->size);
+		vhost_vdpa_unmap(v, start, size);
 	}
 unlock:
 	mmap_read_unlock(dev->mm);
 free:
 	free_page((unsigned long)page_list);
 	return ret;
+}
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+					   struct vhost_iotlb_msg *msg)
+{
+	struct vhost_dev *dev = &v->vdev;
+	struct vhost_iotlb *iotlb = dev->iotlb;
+
+	if (msg->iova < v->range.first || !msg->size ||
+	    msg->iova > U64_MAX - msg->size + 1 ||
+	    msg->iova + msg->size - 1 > v->range.last)
+		return -EINVAL;
+
+	if (vhost_iotlb_itree_first(iotlb, msg->iova,
+				    msg->iova + msg->size - 1))
+		return -EEXIST;
+
+	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+				 msg->perm);
 }
 
 static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
...
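
The point of the indirection becomes clearer with the VA support mentioned in the commit message in mind: vhost_vdpa_iotlb_unmap() and vhost_vdpa_process_iotlb_update() are now the single places where a VA path could be selected. Purely as an illustration of that idea (the follow-up patch is not shown here; vhost_vdpa_va_map(), vhost_vdpa_va_unmap() and the use_va flag are assumptions about it, not part of this commit), the dispatch could look roughly like this:

/*
 * Illustration only: how the wrappers added above could become dispatch
 * points once a VA path exists. vhost_vdpa_va_map()/vhost_vdpa_va_unmap()
 * and the use_va flag are assumptions about the follow-up patch.
 */
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	if (v->vdpa->use_va)
		return vhost_vdpa_va_unmap(v, start, last);

	return vhost_vdpa_pa_unmap(v, start, last);
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	/* ... same range validation and overlap checks as in the diff ... */

	if (v->vdpa->use_va)
		return vhost_vdpa_va_map(v, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}
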