Commit b5ea3305 authored by Keqian Zhu, committed by Zheng Zengkai

vfio/iommu_type1: Maintain a counter for non_pinned_groups

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZUKK
CVE: NA

------------------------------

With this counter, we no longer need to traverse all groups to update
the pinned-page dirty scope of the vfio_iommu.
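
For illustration only (an editor's sketch, not part of the patch): the counting scheme can be summarized in a few lines of standalone C. All names here (demo_iommu, demo_group, demo_attach, and so on) are hypothetical stand-ins for struct vfio_iommu, struct vfio_group, and the attach/pin/detach paths changed below; the real code updates the counter under iommu->lock.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for struct vfio_group / struct vfio_iommu. */
struct demo_group {
	bool pinned_page_dirty_scope;
};

struct demo_iommu {
	/* Counts attached groups that have not limited their dirty scope
	 * to pinned pages; replaces the bool pinned_page_dirty_scope. */
	uint64_t num_non_pinned_groups;
};

/* Attach of an IOMMU-backed group: it starts with full dirty scope. */
static void demo_attach(struct demo_iommu *iommu, struct demo_group *group)
{
	group->pinned_page_dirty_scope = false;
	iommu->num_non_pinned_groups++;
}

/* First pin: the group promotes itself to pinned-page dirty scope. */
static void demo_pin(struct demo_iommu *iommu, struct demo_group *group)
{
	if (!group->pinned_page_dirty_scope) {
		group->pinned_page_dirty_scope = true;
		iommu->num_non_pinned_groups--;
	}
}

/* Detach: a group that never promoted itself drops the counter too. */
static void demo_detach(struct demo_iommu *iommu, struct demo_group *group)
{
	if (!group->pinned_page_dirty_scope)
		iommu->num_non_pinned_groups--;
}

/* Dirty-scope query: an O(1) check instead of walking every group list. */
static bool demo_pinned_scope_only(struct demo_iommu *iommu)
{
	return iommu->num_non_pinned_groups == 0;
}

The invariant is that the counter always equals the number of attached groups whose pinned_page_dirty_scope is false, which is why the list-walking helper update_pinned_page_dirty_scope() can be removed outright in the diff below.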
Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent a466f5dd
@@ -72,10 +72,10 @@ struct vfio_iommu {
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
 	uint64_t		pgsize_bitmap;
+	uint64_t		num_non_pinned_groups;
 	bool			v2;
 	bool			nesting;
 	bool			dirty_page_tracking;
-	bool			pinned_page_dirty_scope;
 };
 
 struct vfio_domain {
@@ -161,7 +161,6 @@ static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 					       struct iommu_group *iommu_group);
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -749,7 +748,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		group = vfio_iommu_find_iommu_group(iommu, iommu_group);
 		if (!group->pinned_page_dirty_scope) {
 			group->pinned_page_dirty_scope = true;
-			update_pinned_page_dirty_scope(iommu);
+			iommu->num_non_pinned_groups--;
 		}
 
 		goto pin_done;
@@ -1027,7 +1026,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	 * mark all pages dirty if any IOMMU capable device is not able
 	 * to report dirty pages and all pages are pinned and mapped.
 	 */
-	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+	if (iommu->num_non_pinned_groups && dma->iommu_mapped)
 		bitmap_set(dma->bitmap, 0, nbits);
 
 	if (shift) {
@@ -1810,33 +1809,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 	return group;
 }
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
-{
-	struct vfio_domain *domain;
-	struct vfio_group *group;
-
-	list_for_each_entry(domain, &iommu->domain_list, next) {
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	if (iommu->external_domain) {
-		domain = iommu->external_domain;
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	iommu->pinned_page_dirty_scope = true;
-}
-
 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
 				  phys_addr_t *base)
 {
@@ -2281,8 +2253,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 			 * addition of a dirty tracking group.
 			 */
 			group->pinned_page_dirty_scope = true;
-			if (!iommu->pinned_page_dirty_scope)
-				update_pinned_page_dirty_scope(iommu);
 			mutex_unlock(&iommu->lock);
 
 			return 0;
@@ -2404,7 +2374,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	 * demotes the iommu scope until it declares itself dirty tracking
 	 * capable via the page pinning interface.
 	 */
-	iommu->pinned_page_dirty_scope = false;
+	iommu->num_non_pinned_groups++;
 	mutex_unlock(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
 
@@ -2623,7 +2593,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	 * to be promoted.
 	 */
 	if (update_dirty_scope) {
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 		if (iommu->dirty_page_tracking)
 			vfio_iommu_populate_bitmap_full(iommu);
 	}
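
A closing note on the update_user_bitmap() hunk above (again an editor's illustration, not part of the patch): the pessimistic fallback that reports every mapped page as dirty now keys off the counter directly. A minimal standalone sketch under the same hypothetical naming, assuming a plain unsigned long bitmap:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical stand-in for the fallback in update_user_bitmap(): if any
 * attached group cannot report per-page dirty state (counter non-zero) and
 * the range is IOMMU-mapped, mark all nbits pages dirty. */
static void demo_update_bitmap(uint64_t num_non_pinned_groups, bool iommu_mapped,
			       unsigned long *bitmap, size_t nbits)
{
	size_t i;

	if (!(num_non_pinned_groups && iommu_mapped))
		return;

	for (i = 0; i < nbits; i++)
		bitmap[i / DEMO_BITS_PER_WORD] |= 1UL << (i % DEMO_BITS_PER_WORD);
}

Compared with the old !iommu->pinned_page_dirty_scope test, the reported result is unchanged; only how the state is maintained on attach, pin, and detach differs.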