Commit f682e9c2 authored by Alexey Kardashevskiy, committed by David Gibson

memory: Add reporting of supported page sizes

Every IOMMU has some granularity that MemoryRegionIOMMUOps::translate
uses when translating; however, this information is not available outside
the translate context for various checks.

This adds a get_min_page_size callback to MemoryRegionIOMMUOps and
a wrapper for it so IOMMU users (such as VFIO) can know the minimum
actual page size supported by an IOMMU.

As an IOMMU MR represents a guest IOMMU, this uses TARGET_PAGE_SIZE
as the fallback.

This removes vfio_container_granularity() and uses the new helper in
memory_region_iommu_replay() when replaying IOMMU mappings on an added
IOMMU memory region.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
[dwg: Removed an unnecessary calculation]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Parent f0278900
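
As a rough illustration of how an IOMMU user such as VFIO might consume the new wrapper (the function below is a hypothetical sketch, not part of this commit):

/* Hypothetical alignment check against the IOMMU's reported minimum
 * page size; my_iova_range_is_aligned is illustrative only. */
static bool my_iova_range_is_aligned(MemoryRegion *iommu_mr,
                                     hwaddr iova, hwaddr size)
{
    uint64_t pgsize = memory_region_iommu_get_min_page_size(iommu_mr);

    /* The wrapper falls back to TARGET_PAGE_SIZE when the IOMMU does not
     * implement get_min_page_size, so pgsize is always non-zero here. */
    return !(iova & (pgsize - 1)) && !(size & (pgsize - 1));
}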
hw/ppc/spapr_iommu.c
@@ -149,6 +149,13 @@ static void spapr_tce_table_pre_save(void *opaque)
                        tcet->bus_offset, tcet->page_shift);
 }
 
+static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
+{
+    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
+
+    return 1ULL << tcet->page_shift;
+}
+
 static int spapr_tce_table_post_load(void *opaque, int version_id)
 {
     sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
@@ -228,6 +235,7 @@ static const VMStateDescription vmstate_spapr_tce_table = {
 
 static MemoryRegionIOMMUOps spapr_iommu_ops = {
     .translate = spapr_tce_translate_iommu,
+    .get_min_page_size = spapr_tce_get_min_page_size,
 };
 
 static int spapr_tce_table_realize(DeviceState *dev)
...
hw/vfio/common.c
@@ -321,11 +321,6 @@ out:
     rcu_read_unlock();
 }
 
-static hwaddr vfio_container_granularity(VFIOContainer *container)
-{
-    return (hwaddr)1 << ctz64(container->iova_pgsizes);
-}
-
 static void vfio_listener_region_add(MemoryListener *listener,
                                      MemoryRegionSection *section)
 {
@@ -392,9 +387,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
 
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
-        memory_region_iommu_replay(giommu->iommu, &giommu->n,
-                                   vfio_container_granularity(container),
-                                   false);
+        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);
 
         return;
     }
...
include/exec/memory.h
@@ -151,6 +151,8 @@ typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
 struct MemoryRegionIOMMUOps {
     /* Return a TLB entry that contains a given address. */
     IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
+    /* Returns minimum supported page size */
+    uint64_t (*get_min_page_size)(MemoryRegion *iommu);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
@@ -572,6 +574,16 @@ static inline bool memory_region_is_iommu(MemoryRegion *mr)
 }
 
+/**
+ * memory_region_iommu_get_min_page_size: get minimum supported page size
+ * for an iommu
+ *
+ * Returns minimum supported page size for an iommu.
+ *
+ * @mr: the memory region being queried
+ */
+uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);
+
 /**
  * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
  *
@@ -596,16 +608,15 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
 
 /**
  * memory_region_iommu_replay: replay existing IOMMU translations to
- * a notifier
+ * a notifier with the minimum page granularity returned by
+ * mr->iommu_ops->get_page_size().
  *
  * @mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
- * @granularity: Minimum page granularity to replay notifications for
 * @is_write: Whether to treat the replay as a translate "write"
 *            through the iommu
 */
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
-                                hwaddr granularity, bool is_write);
+void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
 
 /**
  * memory_region_unregister_iommu_notifier: unregister a notifier for
...
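
A minimal caller sketch combining notifier registration with the new two-argument replay, modelled on the VFIO hunk above (the my_* names are hypothetical):

static void my_iommu_map_notify(Notifier *n, void *data)
{
    IOMMUTLBEntry *iotlb = data;   /* one translation entry per callback */

    /* ... map iotlb->iova .. iotlb->iova + iotlb->addr_mask as needed ... */
}

static Notifier my_iommu_notifier = { .notify = my_iommu_map_notify };

static void my_attach_to_guest_iommu(MemoryRegion *iommu_mr)
{
    memory_region_register_iommu_notifier(iommu_mr, &my_iommu_notifier);
    /* Replay walks the region at the granularity the IOMMU itself reports,
     * so no explicit granularity argument is passed any more. */
    memory_region_iommu_replay(iommu_mr, &my_iommu_notifier, false);
}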
memory.c
@@ -1502,12 +1502,22 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
     notifier_list_add(&mr->iommu_notify, n);
 }
 
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
-                                hwaddr granularity, bool is_write)
+uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
 {
-    hwaddr addr;
+    assert(memory_region_is_iommu(mr));
+    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
+        return mr->iommu_ops->get_min_page_size(mr);
+    }
+    return TARGET_PAGE_SIZE;
+}
+
+void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
+{
+    hwaddr addr, granularity;
     IOMMUTLBEntry iotlb;
 
+    granularity = memory_region_iommu_get_min_page_size(mr);
+
     for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
         if (iotlb.perm != IOMMU_NONE) {
...