diff --git a/dma-helpers.c b/dma-helpers.c
index 2e09ceb912a52bb49fbca46d579d3b2651345506..35cb500581ebb62db151ea0566c98d2accec0496 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -31,6 +31,8 @@ static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
 
 int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
 {
+    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);
+
     if (dma_has_iommu(dma)) {
         return iommu_dma_memory_set(dma, addr, c, len);
     }
diff --git a/dma.h b/dma.h
index f52a656fd5caf2a91325082a28fdfe16a1de86fd..acacf1bf27d216de319bf14fa2c48ed98116cf07 100644
--- a/dma.h
+++ b/dma.h
@@ -13,6 +13,7 @@
 #include <stdio.h>
 #include "hw/hw.h"
 #include "block.h"
+#include "kvm.h"
 
 typedef struct DMAContext DMAContext;
 typedef struct ScatterGatherEntry ScatterGatherEntry;
@@ -65,6 +66,31 @@ struct DMAContext {
     DMAUnmapFunc *unmap;
 };
 
+static inline void dma_barrier(DMAContext *dma, DMADirection dir)
+{
+    /*
+     * This is called before DMA read and write operations
+     * unless the _relaxed form is used and is responsible
+     * for providing some sane ordering of accesses vs
+     * concurrently running VCPUs.
+     *
+     * Users of map(), unmap() or lower level st/ld_*
+     * operations are responsible for providing their own
+     * ordering via barriers.
+     *
+     * This primitive implementation does a simple smp_mb()
+     * before each operation which provides pretty much full
+     * ordering.
+     *
+     * A smarter implementation can be devised if needed to
+     * use lighter barriers based on the direction of the
+     * transfer, the DMA context, etc...
+     */
+    if (kvm_enabled()) {
+        smp_mb();
+    }
+}
+
 static inline bool dma_has_iommu(DMAContext *dma)
 {
     return !!dma;
@@ -88,8 +114,9 @@ static inline bool dma_memory_valid(DMAContext *dma,
 int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                         void *buf, dma_addr_t len, DMADirection dir);
 
-static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
-                                void *buf, dma_addr_t len, DMADirection dir)
+static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
+                                        void *buf, dma_addr_t len,
+                                        DMADirection dir)
 {
     if (!dma_has_iommu(dma)) {
         /* Fast-path for no IOMMU */
@@ -101,6 +128,28 @@ static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
     }
 }
 
+static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
+                                          void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
+                                           const void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
+                                 DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                                void *buf, dma_addr_t len,
+                                DMADirection dir)
+{
+    dma_barrier(dma, dir);
+
+    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
+}
+
 static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                   void *buf, dma_addr_t len)
 {
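
For context, the calling convention this patch establishes is: use the ordinary
accessor once for the access that synchronizes with the guest (typically the
descriptor fetch, which takes the full dma_barrier()), then use the _relaxed
variants for dependent accesses whose ordering is already guaranteed. Below is
a minimal sketch of that pattern against the accessors above; the ExampleDesc
layout, the example_process_desc() helper and its error handling are invented
for illustration, and endianness conversion is omitted.

/* Hypothetical ring descriptor; the layout is invented for illustration. */
typedef struct ExampleDesc {
    uint64_t buf_addr;   /* guest-physical address of the data buffer */
    uint32_t len;        /* transfer length in bytes */
    uint32_t flags;      /* e.g. an ownership bit published by the driver */
} ExampleDesc;

/* Hypothetical helper: fetch one descriptor, then read its payload. */
static int example_process_desc(DMAContext *dma, dma_addr_t desc_addr,
                                void *scratch, dma_addr_t scratch_len)
{
    ExampleDesc desc;

    /*
     * dma_memory_read() runs dma_barrier() first, so this fetch is
     * ordered against the VCPU stores that published the descriptor.
     */
    if (dma_memory_read(dma, desc_addr, &desc, sizeof(desc))) {
        return -1;
    }

    if (desc.len > scratch_len) {
        return -1;
    }

    /*
     * The dependent payload access can use the _relaxed variant: the
     * barrier above already ordered us against the guest, and issuing
     * another smp_mb() per access would only add overhead.
     */
    return dma_memory_read_relaxed(dma, desc.buf_addr, scratch, desc.len);
}

The point of the rw/rw_relaxed split is that a device model typically needs
one ordering point per DMA transaction, not one per helper call.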