Commit 9cf06697 authored by Sheng Yang, committed by David Woodhouse

intel-iommu: VT-d page table to support snooping control bit

The user can request to enable snooping control through VT-d page table.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Parent: dbb9fd86
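With this change, a user of the generic IOMMU API (for example KVM device assignment) can request cache-coherent, snooped DMA mappings by passing the new IOMMU_CACHE flag; when the domain's hardware supports snoop control (dmar_domain->iommu_snooping), intel_iommu_map_range() translates that into the DMA_PTE_SNP bit in the VT-d page table. Below is a minimal caller sketch, not part of this patch: it assumes the iommu_map_range() and iommu_domain_has_cap() interfaces of this kernel generation and the IOMMU_CAP_CACHE_COHERENCY capability from the parent commit; the helper name map_one_page_coherent() is purely illustrative.

#include <linux/iommu.h>

/*
 * Illustrative helper (not part of this patch): map one page into a
 * device's IOMMU domain and request snooped DMA when the hardware can
 * honour it.
 */
static int map_one_page_coherent(struct iommu_domain *domain,
                                 unsigned long iova, phys_addr_t paddr)
{
        int prot = IOMMU_READ | IOMMU_WRITE;

        /* Only ask for snooping if the domain advertises snoop control. */
        if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
                prot |= IOMMU_CACHE;

        /* intel_iommu_map_range() turns IOMMU_CACHE into DMA_PTE_SNP. */
        return iommu_map_range(domain, iova, paddr, PAGE_SIZE, prot);
}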
@@ -164,7 +164,8 @@ static inline void context_clear_entry(struct context_entry *context)
  * 1: writable
  * 2-6: reserved
  * 7: super page
- * 8-11: available
+ * 8-10: available
+ * 11: snoop behavior
  * 12-63: Host physcial address
  */
 struct dma_pte {
@@ -186,6 +187,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte)
         pte->val |= DMA_PTE_WRITE;
 }
 
+static inline void dma_set_pte_snp(struct dma_pte *pte)
+{
+        pte->val |= DMA_PTE_SNP;
+}
+
 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 {
         pte->val = (pte->val & ~3) | (prot & 3);
@@ -1685,6 +1691,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
                 BUG_ON(dma_pte_addr(pte));
                 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
                 dma_set_pte_prot(pte, prot);
+                if (prot & DMA_PTE_SNP)
+                        dma_set_pte_snp(pte);
                 domain_flush_cache(domain, pte, sizeof(*pte));
                 start_pfn++;
                 index++;
@@ -3105,6 +3113,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
                 prot |= DMA_PTE_READ;
         if (iommu_prot & IOMMU_WRITE)
                 prot |= DMA_PTE_WRITE;
+        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+                prot |= DMA_PTE_SNP;
 
         max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
         if (dmar_domain->max_addr < max_addr) {
...
@@ -11,6 +11,7 @@
 
 #define DMA_PTE_READ (1)
 #define DMA_PTE_WRITE (2)
+#define DMA_PTE_SNP (1 << 11)
 
 struct intel_iommu;
 struct dmar_domain;
...
@@ -21,6 +21,7 @@
 
 #define IOMMU_READ (1)
 #define IOMMU_WRITE (2)
+#define IOMMU_CACHE (4) /* DMA cache coherency */
 
 struct device;
...