Commit c85994e4 authored by David Woodhouse

intel-iommu: Ensure that PTE writes are 64-bit atomic, even on i386

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Parent: 3238c0c4
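
A note on the trick before the diff: on i386 a plain 64-bit load or store compiles to two separate 32-bit memory accesses, so the IOMMU hardware (or another CPU) can observe a half-updated PTE. The patch therefore routes every PTE read and write through cmpxchg64()/__cmpxchg64(), which emits the atomic lock cmpxchg8b instruction. A rough user-space sketch of the same idea follows; cas64_demo, pte_read_atomic and pte_install_atomic are illustrative names, not kernel API, and it assumes a GCC/Clang toolchain on a CPU with cmpxchg8b.

#include <stdint.h>

/* cas64_demo: stand-in for the kernel's cmpxchg64()/__cmpxchg64().
 * Atomically compares *p with oldval and, if equal, stores newval;
 * either way it returns the value *p held before the operation. */
static inline uint64_t cas64_demo(volatile uint64_t *p,
                                  uint64_t oldval, uint64_t newval)
{
        return __sync_val_compare_and_swap(p, oldval, newval);
}

/* Atomic 64-bit read: a CAS with oldval == newval == 0 leaves a
 * non-zero entry untouched (the compare fails) and harmlessly
 * rewrites 0 over 0 otherwise, yet always returns the full 64-bit
 * contents in one atomic instruction. This is what the patched
 * dma_pte_addr() does when CONFIG_64BIT is not set. */
static inline uint64_t pte_read_atomic(volatile uint64_t *pte)
{
        return cas64_demo(pte, 0ULL, 0ULL);
}

/* Atomic install: publish pteval only if the entry was still clear.
 * A non-zero return value means another CPU got there first, which
 * is how both cmpxchg64() call sites below detect a lost race. */
static inline uint64_t pte_install_atomic(volatile uint64_t *pte,
                                          uint64_t pteval)
{
        return cas64_demo(pte, 0ULL, pteval);
}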
@@ -222,7 +222,12 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
-        return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+        return pte->val & VTD_PAGE_MASK;
+#else
+        /* Must have a full atomic 64-bit read */
+        return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
 }
 
 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
@@ -712,6 +717,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                         break;
 
                 if (!dma_pte_present(pte)) {
+                        uint64_t pteval;
+
                         tmp_page = alloc_pgtable_page();
 
                         if (!tmp_page) {
@@ -719,15 +726,15 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                            flags);
                                 return NULL;
                         }
-                        domain_flush_cache(domain, tmp_page, PAGE_SIZE);
-                        dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
-                        /*
-                         * high level table always sets r/w, last level page
-                         * table control read/write
-                         */
-                        dma_set_pte_readable(pte);
-                        dma_set_pte_writable(pte);
-                        domain_flush_cache(domain, pte, sizeof(*pte));
+                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+                        pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+                                /* Someone else set it while we were thinking; use theirs. */
+                                free_pgtable_page(tmp_page);
+                        } else {
+                                dma_pte_addr(pte);
+                                domain_flush_cache(domain, pte, sizeof(*pte));
+                        }
                 }
                 parent = phys_to_virt(dma_pte_addr(pte));
                 level--;
@@ -1666,6 +1673,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
         }
 
         while (nr_pages--) {
+                uint64_t tmp;
+
                 if (!sg_res) {
                         sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
@@ -1680,17 +1689,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 /* We don't need lock here, nobody else
                  * touches the iova range
                  */
-                if (unlikely(dma_pte_addr(pte))) {
+                tmp = cmpxchg64(&pte->val, 0ULL, pteval);
+                if (tmp) {
                         static int dumps = 5;
-                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
-                               iov_pfn, pte->val);
+                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+                               iov_pfn, tmp, (unsigned long long)pteval);
                         if (dumps) {
                                 dumps--;
                                 debug_dma_dump_mappings(NULL);
                         }
                         WARN_ON(1);
                 }
-                pte->val = pteval;
                 pte++;
                 if (!nr_pages ||
                     (unsigned long)pte >> VTD_PAGE_SHIFT !=
...
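
The pfn_to_dma_pte() hunk is the classic lock-free populate-once shape: build the new page table off to the side, publish it with a single cmpxchg64(), and if another CPU won the race, free the local copy and adopt the winner's. A compressed sketch of that shape, reusing cas64_demo() and pte_read_atomic() from the sketch above; populate_once and the allocation stand-ins are mine, not kernel code.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Fill *slot exactly once without taking a lock. Returns whichever
 * value ended up published, ours or a concurrent winner's. */
static uint64_t populate_once(volatile uint64_t *slot)
{
        uint64_t pteval, old;
        void *page;

        old = pte_read_atomic(slot);      /* atomic even on 32-bit */
        if (old)
                return old;               /* already populated */

        page = aligned_alloc(4096, 4096); /* stand-in for alloc_pgtable_page() */
        if (!page)
                return 0;
        memset(page, 0, 4096);            /* new tables start out clear */
        pteval = (uint64_t)(uintptr_t)page | 0x3; /* addr | R/W-style bits */

        old = cas64_demo(slot, 0ULL, pteval);
        if (old) {
                /* Someone else set it while we were thinking; use theirs. */
                free(page);
                return old;
        }
        return pteval;
}

Note the ordering the patch keeps: the cache flush of the PTE happens only on the branch that actually published a new entry, since the losing CPU's entry has already been made visible by whoever won the cmpxchg64().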