openeuler / raspberrypi-kernel
Commit c85994e4
Authored Jul 01, 2009 by David Woodhouse

intel-iommu: Ensure that PTE writes are 64-bit atomic, even on i386

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

Parent: 3238c0c4
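On i386 a 64-bit load or store is performed as two 32-bit memory accesses, so the IOMMU hardware (or another CPU) can observe a half-updated page-table entry. The patch therefore routes both the read side (dma_pte_addr()) and the write side through cmpxchg64(), which the CPU executes as a single atomic 8-byte operation. Below is a minimal user-space sketch of the same idea; it uses GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel's cmpxchg64(), and helper names such as pte_read_atomic() and pte_install() are illustrative only, not kernel interfaces.

/* User-space sketch of the cmpxchg64 trick used by this patch.
 * __sync_val_compare_and_swap is a GCC builtin standing in for the
 * kernel's cmpxchg64(); pte_read_atomic()/pte_install() are made-up
 * names.  Build with e.g.: gcc -m32 -march=i686 -O2 sketch.c
 */
#include <stdint.h>
#include <stdio.h>

/* Atomic 64-bit read on i386: a compare-and-swap with old == new never
 * changes the stored value, but always returns the current 8-byte
 * contents in one atomic operation. */
static uint64_t pte_read_atomic(uint64_t *pte)
{
        return __sync_val_compare_and_swap(pte, 0ULL, 0ULL);
}

/* Atomic install: succeeds only if the entry was still clear, and the
 * caller learns the previous value, so a lost race is detectable. */
static uint64_t pte_install(uint64_t *pte, uint64_t pteval)
{
        return __sync_val_compare_and_swap(pte, 0ULL, pteval);
}

int main(void)
{
        uint64_t pte = 0;

        if (pte_install(&pte, 0x123456789000ULL | 0x3) == 0)
                printf("installed: %#llx\n",
                       (unsigned long long)pte_read_atomic(&pte));
        else
                printf("someone else installed the entry first\n");
        return 0;
}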
Showing 1 changed file with 23 additions and 14 deletions (+23 -14)

drivers/pci/intel-iommu.c
@@ -222,7 +222,12 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
-        return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+        return pte->val & VTD_PAGE_MASK;
+#else
+        /* Must have a full atomic 64-bit read */
+        return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
 }
 
 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
@@ -712,6 +717,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                         break;
 
                 if (!dma_pte_present(pte)) {
+                        uint64_t pteval;
+
                         tmp_page = alloc_pgtable_page();
 
                         if (!tmp_page) {
@@ -719,15 +726,15 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                                        flags);
                                 return NULL;
                         }
-                        domain_flush_cache(domain, tmp_page, PAGE_SIZE);
-                        dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
-                        /*
-                         * high level table always sets r/w, last level page
-                         * table control read/write
-                         */
-                        dma_set_pte_readable(pte);
-                        dma_set_pte_writable(pte);
-                        domain_flush_cache(domain, pte, sizeof(*pte));
+                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+                        pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+                                /* Someone else set it while we were thinking; use theirs. */
+                                free_pgtable_page(tmp_page);
+                        } else {
+                                dma_pte_addr(pte);
+                                domain_flush_cache(domain, pte, sizeof(*pte));
+                        }
                 }
                 parent = phys_to_virt(dma_pte_addr(pte));
                 level--;
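In the hunk above, two CPUs can race to populate the same intermediate page-table slot in pfn_to_dma_pte(). Instead of taking a lock, each CPU allocates a candidate page and tries to publish it with cmpxchg64(&pte->val, 0ULL, pteval); whoever loses the race frees its page and uses the winner's entry. The following user-space sketch mirrors that allocate/try-publish/free-on-loss pattern under the same assumptions as before: the GCC builtin stands in for cmpxchg64(), and slot_populate()/SLOT_PRESENT are made-up names for illustration.

/* Sketch of the lock-free "populate once" pattern from pfn_to_dma_pte():
 * allocate speculatively, publish with a 64-bit CAS, and free the
 * allocation if another thread won the race. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOT_PRESENT 0x1ULL

/* Returns the published value for this slot, populating it if needed. */
static uint64_t slot_populate(uint64_t *slot)
{
        uint64_t old, val;
        void *page;

        old = __sync_val_compare_and_swap(slot, 0ULL, 0ULL); /* atomic 64-bit read */
        if (old & SLOT_PRESENT)
                return old;                     /* already populated */

        page = calloc(1, 4096);                 /* speculative allocation */
        if (!page)
                return 0;

        val = (uint64_t)(uintptr_t)page | SLOT_PRESENT;
        old = __sync_val_compare_and_swap(slot, 0ULL, val);
        if (old) {
                /* Someone else set it while we were thinking; use theirs. */
                free(page);
                return old;
        }
        return val;
}

int main(void)
{
        uint64_t slot = 0;
        uint64_t a = slot_populate(&slot);
        uint64_t b = slot_populate(&slot);      /* reuses the published entry */

        printf("%s\n", a == b ? "second caller reused the entry" : "bug");
        return 0;
}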
@@ -1666,6 +1673,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
         }
 
         while (nr_pages--) {
+                uint64_t tmp;
+
                 if (!sg_res) {
                         sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
@@ -1680,17 +1689,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                 /* We don't need lock here, nobody else
                  * touches the iova range
                  */
-                if (unlikely(dma_pte_addr(pte))) {
+                tmp = cmpxchg64(&pte->val, 0ULL, pteval);
+                if (tmp) {
                         static int dumps = 5;
-                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx)\n",
-                               iov_pfn, pte->val);
+                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+                               iov_pfn, tmp, (unsigned long long)pteval);
                         if (dumps) {
                                 dumps--;
                                 debug_dma_dump_mappings(NULL);
                         }
                         WARN_ON(1);
                 }
-                pte->val = pteval;
                 pte++;
                 if (!nr_pages || (unsigned long)pte >> VTD_PAGE_SHIFT !=
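The last hunk replaces the old sequence of "check dma_pte_addr(pte), then plain pte->val = pteval" with a single cmpxchg64(): on i386 the plain assignment would be two 32-bit stores that hardware could see half-written, and the value returned by the exchange doubles as the clash report that the printk prints. A short sketch of that install-and-report shape, again with the GCC builtin standing in for cmpxchg64() and fprintf standing in for printk(); pte_set_checked() is a made-up helper name.

/* Sketch of the __domain_mapping() change: install a 64-bit entry with a
 * single atomic operation and report any value that was already there. */
#include <stdint.h>
#include <stdio.h>

static void pte_set_checked(uint64_t *pte, uint64_t pteval, unsigned long iov_pfn)
{
        /* One atomic 8-byte compare-and-exchange replaces the old
         * "check, then plain 64-bit store" sequence. */
        uint64_t tmp = __sync_val_compare_and_swap(pte, 0ULL, pteval);

        if (tmp)
                fprintf(stderr,
                        "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                        iov_pfn, (unsigned long long)tmp,
                        (unsigned long long)pteval);
}

int main(void)
{
        uint64_t pte = 0;

        pte_set_checked(&pte, 0x1000ULL | 0x3, 0x42);   /* installs cleanly */
        pte_set_checked(&pte, 0x2000ULL | 0x3, 0x42);   /* reports the clash */
        return 0;
}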