Commit a06ec394
Authored Dec 14, 2011 by Joerg Roedel
Merge branch 'iommu/page-sizes' into x86/amd
Conflicts: drivers/iommu/amd_iommu.c
Parents: 175d6146, 6c274d1c
Showing 8 changed files with 205 additions and 70 deletions
drivers/iommu/amd_iommu.c    +24   -8
drivers/iommu/intel-iommu.c  +23   -7
drivers/iommu/iommu.c        +107  -12
drivers/iommu/msm_iommu.c    +12   -13
drivers/iommu/omap-iommu.c   +9    -9
drivers/iommu/omap-iovmm.c   +6    -11
include/linux/iommu.h        +20   -6
virt/kvm/iommu.c             +4    -4
drivers/iommu/amd_iommu.c
@@ -44,6 +44,24 @@
 #define LOOP_TIMEOUT	100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
@@ -3093,9 +3111,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
@@ -3115,24 +3132,22 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			   size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
+	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return -EINVAL;
 
-	page_size  = 0x1000UL << gfp_order;
-
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3182,6 +3197,7 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
+	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
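Note on AMD_IOMMU_PGSIZES above: in a pgsize_bitmap, a set bit k advertises support for pages of size 2^k bytes, so ~0xFFFUL (every bit from 12 upward) advertises every power-of-two size of 4 KiB or more, which preserves the old order-based contract. A minimal sketch of how such a bitmap decodes (plain userspace C, not kernel code; the loop and printing are purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long pgsizes = ~0xFFFUL;	/* AMD_IOMMU_PGSIZES */
	unsigned int k;

	/* every set bit k means "pages of 1UL << k bytes are supported" */
	for (k = 0; k < 8 * sizeof(pgsizes); k++)
		if (pgsizes & (1UL << k))
			printf("supported page size: 0x%lx bytes\n", 1UL << k);
	return 0;
}

With a real-hardware bitmap such as the MSM/OMAP one further down, only four bits would be set.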
drivers/iommu/intel-iommu.c
@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size     = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+			     unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
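Both x86 drivers also change what unmap reports back: a byte count (return PAGE_SIZE << order) instead of a raw page order. A quick standalone check of that conversion (plain C, illustrative only; PAGE_SHIFT of 12 assumed, as on x86):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	int order = 9;			/* 512 pages, i.e. a 2 MiB superpage */
	unsigned long bytes = PAGE_SIZE << order;

	assert(bytes == 2UL * 1024 * 1024);
	printf("order %d == 0x%lx bytes\n", order, bytes);
	return 0;
}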
drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -157,32 +159,125 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, int gfp_order, int prot)
+	      phys_addr_t paddr, size_t size, int prot)
 {
-	size_t size;
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL))
 		return -ENODEV;
 
-	size         = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+				(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
 
-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
 
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+					(unsigned long)paddr, pgsize);
+
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t size;
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
 
 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;
 
-	size         = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-
-	return domain->ops->unmap(domain, iova, gfp_order);
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+					iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+							(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+					(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
+}
 EXPORT_SYMBOL_GPL(iommu_unmap);
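The heart of the new iommu_map() is the page-size selection in the loop above: each iteration picks the largest power of two that still fits in the remaining size, is compatible with the alignment of both iova and paddr, and appears in the driver's pgsize_bitmap. Below is a minimal userspace re-implementation of that selection step, with the kernel's __fls()/__ffs() replaced by compiler builtins; the bitmap and addresses in main() are made-up inputs:

#include <stdio.h>

/* index of the highest set bit (x must be nonzero), stands in for __fls() */
static unsigned int fls_idx(unsigned long x)
{
	return 8 * sizeof(x) - 1 - __builtin_clzl(x);
}

/* the page-size selection step of the new iommu_map(), lifted out */
static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
				 unsigned long size, unsigned long pgsize_bitmap)
{
	unsigned long addr_merge = iova | paddr;
	unsigned long mask;
	/* max page size that still fits into 'size' */
	unsigned int pgsize_idx = fls_idx(size);

	/* the common alignment of both addresses may restrict it further */
	if (addr_merge) {
		unsigned int align_idx = __builtin_ctzl(addr_merge);

		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* mask of acceptable sizes, filtered by what the hardware supports */
	mask = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

	/* pick the biggest remaining candidate (0 if none, the caller's bug) */
	return mask ? 1UL << fls_idx(mask) : 0;
}

int main(void)
{
	/* an MSM/OMAP-style bitmap: 4K | 64K | 1M | 16M */
	unsigned long bitmap = 0x1000 | 0x10000 | 0x100000 | 0x1000000;
	unsigned long iova = 0x100000, paddr = 0x40100000, size = 0x110000;

	while (size) {
		unsigned long pg = pick_pgsize(iova, paddr, size, bitmap);

		printf("map iova 0x%lx -> pa 0x%lx, pgsize 0x%lx\n",
		       iova, paddr, pg);
		iova += pg;
		paddr += pg;
		size -= pg;
	}
	return 0;
}

Running it on this 1 MiB-aligned region of 1 MiB + 64 KiB prints one 1 MiB mapping followed by one 64 KiB mapping, instead of forcing the caller to round up to a single power-of-two block as the old order-based API did.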
drivers/iommu/msm_iommu.c
@@ -42,6 +42,9 @@ __asm__ __volatile__ (					\
 #define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
 
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];
 
 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	ret = __flush_iotlb(domain);
 
-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
 
 static int __init get_tex_class(int icp, int ocp, int mt, int nos)
drivers/iommu/omap-iommu.c
@@ -33,6 +33,9 @@
 	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 	     __i++)
 
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:	the page table
@@ -1019,12 +1022,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			  phys_addr_t pa, int order, int prot)
+			  phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1051,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			    size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;
 
-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-	unmap_size = iopgtable_clear_entry(oiommu, da);
-
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
@@ -1211,6 +1210,7 @@ static struct iommu_ops omap_iommu_ops = {
 	.unmap		= omap_iommu_unmap,
 	.iova_to_phys	= omap_iommu_iova_to_phys,
 	.domain_has_cap	= omap_iommu_domain_has_cap,
+	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 };
 
 static int __init omap_iommu_init(void)
drivers/iommu/omap-iovmm.c
@@ -410,7 +410,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
-	int order;
 
 	if (!domain || !sgt)
 		return -EINVAL;
@@ -429,12 +428,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
 
-		order = get_order(bytes);
-
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		err = iommu_map(domain, da, pa, order, flags);
+		err = iommu_map(domain, da, pa, bytes, flags);
 		if (err)
 			goto err_out;
 
@@ -449,10 +446,9 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		size_t bytes;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
-		iommu_unmap(domain, da, order);
+		iommu_unmap(domain, da, bytes);
 
 		da += bytes;
 	}
@@ -467,7 +463,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	size_t total = area->da_end - area->da_start;
 	const struct sg_table *sgt = area->sgt;
 	struct scatterlist *sg;
-	int i, err;
+	int i;
+	size_t unmapped;
 
 	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +472,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	start = area->da_start;
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
-		int order;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
-		err = iommu_unmap(domain, start, order);
-		if (err < 0)
+		unmapped = iommu_unmap(domain, start, bytes);
+		if (unmapped < bytes)
 			break;
 
 		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
include/linux/iommu.h
@@ -48,19 +48,33 @@ struct iommu_domain {
 
 #ifdef CONFIG_IOMMU_API
 
+/**
+ * struct iommu_ops - iommu ops and capabilities
+ * @domain_init: init iommu domain
+ * @domain_destroy: destroy iommu domain
+ * @attach_dev: attach device to an iommu domain
+ * @detach_dev: detach device from an iommu domain
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @iova_to_phys: translate iova to physical address
+ * @domain_has_cap: domain capabilities query
+ * @commit: commit iommu domain
+ * @pgsize_bitmap: bitmap of supported page sizes
+ */
 struct iommu_ops {
 	int (*domain_init)(struct iommu_domain *domain);
 	void (*domain_destroy)(struct iommu_domain *domain);
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, int gfp_order, int prot);
-	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		     int gfp_order);
+		   phys_addr_t paddr, size_t size, int prot);
+	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     size_t size);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
 			      unsigned long cap);
+	unsigned long pgsize_bitmap;
 };
 
 extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
@@ -72,9 +86,9 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
-		     phys_addr_t paddr, int gfp_order, int prot);
-extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-		       int gfp_order);
+		     phys_addr_t paddr, size_t size, int prot);
+extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       size_t size);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
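For callers the conversion is mechanical but not meaningless: the old order-based prototypes could only express power-of-two multiples of PAGE_SIZE, so odd-sized regions had to be rounded up. A small standalone illustration (plain C; get_order() re-implemented here to match the kernel's semantics, PAGE_SHIFT of 12 assumed, and the iommu_map() calls shown only in comments):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* minimal stand-in for the kernel's get_order() */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long bytes = 0x11000;		/* a 68 KiB region */

	/* old API: iommu_map(domain, da, pa, get_order(bytes), flags);
	 * rounds 68 KiB up to an order-5, 128 KiB request */
	printf("old: order %d (%lu bytes)\n", get_order(bytes),
	       PAGE_SIZE << get_order(bytes));

	/* new API: iommu_map(domain, da, pa, bytes, flags);
	 * the exact byte count is preserved and split by the core */
	printf("new: size 0x%lx bytes\n", bytes);
	return 0;
}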
virt/kvm/iommu.c
@@ -113,7 +113,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 
 		/* Map into IO address space */
 		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
-			      get_order(page_size), flags);
+			      page_size, flags);
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%llx\n", pfn);
@@ -292,15 +292,15 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 	while (gfn < end_gfn) {
 		unsigned long unmap_pages;
-		int order;
+		size_t size;
 
 		/* Get physical address */
 		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
 		pfn  = phys >> PAGE_SHIFT;
 
 		/* Unmap address from IO address space */
-		order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
-		unmap_pages = 1ULL << order;
+		size       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+		unmap_pages = 1ULL << get_order(size);
 
 		/* Unpin all pages we just unmapped to not leak any memory */
 		kvm_unpin_pages(kvm, pfn, unmap_pages);
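The kvm_iommu_put_pages() change above also shows the other benefit of a byte-based return value: a request to unmap a single 4 KiB page may tear down a whole superpage, and the returned size tells the caller exactly how many pfns to unpin. A userspace model of that loop (mock_iommu_unmap() is hypothetical and stands in for iommu_unmap(); the superpage layout is invented; size >> PAGE_SHIFT is used in place of 1ULL << get_order(size), which is equivalent for power-of-two sizes):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* pretend gfns 0x200..0x3ff are backed by a single 2 MiB superpage */
static unsigned long mock_iommu_unmap(unsigned long gfn, unsigned long size)
{
	(void)size;
	return (gfn >= 0x200 && gfn < 0x400) ? (2UL << 20) : PAGE_SIZE;
}

int main(void)
{
	unsigned long gfn = 0x1fe, end_gfn = 0x402;

	while (gfn < end_gfn) {
		unsigned long size = mock_iommu_unmap(gfn, PAGE_SIZE);
		unsigned long unmap_pages = size >> PAGE_SHIFT;

		printf("gfn 0x%lx: unmapped %lu page(s)\n", gfn, unmap_pages);
		gfn += unmap_pages;	/* skip everything actually released */
	}
	return 0;
}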