OpenHarmony / kernel_linux
Commit 7de47306

Authored March 22, 2012 by Joerg Roedel

Merge branches 'iommu/fixes', 'arm/tegra' and 'x86/amd' into next

Parents: c16fa4f2, 09c32533, ae295142

Showing 10 changed files with 1709 additions and 68 deletions (+1709 −68)
MAINTAINERS                                 +1    -1
arch/arm/mach-tegra/include/mach/smmu.h    +63    -0
drivers/iommu/Kconfig                      +20    -0
drivers/iommu/Makefile                      +2    -0
drivers/iommu/amd_iommu.c                   +1    -1
drivers/iommu/amd_iommu_init.c            +123   -64
drivers/iommu/amd_iommu_v2.c               +13    -1
drivers/iommu/tegra-gart.c                +451    -0
drivers/iommu/tegra-smmu.c               +1034    -0
include/linux/amd-iommu.h                   +1    -1
MAINTAINERS
@@ -503,7 +503,7 @@ F:	arch/x86/include/asm/geode.h
 AMD IOMMU (AMD-VI)
 M:	Joerg Roedel <joerg.roedel@amd.com>
 L:	iommu@lists.linux-foundation.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 S:	Supported
 F:	drivers/iommu/amd_iommu*.[ch]
 F:	include/linux/amd-iommu.h
arch/arm/mach-tegra/include/mach/smmu.h (new file, mode 100644)
/*
* IOMMU API for SMMU in Tegra30
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MACH_SMMU_H
#define MACH_SMMU_H

enum smmu_hwgrp {
	HWGRP_AFI,
	HWGRP_AVPC,
	HWGRP_DC,
	HWGRP_DCB,
	HWGRP_EPP,
	HWGRP_G2,
	HWGRP_HC,
	HWGRP_HDA,
	HWGRP_ISP,
	HWGRP_MPE,
	HWGRP_NV,
	HWGRP_NV2,
	HWGRP_PPCS,
	HWGRP_SATA,
	HWGRP_VDE,
	HWGRP_VI,

	HWGRP_COUNT,

	HWGRP_END = ~0,
};
#define HWG_AFI (1 << HWGRP_AFI)
#define HWG_AVPC (1 << HWGRP_AVPC)
#define HWG_DC (1 << HWGRP_DC)
#define HWG_DCB (1 << HWGRP_DCB)
#define HWG_EPP (1 << HWGRP_EPP)
#define HWG_G2 (1 << HWGRP_G2)
#define HWG_HC (1 << HWGRP_HC)
#define HWG_HDA (1 << HWGRP_HDA)
#define HWG_ISP (1 << HWGRP_ISP)
#define HWG_MPE (1 << HWGRP_MPE)
#define HWG_NV (1 << HWGRP_NV)
#define HWG_NV2 (1 << HWGRP_NV2)
#define HWG_PPCS (1 << HWGRP_PPCS)
#define HWG_SATA (1 << HWGRP_SATA)
#define HWG_VDE (1 << HWGRP_VDE)
#define HWG_VI (1 << HWGRP_VI)
#endif	/* MACH_SMMU_H */
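
For illustration only (not part of the commit): each HWG_* macro is a one-hot bit derived from the enum above, so a set of hardware groups is naturally described as a bitwise OR. A minimal sketch, with a hypothetical mask and helper:

#include <linux/types.h>
#include <mach/smmu.h>

/* Hypothetical mask: the two display controllers plus the host1x client. */
static const u32 example_hwgrp_mask = HWG_DC | HWG_DCB | HWG_HC;

/* Test membership using the same one-hot encoding the HWG_* macros use. */
static inline bool hwgrp_in_mask(enum smmu_hwgrp g, u32 mask)
{
	return mask & (1 << g);
}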
drivers/iommu/Kconfig
@@ -142,4 +142,24 @@ config OMAP_IOMMU_DEBUG
 	  Say N unless you know you need this.
 
+config TEGRA_IOMMU_GART
+	bool "Tegra GART IOMMU Support"
+	depends on ARCH_TEGRA_2x_SOC
+	select IOMMU_API
+	help
+	  Enables support for remapping discontiguous physical memory
+	  shared with the operating system into contiguous I/O virtual
+	  space through the GART (Graphics Address Relocation Table)
+	  hardware included on Tegra SoCs.
+
+config TEGRA_IOMMU_SMMU
+	bool "Tegra SMMU IOMMU Support"
+	depends on ARCH_TEGRA_3x_SOC
+	select IOMMU_API
+	help
+	  Enables support for remapping discontiguous physical memory
+	  shared with the operating system into contiguous I/O virtual
+	  space through the SMMU (System Memory Management Unit)
+	  hardware included on Tegra SoCs.
+
 endif # IOMMU_SUPPORT
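
Both new options select IOMMU_API, so in-kernel clients program the GART/SMMU through the generic IOMMU interface rather than driver-private calls. A minimal sketch of that usage against the 3.x-era API, assuming a platform device dev that sits behind the IOMMU and a physically contiguous buffer at pa (the function name and buffer are illustrative, not from the commit):

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

static int example_map_through_iommu(struct device *dev,
				     unsigned long iova,
				     phys_addr_t pa, size_t size)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto free_domain;

	/* size must respect the driver's pgsize_bitmap (4K for the GART) */
	ret = iommu_map(domain, iova, pa, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		iommu_detach_device(domain, dev);

free_domain:
	if (ret)
		iommu_domain_free(domain);
	return ret;
}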
drivers/iommu/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
+obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
drivers/iommu/amd_iommu.c
@@ -2804,7 +2804,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  * we don't need to preallocate the protection domains anymore.
  * For now we have to.
  */
-static void prealloc_protection_domains(void)
+static void __init prealloc_protection_domains(void)
 {
 	struct iommu_dev_data *dev_data;
 	struct dma_ops_domain *dma_dom;
drivers/iommu/amd_iommu_init.c
@@ -196,6 +196,8 @@ static u32 rlookup_table_size;	/* size if the rlookup table */
  */
 extern void iommu_flush_all_caches(struct amd_iommu *iommu);
 
+static int amd_iommu_enable_interrupts(void);
+
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
@@ -358,8 +360,6 @@ static void iommu_disable(struct amd_iommu *iommu)
  */
 static u8 * __init iommu_map_mmio_space(u64 address)
 {
-	u8 *ret;
-
 	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
 		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
 		       address);
@@ -367,13 +367,7 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 		return NULL;
 	}
 
-	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
-	if (ret != NULL)
-		return ret;
-
-	release_mem_region(address, MMIO_REGION_LENGTH);
-
-	return NULL;
+	return ioremap_nocache(address, MMIO_REGION_LENGTH);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -1131,8 +1125,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 {
 	int r;
 
-	if (pci_enable_msi(iommu->dev))
-		return 1;
+	r = pci_enable_msi(iommu->dev);
+	if (r)
+		return r;
 
 	r = request_threaded_irq(iommu->dev->irq,
 				 amd_iommu_int_handler,
@@ -1142,27 +1137,36 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 	if (r) {
 		pci_disable_msi(iommu->dev);
-		return 1;
+		return r;
 	}
 
 	iommu->int_enabled = true;
-	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
-
-	if (iommu->ppr_log != NULL)
-		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
 
 	return 0;
 }
 
 static int iommu_init_msi(struct amd_iommu *iommu)
 {
+	int ret;
+
 	if (iommu->int_enabled)
-		return 0;
+		goto enable_faults;
 
 	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
-		return iommu_setup_msi(iommu);
+		ret = iommu_setup_msi(iommu);
+	else
+		ret = -ENODEV;
 
-	return 1;
+	if (ret)
+		return ret;
+
+enable_faults:
+	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+	if (iommu->ppr_log != NULL)
+		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
+	return 0;
 }
 
 /****************************************************************************
@@ -1381,7 +1385,6 @@ static void enable_iommus(void)
 		iommu_enable_ppr_log(iommu);
 		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
-		iommu_init_msi(iommu);
 		iommu_enable(iommu);
 		iommu_flush_all_caches(iommu);
 	}
@@ -1409,6 +1412,8 @@ static void amd_iommu_resume(void)
 	/* re-load the hardware */
 	enable_iommus();
+
+	amd_iommu_enable_interrupts();
 }
 
 static int amd_iommu_suspend(void)
@@ -1424,10 +1429,40 @@ static struct syscore_ops amd_iommu_syscore_ops = {
 	.resume = amd_iommu_resume,
 };
 
+static void __init free_on_init_error(void)
+{
+	amd_iommu_uninit_devices();
+
+	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+		   get_order(MAX_DOMAIN_ID/8));
+
+	free_pages((unsigned long)amd_iommu_rlookup_table,
+		   get_order(rlookup_table_size));
+
+	free_pages((unsigned long)amd_iommu_alias_table,
+		   get_order(alias_table_size));
+
+	free_pages((unsigned long)amd_iommu_dev_table,
+		   get_order(dev_table_size));
+
+	free_iommu_all();
+
+	free_unity_maps();
+
+#ifdef CONFIG_GART_IOMMU
+	/*
+	 * We failed to initialize the AMD IOMMU - try fallback to GART
+	 * if possible.
+	 */
+	gart_iommu_init();
+#endif
+}
+
 /*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
+ * This is the hardware init function for AMD IOMMU in the system.
+ * This function is called either from amd_iommu_init or from the interrupt
+ * remapping setup code.
  *
  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
  * three times:
@@ -1446,16 +1481,21 @@ static struct syscore_ops amd_iommu_syscore_ops = {
  * remapping requirements parsed out of the ACPI table in
  * this last pass.
  *
- * After that the hardware is initialized and ready to go. In the last
- * step we do some Linux specific things like registering the driver in
- * the dma_ops interface and initializing the suspend/resume support
- * functions. Finally it prints some information about AMD IOMMUs and
- * the driver state and enables the hardware.
+ * After everything is set up the IOMMUs are enabled and the necessary
+ * hotplug and suspend notifiers are registered.
  */
-static int __init amd_iommu_init(void)
+int __init amd_iommu_init_hardware(void)
 {
 	int i, ret = 0;
 
+	if (!amd_iommu_detected)
+		return -ENODEV;
+
+	if (amd_iommu_dev_table != NULL) {
+		/* Hardware already initialized */
+		return 0;
+	}
+
 	/*
 	 * First parse ACPI tables to find the largest Bus/Dev/Func
 	 * we need to handle. Upon this information the shared data
@@ -1472,9 +1512,8 @@ static int __init amd_iommu_init(void)
 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
 
-	ret = -ENOMEM;
-
 	/* Device table - directly used by all IOMMUs */
+	ret = -ENOMEM;
 	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 				      get_order(dev_table_size));
 	if (amd_iommu_dev_table == NULL)
@@ -1546,20 +1585,65 @@
 	enable_iommus();
 
+	amd_iommu_init_notifier();
+
+	register_syscore_ops(&amd_iommu_syscore_ops);
+
+out:
+	return ret;
+
+free:
+	free_on_init_error();
+
+	return ret;
+}
+
+static int amd_iommu_enable_interrupts(void)
+{
+	struct amd_iommu *iommu;
+	int ret = 0;
+
+	for_each_iommu(iommu) {
+		ret = iommu_init_msi(iommu);
+		if (ret)
+			goto out;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ *
+ * The function calls amd_iommu_init_hardware() to setup and enable the
+ * IOMMU hardware if this has not happened yet. After that the driver
+ * registers for the DMA-API and for the IOMMU-API as necessary.
+ */
+static int __init amd_iommu_init(void)
+{
+	int ret = 0;
+
+	ret = amd_iommu_init_hardware();
+	if (ret)
+		goto out;
+
+	ret = amd_iommu_enable_interrupts();
+	if (ret)
+		goto free;
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
 		ret = amd_iommu_init_dma_ops();
 
 	if (ret)
-		goto free_disable;
+		goto free;
 
 	amd_iommu_init_api();
 
-	amd_iommu_init_notifier();
-
-	register_syscore_ops(&amd_iommu_syscore_ops);
-
 	if (iommu_pass_through)
 		goto out;
@@ -1569,39 +1653,14 @@
 		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
 	x86_platform.iommu_shutdown = disable_iommus;
 
 out:
 	return ret;
 
-free_disable:
-	disable_iommus();
-
 free:
-	amd_iommu_uninit_devices();
-
-	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-		   get_order(MAX_DOMAIN_ID/8));
-
-	free_pages((unsigned long)amd_iommu_rlookup_table,
-		   get_order(rlookup_table_size));
-
-	free_pages((unsigned long)amd_iommu_alias_table,
-		   get_order(alias_table_size));
-
-	free_pages((unsigned long)amd_iommu_dev_table,
-		   get_order(dev_table_size));
-
-	free_iommu_all();
-
-	free_unity_maps();
-
-#ifdef CONFIG_GART_IOMMU
-	/*
-	 * We failed to initialize the AMD IOMMU - try fallback to GART
-	 * if possible.
-	 */
-	gart_iommu_init();
-#endif
+	disable_iommus();
+
+	free_on_init_error();
 
 	goto out;
 }
drivers/iommu/amd_iommu_v2.c
@@ -921,7 +921,16 @@ static int __init amd_iommu_v2_init(void)
 	size_t state_table_size;
 	int ret;
 
-	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>");
+	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+
+	if (!amd_iommu_v2_supported()) {
+		pr_info("AMD IOMMUv2 functionality not available on this sytem\n");
+		/*
+		 * Load anyway to provide the symbols to other modules
+		 * which may use AMD IOMMUv2 optionally.
+		 */
+		return 0;
+	}
 
 	spin_lock_init(&state_lock);
 
@@ -961,6 +970,9 @@ static void __exit amd_iommu_v2_exit(void)
 	size_t state_table_size;
 	int i;
 
+	if (!amd_iommu_v2_supported())
+		return;
+
 	profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
 	amd_iommu_unregister_ppr_notifier(&ppr_nb);
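
As the added comment says, the module now loads even where IOMMUv2 is unavailable, purely so its symbols stay resolvable for optional users; a consumer is then expected to gate itself at runtime. A minimal sketch of that pattern (the consumer function is hypothetical, not from the commit):

#include <linux/amd-iommu.h>

static int example_optional_v2_user_init(void)
{
	/* Stay loaded but inactive when IOMMUv2 is unavailable. */
	if (!amd_iommu_v2_supported())
		return 0;

	/* ... e.g. amd_iommu_init_device(), PASID binding, etc. ... */
	return 0;
}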
drivers/iommu/tegra-gart.c (new file, mode 100644)
/*
* IOMMU API for GART in Tegra20
*
* Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define pr_fmt(fmt) "%s(): " fmt, __func__
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
#define GART_CONFIG 0x24
#define GART_ENTRY_ADDR 0x28
#define GART_ENTRY_DATA 0x2c
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
#define GART_PAGE_SIZE (1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK \
(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart;
	struct gart_client *client, *c;
	int err = 0;

	gart = dev_get_drvdata(dev->parent);
	if (!gart)
		return -EINVAL;
	domain->priv = gart;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = domain->priv;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}

static int gart_iommu_domain_init(struct iommu_domain *domain)
{
	return 0;
}

static void gart_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct gart_device *gart = domain->priv;

	if (!gart)
		return;

	spin_lock(&gart->client_lock);
	if (!list_empty(&gart->client)) {
		struct gart_client *c;

		list_for_each_entry(c, &gart->client, list)
			gart_iommu_detach_dev(domain, c->dev);
	}
	spin_unlock(&gart->client_lock);
	domain->priv = NULL;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %08x\n", pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct gart_device *gart = domain->priv;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static int gart_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops gart_iommu_ops = {
	.domain_init	= gart_iommu_domain_init,
	.domain_destroy	= gart_iommu_domain_destroy,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.domain_has_cap	= gart_iommu_domain_has_cap,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	int err;
	struct device *dev = &pdev->dev;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		err = -ENXIO;
		goto fail;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		err = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;
	return 0;

fail:
	if (gart_regs)
		devm_iounmap(dev, gart_regs);
	if (gart && gart->savedata)
		vfree(gart->savedata);
	devm_kfree(dev, gart);
	return err;
}

static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);
	struct device *dev = gart->dev;

	writel(0, gart->regs + GART_CONFIG);
	if (gart->savedata)
		vfree(gart->savedata);
	if (gart->regs)
		devm_iounmap(dev, gart->regs);
	devm_kfree(dev, gart);
	gart_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
	},
};

static int __devinit tegra_gart_init(void)
{
	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_LICENSE("GPL v2");
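
To make the probe arithmetic concrete, a worked example under assumed numbers (the aperture base and size are illustrative, not from the commit):

/* Illustrative values only: a 32 MiB remap aperture, as a Tegra20
 * board might describe in its second IORESOURCE_MEM resource. */
#define EXAMPLE_APERTURE_SIZE	(32UL << 20)

/* tegra_gart_probe() would then compute
 *   page_count = 32 MiB >> GART_PAGE_SHIFT = 8192 PTEs, and
 *   savedata   = vmalloc(8192 * sizeof(u32)) = 32 KiB of save area. */
static const unsigned long example_page_count =
	EXAMPLE_APERTURE_SIZE >> GART_PAGE_SHIFT;	/* 8192 */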
drivers/iommu/tegra-smmu.c (new file, mode 100644)
This diff is collapsed and not shown here.
include/linux/amd-iommu.h
@@ -28,7 +28,7 @@ struct task_struct;
 struct pci_dev;
 
 extern int amd_iommu_detect(void);
-
+extern int amd_iommu_init_hardware(void);
 
 /**
  * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
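
The newly exported amd_iommu_init_hardware() is idempotent, so an early caller such as the interrupt-remapping setup code mentioned in the revised comment can run it before the generic x86 DMA init. A minimal sketch, with a hypothetical caller name:

#include <linux/amd-iommu.h>

static int __init example_early_amd_iommu_user(void)
{
	int ret;

	/*
	 * Safe even though amd_iommu_init() runs later: the second
	 * call sees the device table already set up and returns 0.
	 */
	ret = amd_iommu_init_hardware();
	if (ret)
		return ret;	/* e.g. -ENODEV when no AMD IOMMU exists */

	/* ... hardware is enabled and ready to use ... */
	return 0;
}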