Commit 28f69154 authored by Joerg Roedel

Merge branch 'for-joerg/io-pgtable' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/io-pgtable
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -1793,11 +1793,13 @@ F: drivers/edac/synopsys_edac.c

 ARM SMMU DRIVERS
 M: Will Deacon <will.deacon@arm.com>
+R: Robin Murphy <robin.murphy@arm.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: drivers/iommu/arm-smmu.c
 F: drivers/iommu/arm-smmu-v3.c
 F: drivers/iommu/io-pgtable-arm.c
+F: drivers/iommu/io-pgtable-arm-v7s.c

 ARM64 PORT (AARCH64 ARCHITECTURE)
 M: Catalin Marinas <catalin.marinas@arm.com>
...
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
@@ -39,6 +39,25 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST

 	  If unsure, say N here.

+config IOMMU_IO_PGTABLE_ARMV7S
+	bool "ARMv7/v8 Short Descriptor Format"
+	select IOMMU_IO_PGTABLE
+	depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+	help
+	  Enable support for the ARM Short-descriptor pagetable format.
+	  This supports 32-bit virtual and physical addresses mapped using
+	  2-level tables with 4KB pages/1MB sections, and contiguous entries
+	  for 64KB pages/16MB supersections if indicated by the IOMMU driver.
+
+config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
+	bool "ARMv7s selftests"
+	depends on IOMMU_IO_PGTABLE_ARMV7S
+	help
+	  Enable self-tests for ARMv7s page table allocator. This performs
+	  a series of page-table consistency checks during boot.
+
+	  If unsure, say N here.
+
 endmenu

 config IOMMU_IOVA
...
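The help text above names the mapping sizes the short-descriptor format can produce: 4KB and 64KB pages, 1MB sections and 16MB supersections. Purely as an illustration (the macro name below is invented for this note, not a symbol from io-pgtable-arm-v7s.c), the corresponding page-size bitmap an IOMMU driver could advertise looks like this:

	#include <linux/sizes.h>

	/* Illustration only: the sizes listed in the Kconfig help text,
	 * expressed as a pgsize_bitmap value. */
	#define EXAMPLE_V7S_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)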
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
 obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
...
This diff is collapsed (the new file drivers/iommu/io-pgtable-arm-v7s.c is not expanded here).
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
@@ -446,7 +446,6 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 	unsigned long blk_start, blk_end;
 	phys_addr_t blk_paddr;
 	arm_lpae_iopte table = 0;
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;

 	blk_start = iova & ~(blk_size - 1);
 	blk_end = blk_start + blk_size;
@@ -472,9 +471,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 		}
 	}

-	__arm_lpae_set_pte(ptep, table, cfg);
+	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
 	iova &= ~(blk_size - 1);
-	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
 	return size;
 }

@@ -483,8 +482,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			    arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte pte;
-	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
-	void *cookie = data->iop.cookie;
+	struct io_pgtable *iop = &data->iop;
 	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

 	/* Something went horribly wrong and we ran out of page table */
@@ -498,17 +496,17 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,

 	/* If the size matches this level, we're in the right place */
 	if (size == blk_size) {
-		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
+		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
-					   false, cookie);
-			tlb->tlb_sync(cookie);
+			io_pgtable_tlb_add_flush(iop, iova, size,
+						 ARM_LPAE_GRANULE(data), false);
+			io_pgtable_tlb_sync(iop);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		} else {
-			tlb->tlb_add_flush(iova, size, size, true, cookie);
+			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 		}

 		return size;
@@ -532,13 +530,12 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 {
 	size_t unmapped;
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	struct io_pgtable *iop = &data->iop;
 	arm_lpae_iopte *ptep = data->pgd;
 	int lvl = ARM_LPAE_START_LVL(data);

 	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
 	if (unmapped)
-		iop->cfg.tlb->tlb_sync(iop->cookie);
+		io_pgtable_tlb_sync(&data->iop);

 	return unmapped;
 }
@@ -662,8 +659,12 @@ static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	u64 reg;
-	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+	struct arm_lpae_io_pgtable *data;
+
+	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
+		return NULL;
+
+	data = arm_lpae_alloc_pgtable(cfg);
 	if (!data)
 		return NULL;
@@ -746,8 +747,13 @@ static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	u64 reg, sl;
-	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+	struct arm_lpae_io_pgtable *data;
+
+	/* The NS quirk doesn't apply at stage 2 */
+	if (cfg->quirks)
+		return NULL;
+
+	data = arm_lpae_alloc_pgtable(cfg);
 	if (!data)
 		return NULL;
...
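Besides routing TLB maintenance through the new io_pgtable_tlb_*() wrappers, the last two hunks make the LPAE allocators validate cfg->quirks: stage 1 accepts only IO_PGTABLE_QUIRK_ARM_NS and stage 2 accepts none, returning NULL otherwise. A minimal sketch of what this means for a caller (example_gather_ops and smmu_domain are placeholders invented for this note, not names from any driver):

	/* Sketch only: an unsupported quirk now surfaces as allocation failure. */
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &example_gather_ops,		/* hypothetical */
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, smmu_domain);
	if (!ops)	/* e.g. a quirk this format does not implement */
		return -ENOMEM;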
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
@@ -33,6 +33,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
 	[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
 	[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
 #endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
+	[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
+#endif
 };

 struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
@@ -72,6 +75,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
 		return;

 	iop = container_of(ops, struct io_pgtable, ops);
-	iop->cfg.tlb->tlb_flush_all(iop->cookie);
+	io_pgtable_tlb_flush_all(iop);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
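With the new entry in io_pgtable_init_table, a driver built with CONFIG_IOMMU_IO_PGTABLE_ARMV7S selects the short-descriptor format simply by passing ARM_V7S to alloc_io_pgtable_ops(); teardown goes through free_io_pgtable_ops(), which now flushes via the io_pgtable_tlb_flush_all() wrapper. A brief sketch, assuming cfg and cookie have been set up by the caller as in the previous example:

	struct io_pgtable_ops *ops;

	/* Dispatches to io_pgtable_arm_v7s_init_fns via the table above. */
	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	/* ... map/unmap through ops ... */

	free_io_pgtable_ops(ops);	/* flushes the TLBs, then frees the tables */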
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
 #ifndef __IO_PGTABLE_H
 #define __IO_PGTABLE_H
+#include <linux/bitops.h>

 /*
  * Public API for use by IOMMU drivers
@@ -9,6 +10,7 @@ enum io_pgtable_fmt {
 	ARM_32_LPAE_S2,
 	ARM_64_LPAE_S1,
 	ARM_64_LPAE_S2,
+	ARM_V7S,
 	IO_PGTABLE_NUM_FMTS,
 };
@@ -45,8 +47,24 @@ struct iommu_gather_ops {
  * page table walker.
  */
 struct io_pgtable_cfg {
-	#define IO_PGTABLE_QUIRK_ARM_NS	(1 << 0)	/* Set NS bit in PTEs */
-	int				quirks;
+	/*
+	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
+	 *	stage 1 PTEs, for hardware which insists on validating them
+	 *	even in non-secure state where they should normally be ignored.
+	 *
+	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
+	 *	IOMMU_NOEXEC flags and map everything with full access, for
+	 *	hardware which does not implement the permissions of a given
+	 *	format, and/or requires some format-specific default value.
+	 *
+	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
+	 *	(unmapped) entries but the hardware might do so anyway, perform
+	 *	TLB maintenance when mapping as well as when unmapping.
+	 */
+	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
+	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
+	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
+	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
 	unsigned int			oas;
@@ -65,6 +83,13 @@ struct io_pgtable_cfg {
 			u64	vttbr;
 			u64	vtcr;
 		} arm_lpae_s2_cfg;
+
+		struct {
+			u32	ttbr[2];
+			u32	tcr;
+			u32	nmrr;
+			u32	prrr;
+		} arm_v7s_cfg;
 	};
 };
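The new arm_v7s_cfg block is how the short-descriptor allocator hands the translation-register values (TTBR0/1, TCR, PRRR/NMRR) back to the IOMMU driver, which then programs them into its own context registers. A purely illustrative sketch; the function name and EXAMPLE_* register offsets below are invented for this note and do not come from any real driver:

	/* Illustration only: EXAMPLE_* offsets are placeholders for a real
	 * driver's context-bank register layout. */
	static void example_install_v7s_cfg(void __iomem *ctx_base,
					    struct io_pgtable_cfg *cfg)
	{
		writel_relaxed(cfg->arm_v7s_cfg.ttbr[0], ctx_base + EXAMPLE_TTBR0);
		writel_relaxed(cfg->arm_v7s_cfg.ttbr[1], ctx_base + EXAMPLE_TTBR1);
		writel_relaxed(cfg->arm_v7s_cfg.tcr,     ctx_base + EXAMPLE_TCR);
		writel_relaxed(cfg->arm_v7s_cfg.prrr,    ctx_base + EXAMPLE_PRRR);
		writel_relaxed(cfg->arm_v7s_cfg.nmrr,    ctx_base + EXAMPLE_NMRR);
	}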
@@ -121,18 +146,41 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
  * @fmt:    The page table format.
  * @cookie: An opaque token provided by the IOMMU driver and passed back to
  *          any callback routines.
+ * @tlb_sync_pending: Private flag for optimising out redundant syncs.
  * @cfg:    A copy of the page table configuration.
  * @ops:    The page table operations in use for this set of page tables.
  */
 struct io_pgtable {
 	enum io_pgtable_fmt	fmt;
 	void			*cookie;
+	bool			tlb_sync_pending;
 	struct io_pgtable_cfg	cfg;
 	struct io_pgtable_ops	ops;
 };

 #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+	iop->cfg.tlb->tlb_flush_all(iop->cookie);
+	iop->tlb_sync_pending = true;
+}
+
+static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
+		unsigned long iova, size_t size, size_t granule, bool leaf)
+{
+	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+	iop->tlb_sync_pending = true;
+}
+
+static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+{
+	if (iop->tlb_sync_pending) {
+		iop->cfg.tlb->tlb_sync(iop->cookie);
+		iop->tlb_sync_pending = false;
+	}
+}
+
 /**
  * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
  *                              particular format.
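The wrappers above set and test tlb_sync_pending so that a sync with no outstanding invalidations becomes a no-op; they ultimately call back into the driver's iommu_gather_ops. A minimal sketch of such a callback set, with the example_* functions standing in for whatever the hardware actually requires to issue and complete TLB invalidations:

	/* Sketch only: example_* are placeholders, not real driver helpers. */
	static void example_tlb_flush_all(void *cookie)
	{
		example_invalidate_all(cookie);
	}

	static void example_tlb_add_flush(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
	{
		example_invalidate_range(cookie, iova, size, granule, leaf);
	}

	static void example_tlb_sync(void *cookie)
	{
		example_wait_for_invalidation(cookie);
	}

	static const struct iommu_gather_ops example_gather_ops = {
		.tlb_flush_all	= example_tlb_flush_all,
		.tlb_add_flush	= example_tlb_add_flush,
		.tlb_sync	= example_tlb_sync,
	};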
@@ -149,5 +197,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;

 #endif /* __IO_PGTABLE_H */