// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"


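/*
 * To work around implementations where configuration accesses are
 * always made in the Secure state (e.g. Calxeda ECX-2000), redirect
 * accesses to the affected GR0 globals to their Non-Secure banked
 * aliases, which sit 0x400 bytes above the Secure registers.
 */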
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care for sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};
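
/*
 * The core driver dispatches register accesses through these hooks when
 * they are implemented; conceptually (cf. arm_smmu_readl() in arm-smmu.h):
 *
 *	if (smmu->impl && unlikely(smmu->impl->read_reg))
 *		return smmu->impl->read_reg(smmu, page, offset);
 *	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
 */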


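/*
 * Cavium "subclasses" the generic SMMU device by embedding it in a
 * larger structure, so container_of() can recover the per-instance
 * data from any struct arm_smmu_device pointer the core hands back.
 */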
struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);

	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

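/*
 * Offset this domain's ASID/VMID into the block that cavium_cfg_probe()
 * reserved for this SMMU instance. For example, with two SMMUs of 128
 * context banks each, id_base works out as 0 and 128 respectively, so
 * no TLB tag is ever shared between instances.
 */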
static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

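/*
 * "Promote" the generic device to the Cavium subclass: allocate the
 * larger structure, copy across the state probed so far, then free the
 * original devm allocation so only the subclass copy remains.
 */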
static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu = *smmu;
	cs->smmu.impl = &cavium_impl;

	devm_kfree(smmu->dev, smmu);

	return &cs->smmu;
}


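/* Implementation-defined ACTLR and sACR bits; see the MMU-500 TRM */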
#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

static int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};


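/*
 * Called by the main driver early in probe, before the hardware is
 * reset or configured, so that every subsequent register access and
 * callback can be filtered through the chosen implementation quirks.
 */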
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	/*
	 * We will inevitably have to combine model-specific implementation
	 * quirks with platform-specific integration quirks, but everything
	 * we currently support happens to work out as straightforward
	 * mutually-exclusive assignments.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	if (of_property_read_bool(smmu->dev->of_node,
				  "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	return smmu;
}