/*
 * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
 * platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency
 */

#define pr_fmt(fmt) "mvebu-coherency: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "coherency.h"
#include "mvebu-soc-id.h"

/* Physical base of the coherency fabric; read by secondary CPUs before
 * they join the fabric (see armada_370_coherency_init()). */
unsigned long coherency_phys_base;
/* Mapped coherency fabric control registers (first DT reg window). */
void __iomem *coherency_base;
/* Mapped per-CPU coherency registers (I/O sync barrier window). */
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET		   0x0

enum {
47
	COHERENCY_FABRIC_TYPE_NONE,
48
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
49
	COHERENCY_FABRIC_TYPE_ARMADA_375,
50
	COHERENCY_FABRIC_TYPE_ARMADA_380,
51 52
};

53
static struct of_device_id of_coherency_table[] = {
54 55
	{.compatible = "marvell,coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
56 57
	{.compatible = "marvell,armada-375-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
58 59
	{.compatible = "marvell,armada-380-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
60 61 62
	{ /* end of list */ },
};

/* Functions defined in coherency_ll.S */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);
66

67
int set_cpu_coherent(void)
68 69
{
	if (!coherency_base) {
70
		pr_warn("Can't make current CPU cache coherent.\n");
71 72 73 74
		pr_warn("Coherency fabric is not initialized\n");
		return 1;
	}

75 76
	ll_add_cpu_to_smp_group();
	return ll_enable_coherency();
77 78
}

/*
 * Trigger an I/O sync barrier in the coherency fabric and spin until
 * the hardware clears the trigger bit, signalling completion.
 */
static inline void mvebu_hwcc_sync_io_barrier(void)
{
	unsigned int ctl;

	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);

	do {
		ctl = readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	} while (ctl & 0x1);
}

/*
 * map_page hook for hardware-coherent DMA: no cache maintenance is
 * performed here, only an I/O sync barrier for every direction except
 * DMA_TO_DEVICE, before handing the bus address to the caller.
 */
static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	dma_addr_t bus_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	if (dir != DMA_TO_DEVICE)
		mvebu_hwcc_sync_io_barrier();

	return bus_addr;
}


/*
 * unmap_page hook: issue an I/O sync barrier, skipped only for the
 * DMA_TO_DEVICE direction.
 */
static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	if (dir == DMA_TO_DEVICE)
		return;

	mvebu_hwcc_sync_io_barrier();
}

/*
 * Single-buffer sync hook (used for both sync_single_for_cpu and
 * sync_single_for_device): an I/O sync barrier, skipped only for the
 * DMA_TO_DEVICE direction.
 */
static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		return;

	mvebu_hwcc_sync_io_barrier();
}

/*
 * DMA operations for devices behind the hardware coherency fabric:
 * reuse the standard ARM implementations, but route the single-buffer
 * map/unmap/sync paths through the I/O sync barrier helpers above.
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= mvebu_hwcc_dma_map_page,
	.unmap_page		= mvebu_hwcc_dma_unmap_page,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
	.sync_single_for_device	= mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};

static int mvebu_hwcc_notifier(struct notifier_block *nb,
			       unsigned long event, void *__dev)
129 130 131 132 133 134 135 136 137 138
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &mvebu_hwcc_dma_ops);

	return NOTIFY_OK;
}

/* Notifier used to hook new platform-bus devices. */
static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

/* Same callback, registered separately on the PCI bus. */
static struct notifier_block mvebu_hwcc_pci_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

/*
 * Armada 370/XP fabric initialization: record the fabric's physical
 * base for secondary CPU bring-up, map both register windows, and make
 * the boot CPU coherent.
 */
static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;

	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);
	/* Join the boot CPU to the fabric now that registers are mapped. */
	set_cpu_coherent();
}

/*
 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
 * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
 * is needed as a workaround for a deadlock issue between the PCIe
 * interface and the cache controller.
 */
static void __iomem *
armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
			      unsigned int mtype, void *caller)
{
	struct resource aperture;

	/* Force uncached mappings for anything inside the PCIe aperture. */
	mvebu_mbus_get_pcie_mem_aperture(&aperture);
	if (phys_addr >= aperture.start && phys_addr + size <= aperture.end)
		mtype = MT_UNCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

/*
 * Armada 375/38x fabric initialization: map the per-CPU coherency
 * registers, install the PCIe ioremap workaround, and, when I/O
 * coherency is enabled, mark the PL310 node(s) as I/O coherent.
 */
static void __init armada_375_380_coherency_init(struct device_node *np)
{
	struct device_node *cache_dn;

	coherency_cpu_base = of_iomap(np, 0);
	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;

	/*
	 * We should switch the PL310 to I/O coherency mode only if
	 * I/O coherency is actually enabled.
	 */
	if (!coherency_available())
		return;

	/*
	 * Add the PL310 property "arm,io-coherent". This makes sure the
	 * outer sync operation is not used, which allows to
	 * workaround the system erratum that causes deadlocks when
	 * doing PCIe in an SMP situation on Armada 375 and Armada
	 * 38x.
	 */
	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
		struct property *p;

		/*
		 * Both allocations can fail; the original code would
		 * dereference a NULL property here. Skip this node on
		 * failure instead of crashing at boot.
		 */
		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			continue;

		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
		if (!p->name) {
			kfree(p);
			continue;
		}

		of_add_property(cache_dn, p);
	}
}

/*
 * Determine which kind of coherency fabric (if any) should be used on
 * this system, based on SMP support and the device tree.
 *
 * Returns one of the COHERENCY_FABRIC_TYPE_* constants.
 */
static int coherency_type(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	int type;

	/*
	 * The coherency fabric is needed:
	 * - For coherency between processors on Armada XP, so only
	 *   when SMP is enabled.
	 * - For coherency between the processor and I/O devices, but
	 *   this coherency requires many pre-requisites (write
	 *   allocate cache policy, shareable pages, SMP bit set) that
	 *   are only meant in SMP situations.
	 *
	 * Note that this means that on Armada 370, there is currently
	 * no way to use hardware I/O coherency, because even when
	 * CONFIG_SMP is enabled, is_smp() returns false due to the
	 * Armada 370 being a single-core processor. To lift this
	 * limitation, we would have to find a way to make the cache
	 * policy set to write-allocate (on all Armada SoCs), and to
	 * set the shareable attribute in page tables (on all Armada
	 * SoCs except the Armada 370). Unfortunately, such decisions
	 * are taken very early in the kernel boot process, at a point
	 * where we don't know yet on which SoC we are running.
	 */
	if (!is_smp())
		return COHERENCY_FABRIC_TYPE_NONE;

	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
	if (!np)
		return COHERENCY_FABRIC_TYPE_NONE;

	/* The fabric type is stashed in the match table's .data field. */
	type = (int) match->data;

	of_node_put(np);

	return type;
}
/*
 * As a precaution, we currently completely disable hardware I/O
 * coherency, until enough testing is done with automatic I/O
 * synchronization barriers to validate that it is a proper solution.
 */
int coherency_available(void)
{
	/* Hardwired off for now; see the comment above. */
	return false;
}

/*
 * Probe the device tree for a coherency fabric and run the
 * SoC-specific initialization for it. Always returns 0; the fabric is
 * treated as optional.
 */
int __init coherency_init(void)
{
	struct device_node *np;
	int type = coherency_type();

	np = of_find_matching_node(NULL, of_coherency_table);

	switch (type) {
	case COHERENCY_FABRIC_TYPE_ARMADA_370_XP:
		armada_370_coherency_init(np);
		break;
	case COHERENCY_FABRIC_TYPE_ARMADA_375:
	case COHERENCY_FABRIC_TYPE_ARMADA_380:
		armada_375_380_coherency_init(np);
		break;
	default:
		break;
	}

	of_node_put(np);

	return 0;
}

/*
 * Late init: hook platform-bus device additions so new devices get the
 * hardware-coherency DMA ops, but only when I/O coherency is in use.
 */
static int __init coherency_late_init(void)
{
	if (!coherency_available())
		return 0;

	bus_register_notifier(&platform_bus_type, &mvebu_hwcc_nb);
	return 0;
}

postcore_initcall(coherency_late_init);
#if IS_ENABLED(CONFIG_PCI)
/*
 * Hook PCI device additions the same way as platform devices, but only
 * when I/O coherency is in use.
 */
static int __init coherency_pci_init(void)
{
	if (!coherency_available())
		return 0;

	bus_register_notifier(&pci_bus_type, &mvebu_hwcc_pci_nb);
	return 0;
}

arch_initcall(coherency_pci_init);
#endif