irq-armada-370-xp.c
/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/hardware/cache-l2x0.h>

/* Interrupt Controller Registers Map */
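/*
 * As used below, the mask (0x48/0x4C), interrupt acknowledge (0x44) and
 * doorbell cause/mask (0x8/0xc) registers are accessed through the
 * per-CPU bank (per_cpu_int_base); the control, enable, source control
 * and software-trigger registers are relative to main_int_base.
 */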
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS           (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS        (0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

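/* Timer0 is the only interrupt handled as per-CPU by this driver */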
#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

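/* The first 8 doorbells are used as IPIs */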
#define ACTIVE_DOORBELLS			(8)

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask the global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_SMP
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't prevent
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

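	/*
	 * Route the interrupt to the single requested CPU: clear the
	 * bits of all online CPUs in the source control register and
	 * set only the bit of the new target.
	 */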
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "armada_370_xp_irq",
	.irq_mask       = armada_370_xp_irq_mask,
	.irq_mask_ack   = armada_370_xp_irq_mask,
	.irq_unmask     = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
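	/*
	 * Global interrupts are unmasked at the per-CPU level here and
	 * then gated by mask/unmask through the global enable registers.
	 * The per-CPU Timer0 interrupt is instead enabled globally here
	 * and gated through the per-CPU mask registers.
	 */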
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					handle_percpu_devid_irq);

	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

#ifdef CONFIG_SMP
void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the IPI: target CPU map in bits 8 and up, doorbell number in the low bits */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	u32 control;

	main_int_base = of_iomap(node, 0);
	per_cpu_int_base = of_iomap(node, 1);

	BUG_ON(!main_int_base);
	BUG_ON(!per_cpu_int_base);

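	/*
	 * The number of interrupts supported by the controller is read
	 * from the MPIC control register and used to size the linear
	 * IRQ domain.
	 */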
	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
				&armada_370_xp_mpic_irq_ops, NULL);

	if (!armada_370_xp_mpic_domain)
		panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");

	irq_set_default_host(armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();

	/*
	 * Set the default affinity from all CPUs to the boot CPU.
	 * This is required since the MPIC doesn't prevent several CPUs
	 * from acknowledging the same interrupt.
	 */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);

#endif

	return 0;
}

asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
							       *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

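		/* An IRQ number above 1022 means nothing is pending: stop looping */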
		if (irqnr > 1022)
			break;

		if (irqnr > 0) {
			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
						 irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& 0xFF;

			writel(0x0, per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}

static const struct of_device_id mpic_of_match[] __initconst = {
	{.compatible = "marvell,mpic", .data = armada_370_xp_mpic_of_init},
	{},
};

void __init armada_370_xp_init_irq(void)
{
	of_irq_init(mpic_of_match);
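	/* The L2 cache controller is also initialized from DT at IRQ init time */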
#ifdef CONFIG_CACHE_L2X0
	l2x0_of_init(0, ~0UL);
#endif
}