/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>

#include <plat/cpu.h>

#include "irqchip.h"

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

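/*
 * Per-group state: @base points at the register block shared by four
 * groups, @irq_mask selects this group's bits within those registers,
 * @irq_offset is the first Linux IRQ of the group's eight inputs and
 * @parent_irq is the parent (GIC) interrupt the group is cascaded from.
 */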
struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
};

static struct irq_domain *combiner_irq_domain;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

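/*
 * Mask/unmask a single combiner input by writing its bit to the enable
 * clear/set registers shared by four groups (hence hwirq % 32).
 */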
static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

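/*
 * Chained handler for the parent interrupt: read this group's pending
 * bits from the shared status register and hand the lowest pending
 * input to its Linux handler.
 */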
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_SMP
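/*
 * The combiner has no affinity control of its own, so affinity requests
 * are forwarded to the parent (GIC) interrupt of the group.
 */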
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	if (irq_set_handler_data(irq, combiner_data) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

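/* Hook a combiner group onto its parent interrupt as a chained handler. */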
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->irq_offset = irq_find_mapping(
		combiner_irq_domain, combiner_nr * IRQ_IN_COMBINER);
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	__raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

#ifdef CONFIG_OF
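/*
 * Translate a two-cell devicetree specifier (combiner group, input
 * within the group) into a linear hwirq number.
 */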
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}
#else
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	return -EINVAL;
}
#endif

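/*
 * Set up a newly mapped IRQ: hwirq >> 3 selects the combiner group
 * (eight inputs per group) whose chip data the descriptor points at.
 */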
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

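/*
 * The extra combiner groups (16-19) on EXYNOS4x12 are cascaded from
 * dedicated SPIs rather than following the IRQ_SPI(group) pattern.
 */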
static unsigned int exynos4x12_combiner_extra_irq(int group)
{
	switch (group) {
	case 16:
		return IRQ_SPI(107);
	case 17:
		return IRQ_SPI(108);
	case 18:
		return IRQ_SPI(48);
	case 19:
		return IRQ_SPI(42);
	default:
		return 0;
	}
}

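/*
 * Set up @max_nr combiner groups: allocate IRQ descriptors, register a
 * legacy IRQ domain over them and cascade each group from its parent
 * interrupt, which is taken from the devicetree when @np is given.
 */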
void __init combiner_init(void __iomem *combiner_base,
			  struct device_node *np,
			  unsigned int max_nr)
{
	int i, irq, irq_base;
	unsigned int nr_irq;
	struct combiner_chip_data *combiner_data;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
	}

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warning("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		if (i < EXYNOS4210_MAX_COMBINER_NR || soc_is_exynos5250())
			irq = IRQ_SPI(i);
		else
			irq = exynos4x12_combiner_extra_irq(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_OF
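/*
 * Devicetree entry point: map the combiner registers and read the
 * optional "samsung,combiner-nr" property before calling combiner_init().
 */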
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;
	unsigned int max_nr = 20;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np, max_nr);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);
#endif