/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2016 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
#include <asm/octeon/cvmx-ciu3-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);

static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
#define CIU3_MBOX_PER_CORE 10

/*
 * The 8 most significant bits of the intsn identify the interrupt major block.
 * Each major block might use its own interrupt domain. Thus 256 domains are
 * needed.
 */
#define MAX_CIU3_DOMAINS		256

typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);

/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64			ciu3_addr;
	int			node;
	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
};

/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];

struct octeon_irq_ciu_domain_data {
	int num_sum;  /* number of sum registers (2 or 3). */
};

/* Register offsets from ciu3_addr */
#define CIU3_CONST		0x220
#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)

static __read_mostly int octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu;	/* Next CPU expected to take this irq */
	int ciu_node; /* NUMA node number of the CIU */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

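/*
 * Allocate per-irq chip data for a CIU line:bit pair, install the
 * requested chip and flow handler, and record the Linux irq number in
 * the octeon_irq_ciu_to_irq[] table used by the dispatch handlers.
 */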
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}

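/*
 * Enable the irq on the next CPU picked from the affinity mask (see
 * next_cpu_for_irq), updating that CPU's enable-bit mirror under its
 * per-cpu spinlock before writing the CIU_INTX_EN0/EN1 register.
 */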
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

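/*
 * Record the requested trigger type and switch the flow handler
 * between handle_edge_irq and handle_level_irq to match.
 */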
static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 nS glitch filter*/
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif

static unsigned int edge_startup(struct irq_data *data)
{
	/* ack any pending edge-irq at startup, so there is
	 * an _edge_ to fire on when the event reappears.
	 */
	data->chip->irq_ack(data);
	data->chip->irq_enable(data);
	return 0;
}

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}


static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49: /* GMX DRP */
		case 50: /* IPD_DRP */
		case 52 ... 55: /* Timers */
		case 58: /* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (irq_domain_get_of_node(d) != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
		octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	/*
	 * Default to handle_level_irq. If the DT contains a different
	 * trigger type, it will call the irq_set_type callback and
	 * the handler gets updated.
	 */
	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip, handle_level_irq);
	return r;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};

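/*
 * Dispatch for CIU sum0 interrupts arriving on MIPS IP2: read the
 * per-core SUM0 register, mask it with this CPU's enable mirror and
 * hand the highest pending bit to do_IRQ().
 */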
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively.  Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

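/*
 * Boot-time setup for the original CIU: choose between the spinlocked
 * and the lockless (EN*_W1S/W1C) irq_chip variants based on the chip
 * model, register the CIU irq domain and create the fixed mappings
 * (workqueue, mbox, PCI, timer, watchdog, ...) used by platform code.
 */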
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}

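/*
 * Parse the GPIO node's "interrupts" property (one or two cells,
 * depending on the parent's #interrupt-cells) to obtain the base CIU
 * hwirq, then register a 16-pin linear irq domain for the GPIO lines.
 */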
static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag that was set by of_irq_init()
	 * so that all GPIO devices will be probed.
	 */
	of_node_clear_flag(gpio_node, OF_POPULATED);

	return 0;
}
/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:	 /* IPD_DRP */
		case 8 ... 11: /* Timers */
		case 48: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

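/*
 * Map a CIU2 hwirq to a Linux virq, picking the edge or level chip
 * based on the source; line 7 is reserved for the GPIO domain and is
 * not mapped here.
 */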
static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 is reserved for the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};

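/*
 * IP2 dispatch for CIU2: the per-core SUM register identifies the
 * active line, the per-line SRC register identifies the bit, and the
 * octeon_irq_ciu_to_irq[][] table gives the Linux irq to invoke.
 */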
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}

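/*
 * IP3 dispatch for CIU2: the top four bits of the IP3 SUM register are
 * the mailbox interrupts; dispatch the corresponding OCTEON_IRQ_MBOXn.
 */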
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

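/*
 * Probe/init for the CIU2 (CN68XX): install the per-CPU state and
 * dispatch hooks, create the irq domain, and wire up the fixed
 * WORKQ/WDOG/TIMER/PCI/MSI and mailbox mappings.
 */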
static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}

struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}

static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};

static int octeon_irq_cib_xlat(struct irq_domain *d,
				   struct device_node *node,
				   const u32 *intspec,
				   unsigned int intsize,
				   unsigned long *out_hwirq,
				   unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}

static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is to big!\n",
		       irq_domain_get_of_node(d)->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}

static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};

/* Chained handler: demux the enabled, pending CIB bits to their mapped irqs. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
				i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
				IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(desc);
		}
	}

	return IRQ_HANDLED;
}

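/*
 * Probe a CIB (secondary interrupt block): reg(0) is the RAW register,
 * reg(1) the EN register, and "cavium,max-bits" the number of sources.
 * A linear irq domain is created and the block is chained onto its
 * parent CIU interrupt.
 */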
static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
			ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;

	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
			ciu_node->name);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}

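/*
 * Translate a CIU3 devicetree specifier (intsn, trigger type) into a
 * hwirq.  The intsn is used directly as the hwirq; sources whose
 * ISC_CTL reports them as not implemented are rejected, as are the
 * software (mailbox) intsns, which are handled separately.
 */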
int octeon_irq_ciu3_xlat(struct irq_domain *d,
			 struct device_node *node,
			 const u32 *intspec,
			 unsigned int intsize,
			 unsigned long *out_hwirq,
			 unsigned int *out_type)
{
	struct octeon_ciu3_info *ciu3_info = d->host_data;
	unsigned int hwirq, type, intsn_major;
	union cvmx_ciu3_iscx_ctl isc;

	if (intsize < 2)
		return -EINVAL;
	hwirq = intspec[0];
	type = intspec[1];

	if (hwirq >= (1 << 20))
		return -EINVAL;

	intsn_major = hwirq >> 12;
	switch (intsn_major) {
	case 0x04: /* Software handled separately. */
		return -EINVAL;
	default:
		break;
	}

	isc.u64 =  cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
	if (!isc.s.imp)
		return -EINVAL;

	switch (type) {
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 0: /* unofficial value, but we might as well let it work. */
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = hwirq;

	return 0;
}

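/*
 * Enable an interrupt source: clear any stale enable via the W1C
 * register, then program ISC_CTL with EN set and the IDT of the CPU
 * chosen to receive this irq.
 */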
void octeon_irq_ciu3_enable(struct irq_data *data)
{
	int cpu;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;

	struct octeon_ciu_chip_data *cd;

	cpu = next_cpu_for_irq(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	cvmx_read_csr(isc_ctl_addr);
}

void octeon_irq_ciu3_disable(struct irq_data *data)
{
	u64 isc_ctl_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;

	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	cvmx_read_csr(isc_ctl_addr);
}

void octeon_irq_ciu3_ack(struct irq_data *data)
{
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	/*
	 * We use a single irq_chip, so we have to do nothing to ack a
	 * level interrupt.
	 */
	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
		return;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

void octeon_irq_ciu3_mask(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

void octeon_irq_ciu3_mask_ack(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	/*
	 * We use a single irq_chip, so only ack an edge (!level)
	 * interrupt.
	 */
	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
		isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

#ifdef CONFIG_SMP
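/*
 * Retarget an enabled source at the first CPU of the new mask by
 * reprogramming its ISC_CTL with that CPU's IP2 IDT.
 */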
int octeon_irq_ciu3_set_affinity(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
		return -EINVAL;

	if (!enable_one)
		return IRQ_SET_MASK_OK;

	cpu = cpumask_first(dest);
	if (cpu >= nr_cpu_ids)
		cpu = smp_processor_id();
	cd->current_cpu = cpu;

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	cvmx_read_csr(isc_ctl_addr);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip octeon_irq_chip_ciu3 = {
	.name = "CIU3",
	.irq_startup = edge_startup,
	.irq_enable = octeon_irq_ciu3_enable,
	.irq_disable = octeon_irq_ciu3_disable,
	.irq_ack = octeon_irq_ciu3_ack,
	.irq_mask = octeon_irq_ciu3_mask,
	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
	.irq_unmask = octeon_irq_ciu3_enable,
	.irq_set_type = octeon_irq_ciu_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw, struct irq_chip *chip)
{
	struct octeon_ciu3_info *ciu3_info = d->host_data;
	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
						       ciu3_info->node);
	if (!cd)
		return -ENOMEM;
	cd->intsn = hw;
	cd->current_cpu = -1;
	cd->ciu3_addr = ciu3_info->ciu3_addr;
	cd->ciu_node = ciu3_info->node;
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
	irq_set_chip_data(virq, cd);

	return 0;
}

static int octeon_irq_ciu3_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
}

static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
	.map = octeon_irq_ciu3_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu3_xlat,
};

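/*
 * Chained IP2 handler for CIU3: DEST_PP_INT for this core's IP2 slot
 * reports the pending intsn.  The major block (intsn >> 12) selects the
 * irq domain used to dispatch it; an unmapped source is disabled and
 * reported as spurious.
 */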
static void octeon_irq_ciu3_ip2(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		irq_hw_number_t hw;
		struct irq_domain *domain;
		/* Get the domain to use from the major block */
		int block = intsn >> 12;
		int ret;

		domain = ciu3_info->domain[block];
		if (ciu3_info->intsn2hw[block])
			hw = ciu3_info->intsn2hw[block](domain, intsn);
		else
			hw = intsn;

		ret = handle_domain_irq(domain, hw, NULL);
		if (ret < 0) {
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}

/*
 * 10 mbox per core starting from zero.
 * Base mbox is core * 10
 */
static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
{
	/* SW (mbox) are 0x04 in bits 12..19 */
	return 0x04000 + CIU3_MBOX_PER_CORE * core;
}

static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
{
	return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
}

static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
{
	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;

	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
}

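/*
 * Chained IP3 (mailbox) handler for CIU3: translate the pending intsn
 * back to OCTEON_IRQ_MBOXn for this core; anything out of range is
 * disabled and reported as spurious.
 */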
static void octeon_irq_ciu3_mbox(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;
	int core = cvmx_get_local_core_num();

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);

		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
		} else {
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}

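/*
 * Raise mailbox 'mbox' on 'cpu' by setting the RAW bit of the matching
 * per-core software intsn through the W1S register.
 */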
void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	union cvmx_ciu3_iscx_w1s isc_w1s;
	u64 isc_w1s_addr;

	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
		return;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);

	isc_w1s.u64 = 0;
	isc_w1s.s.raw = 1;

	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
	cvmx_read_csr(isc_w1s_addr);
}

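/*
 * Enable or disable one mailbox for one CPU.  The source is always
 * cleared first; when enabling, it is then pointed at that CPU's IP3 IDT.
 */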
static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_ctl_addr, isc_w1c_addr;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);

	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;

	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	if (en) {
		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);

		isc_ctl.u64 = 0;
		isc_ctl.s.en = 1;
		isc_ctl.s.idt = idt;
		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	}
	cvmx_read_csr(isc_ctl_addr);
}

static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
{
	int cpu;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);

	for_each_online_cpu(cpu)
		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
}

static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
{
	int cpu;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);

	for_each_online_cpu(cpu)
		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
}

static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
}

static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
}

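/*
 * Per-CPU CIU3 setup: record this CPU's ciu3_info, program its IP2,
 * IP3, and IP4 interrupt delivery tables to target only this core, and
 * disable the per-core mailbox sources.
 */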
static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
{
	u64 b = ciu3_info->ciu3_addr;
	int idt_ip2, idt_ip3, idt_ip4;
	int unused_idt2;
	int core = cvmx_get_local_core_num();
	int i;

	__this_cpu_write(octeon_ciu3_info, ciu3_info);

	/*
	 * 4 idt per core starting from 1 because zero is reserved.
	 * Base idt per core is 4 * core + 1
	 */
	idt_ip2 = core * 4 + 1;
	idt_ip3 = core * 4 + 2;
	idt_ip4 = core * 4 + 3;
	unused_idt2 = core * 4 + 4;
	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);

	/* ip2 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);

	/* ip3 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);

	/* ip4 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);

	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);

	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);

		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
	}

	return 0;
}

static void octeon_irq_setup_secondary_ciu3(void)
{
	struct octeon_ciu3_info *ciu3_info;

	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
	octeon_irq_ciu3_alloc_resources(ciu3_info);
	irq_cpu_online();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static struct irq_chip octeon_irq_chip_ciu3_mbox = {
	.name = "CIU3-M",
	.irq_enable = octeon_irq_ciu3_mbox_enable,
	.irq_disable = octeon_irq_ciu3_mbox_disable,
	.irq_ack = octeon_irq_ciu3_mbox_ack,

	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

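/*
 * Probe/init for a CIU3 node: map its CSR base, install the CIU3
 * dispatch hooks, create the default tree domain shared by all major
 * blocks, and (for the boot node) set up the mailbox irqs and the
 * per-CPU delivery tables.
 */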
static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
				       struct device_node *parent)
{
	int i;
	int node;
	struct irq_domain *domain;
	struct octeon_ciu3_info *ciu3_info;
	const __be32 *zero_addr;
	u64 base_addr;
	union cvmx_ciu3_const consts;

	node = 0; /* of_node_to_nid(ciu_node); */
	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);

	if (!ciu3_info)
		return -ENOMEM;

	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (WARN_ON(!zero_addr))
		return -EINVAL;

	base_addr = of_translate_address(ciu_node, zero_addr);
	base_addr = (u64)phys_to_virt(base_addr);

	ciu3_info->ciu3_addr = base_addr;
	ciu3_info->node = node;

	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);

	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;

	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	if (node == cvmx_get_node_num()) {
		/* Mips internal */
		octeon_irq_init_core();

		/* Only do per CPU things if it is the CIU of the boot node. */
		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
		WARN_ON(i < 0);

		for (i = 0; i < 8; i++)
			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
						 &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
	}

	/*
	 * Initialize all domains to use the default domain. Specific major
	 * blocks will overwrite the default domain as needed.
	 */
	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
				     ciu3_info);
	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
		ciu3_info->domain[i] = domain;

	octeon_ciu3_info_per_node[node] = ciu3_info;

	if (node == cvmx_get_node_num()) {
		/* Only do per CPU things if it is the CIU of the boot node. */
		octeon_irq_ciu3_alloc_resources(ciu3_info);
		if (node == 0)
			irq_set_default_host(domain);

		octeon_irq_use_ip4 = false;
		/* Enable the CIU lines */
		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
		clear_c0_status(STATUSF_IP4);
	}

	return 0;
}

static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{}
};

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}

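/*
 * Top-level MIPS interrupt dispatch: service pending cause bits until
 * none remain, routing IP2/IP3/IP4 to the registered CIU handlers and
 * any other core interrupt straight to do_IRQ().
 */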
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */

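/* Return the irq domain that owns the given CIU3 major block on a node. */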
struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
{
	struct octeon_ciu3_info *ciu3_info;

	ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
	return ciu3_info->domain[block];
}
EXPORT_SYMBOL(octeon_irq_get_block_domain);