/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2014 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

struct octeon_irq_ciu_domain_data {
	int num_sum;  /* number of sum registers (2 or 3). */
};

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
			u8 gpio_line;
		};
	};
	int current_cpu;	/* Next CPU expected to take this irq */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

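/*
 * Bind a Linux irq to a CIU (line, bit) source: allocate the per-irq
 * chip data, install the flow handler, and record the virq in
 * octeon_irq_ciu_to_irq so the dispatch code can find it.
 */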
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);

}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

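/*
 * Round-robin the irq over the online CPUs in its affinity mask.
 * When more than one CPU is in the mask, each call advances
 * current_cpu to the next online member so successive interrupts
 * are spread across the set.
 */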
static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}

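/*
 * Enable the irq on the next CPU chosen from the affinity set.  The
 * per-CPU enable mirror is updated under that CPU's spinlock and then
 * written out to the CIU enable register for its core.
 */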
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

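/*
 * Program GPIO_BIT_CFG for this line from the irq's trigger type:
 * edge vs. level detection, inverted polarity for falling-edge or
 * active-low triggers, and a fixed glitch filter.
 */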
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

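/*
 * GPIO irqs are mapped before their trigger type is known, so the
 * flow handler is chosen per interrupt from the current trigger type.
 */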
static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
{
	if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}

#ifdef CONFIG_SMP

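/*
 * When a CPU goes offline, migrate any irq targeted at it: drop the
 * dying CPU from a multi-CPU affinity mask, or retarget a single-CPU
 * mask at the lowest numbered online CPU.
 */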
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;


	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}


static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};

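/*
 * Classify a CIU source as edge- or level-triggered so that the
 * matching irq_chip variant and flow handler can be chosen at map
 * time.
 */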
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49: /* GMX DRP */
		case 50: /* IPD_DRP */
		case 52 ... 55: /* Timers */
		case 58: /* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

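/*
 * Translate a two-cell GPIO interrupt specifier (pin number plus the
 * standard devicetree trigger flags) into a hwirq and trigger type.
 */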
static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}

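/*
 * Create the mapping for a CIU hwirq (sum line in the upper bits, bit
 * number in the low six), picking the edge or level chip variant.
 */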
static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Don't map irq if it is reserved for GPIO. */
	if (line == 0 && bit >= 16 && bit < 32)
		return 0;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
		octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
		octeon_irq_gpio_chip, octeon_irq_handle_trigger);
	return r;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};

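/*
 * IP2 dispatch: read this core's CIU summary, mask it with the
 * per-CPU enable mirror, and handle the highest numbered pending bit.
 */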
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively.  Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

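/*
 * Probe-time setup for the original CIU.  Chooses the lockless
 * W1S/W1C chip variants on models that have those registers, decides
 * whether the third sum register (and thus IP4) is used, and creates
 * the fixed legacy mappings.
 */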
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 =  octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
	if (r)
		goto err;

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}

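/*
 * Parse the GPIO controller's "interrupts" property (one or two
 * cells, depending on the parent) to find the base hwirq, then
 * register a 16-line linear irq domain on top of it.
 */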
static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:	 /* IPD_DRP */
		case 8 ... 11: /* Timers */
		case 48: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 holds the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};

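/*
 * IP2 dispatch for CIU2: the per-core summary register selects the
 * line, and that line's SRC register selects the bit within it.
 */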
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
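/*
 * Worked example for the two-level decode above (values are
 * illustrative): if the masked IP2 sum reads 0x48, fls64() - 1 picks
 * line 6 (PKT) ahead of line 3, the SRC register at WRKQ + 6 * 0x1000
 * is read, and its highest set bit selects the irq within that line
 * via octeon_irq_ciu_to_irq[6][bit].
 */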

static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
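/*
 * Illustrative note: the four mailbox sources sit in the top bits of
 * the IP3 sum, so ">> 60" leaves a 4-bit mask. For example a sum of
 * 1ull << 61 shifts down to 0x2, fls64(0x2) - 1 = 1, and
 * OCTEON_IRQ_MBOX1 is dispatched.
 */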

static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i;
	int r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* MIPS internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	if (!ciu_domain)
		return -ENOMEM;
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}

struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}

static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};

static int octeon_irq_cib_xlat(struct irq_domain *d,
				   struct device_node *node,
				   const u32 *intspec,
				   unsigned int intsize,
				   unsigned long *out_hwirq,
				   unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}
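/*
 * Example specifiers accepted above (hypothetical device tree values):
 * "interrupts = <5 4>" selects CIB bit 5 as level triggered and
 * "interrupts = <5 1>" selects the same bit as rising edge; a one-cell
 * specifier falls into the "unofficial" type 0 case and is also
 * treated as level.
 */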

static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       d->of_node->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}

static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};

/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
				i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
				IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(irq, desc);
		}
	}

	return IRQ_HANDLED;
}
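/*
 * Worked example for the demux above (values are illustrative): with
 * en = 0x6 and raw = 0x5, only bit 2 is both enabled and pending, so a
 * single pass dispatches the virq mapped to hwirq 2. A pending bit
 * with no mapping is masked and acked instead, so a stuck source
 * cannot wedge the parent CIU line.
 */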

static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
			ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
			ciu_node->name);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}
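/*
 * Sketch of a device tree node this probe expects (hypothetical
 * values; reg(0) is the RAW register, reg(1) the EN register):
 *
 *	l2c-cib: interrupt-controller@1180080000e0 {
 *		compatible = "cavium,octeon-7130-cib";
 *		reg = <0x11800 0x800000e0 0x0 0x8>,
 *		      <0x11800 0x800000e8 0x0 0x8>;
 *		cavium,max-bits = <23>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&ciu>;
 *		interrupts = <1 24>;
 *	};
 */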

static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{}
};

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}
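/*
 * Editorial example for the fallback dispatch above: the IM bits
 * occupy positions 8..15 of CP0 Cause/Status, so a pending IP7
 * (cop0_cause & 0x8000) gives fls() = 16 and calls
 * do_IRQ(MIPS_CPU_IRQ_BASE + 7).
 */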

#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}
}

#endif /* CONFIG_HOTPLUG_CPU */