/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20
#define PACA_IRQ_PMI		0x40

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

/* Exception/interrupt entry points implemented elsewhere in arch code. */
extern void replay_system_reset(void);
extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
66 67 68 69 70 71
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
72
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));
73 74 75 76

	return flags;
}

77 78 79
/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
80
 * we changed paca->irq_soft_mask
81
 */
82
static inline notrace void irq_soft_mask_set(unsigned long mask)
83
{
84
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
85
	/*
86 87 88 89 90 91 92 93 94 95 96 97
	 * The irq mask must always include the STD bit if any are set.
	 *
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
98 99 100 101
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

102 103 104
	asm volatile(
		"stb %0,%1(13)"
		:
105 106
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
107 108 109
		: "memory");
}

110
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
111 112 113
{
	unsigned long flags;

114
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
115 116 117
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

118 119 120
	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
121
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
122
		  "r" (mask)
123 124 125 126 127
		: "memory");

	return flags;
}

128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

/* "flags" for the generic irqflags API is the soft mask on 64-bit. */
static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

151
static inline void arch_local_irq_disable(void)
152
{
153
	irq_soft_mask_set(IRQS_DISABLED);
154
}

/* Out of line: may replay pending soft-masked interrupts on unmask. */
extern void arch_local_irq_restore(unsigned long);

D
David Howells 已提交
158 159
static inline void arch_local_irq_enable(void)
{
160
	arch_local_irq_restore(IRQS_ENABLED);
D
David Howells 已提交
161 162 163 164
}

static inline unsigned long arch_local_irq_save(void)
{
165
	return irq_soft_mask_set_return(IRQS_DISABLED);
D
David Howells 已提交
166 167 168 169
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
170
	return flags & IRQS_DISABLED;
D
David Howells 已提交
171
}
L
Linus Torvalds 已提交
172

D
David Howells 已提交
173 174 175 176
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irq with PMI, set of
 * new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * functions are added. These macros are implemented using generic
 * linux local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	 do {							\
		raw_local_irq_pmu_save(flags);			\
		trace_hardirqs_off();				\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (raw_irqs_disabled_flags(flags)) {		\
			raw_local_irq_pmu_restore(flags);	\
			trace_hardirqs_off();			\
		} else {					\
			trace_hardirqs_on();			\
			raw_local_irq_pmu_restore(flags);	\
		}						\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */

/* Hardware-level enable/disable: wrtee on BookE, MSR writes elsewhere. */
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	wrtee(MSR_EE)
#define __hard_irq_disable()	wrtee(0)
#define __hard_EE_RI_disable()	wrtee(0)
#define __hard_RI_enable()	do { } while (0)
#else
#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
#define __hard_EE_RI_disable()	__mtmsrd(0, 1)
#define __hard_RI_enable()	__mtmsrd(MSR_RI, 1)
#endif

/*
 * Hard disable: clear EE, soft-mask everything, and record the hard
 * disable in irq_happened.  If we were previously soft-enabled, also
 * save r1 in the paca and inform irq tracing.
 */
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while(0)

252 253 254 255 256
static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}

257 258
/*
 * This is called by asynchronous interrupts to conditionally
259 260 261
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
262 263 264
 */
static inline void may_hard_irq_enable(void)
{
265 266
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
267
		__hard_irq_enable();
268
	}
269
}
270

271 272
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
273
	return (regs->softe & IRQS_DISABLED);
274 275
}

276
extern bool prep_irq_for_idle(void);
277
extern bool prep_irq_for_idle_irqsoff(void);
278
extern void irq_set_pending_from_srr1(unsigned long srr1);
279 280

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
281

282 283
extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

/* 32-bit: "flags" for the generic irqflags API is the raw MSR value. */
static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
C
Christophe Leroy 已提交
293 294 295 296
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
D
David Howells 已提交
297
}
L
Linus Torvalds 已提交
298

D
David Howells 已提交
299
static inline unsigned long arch_local_irq_save(void)
L
Linus Torvalds 已提交
300
{
D
David Howells 已提交
301
	unsigned long flags = arch_local_save_flags();
C
Christophe Leroy 已提交
302 303 304 305 306 307 308 309

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

D
David Howells 已提交
310
	return flags;
L
Linus Torvalds 已提交
311 312
}

D
David Howells 已提交
313
static inline void arch_local_irq_disable(void)
L
Linus Torvalds 已提交
314
{
C
Christophe Leroy 已提交
315 316 317 318 319 320
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(mfmsr() & ~MSR_EE);
L
Linus Torvalds 已提交
321 322
}

D
David Howells 已提交
323
static inline void arch_local_irq_enable(void)
L
Linus Torvalds 已提交
324
{
C
Christophe Leroy 已提交
325 326 327 328 329 330
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else
		mtmsr(mfmsr() | MSR_EE);
L
Linus Torvalds 已提交
331 332
}

D
David Howells 已提交
333
static inline bool arch_irqs_disabled_flags(unsigned long flags)
334 335 336 337
{
	return (flags & MSR_EE) == 0;
}

D
David Howells 已提交
338 339 340 341 342 343 344
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

345 346 347 348 349
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

/* No-op on 32-bit: interrupts are never kept lazily hard-disabled. */
static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */